Diffstat (limited to 'src/3rdparty/glslang/glslang')
-rw-r--r--  src/3rdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp | 76
-rw-r--r--  src/3rdparty/glslang/glslang/GenericCodeGen/Link.cpp | 91
-rw-r--r--  src/3rdparty/glslang/glslang/Include/BaseTypes.h | 545
-rw-r--r--  src/3rdparty/glslang/glslang/Include/Common.h | 291
-rw-r--r--  src/3rdparty/glslang/glslang/Include/ConstantUnion.h | 938
-rw-r--r--  src/3rdparty/glslang/glslang/Include/InfoSink.h | 144
-rw-r--r--  src/3rdparty/glslang/glslang/Include/InitializeGlobals.h | 44
-rw-r--r--  src/3rdparty/glslang/glslang/Include/PoolAlloc.h | 317
-rw-r--r--  src/3rdparty/glslang/glslang/Include/ResourceLimits.h | 149
-rw-r--r--  src/3rdparty/glslang/glslang/Include/ShHandle.h | 176
-rw-r--r--  src/3rdparty/glslang/glslang/Include/Types.h | 2276
-rw-r--r--  src/3rdparty/glslang/glslang/Include/arrays.h | 341
-rw-r--r--  src/3rdparty/glslang/glslang/Include/intermediate.h | 1730
-rw-r--r--  src/3rdparty/glslang/glslang/Include/revision.h | 3
-rw-r--r--  src/3rdparty/glslang/glslang/Include/revision.template | 13
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Constant.cpp | 1405
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/InfoSink.cpp | 113
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Initialize.cpp | 9634
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Initialize.h | 110
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp | 302
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Intermediate.cpp | 3967
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/LiveTraverser.h | 138
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp | 628
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp | 7997
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.h | 510
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp | 315
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp | 118
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.h | 41
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Scan.cpp | 1793
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Scan.h | 276
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/ScanContext.h | 93
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp | 2041
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp | 396
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.h | 871
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Versions.cpp | 1126
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/Versions.h | 299
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/attribute.cpp | 257
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/attribute.h | 102
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/gl_types.h | 214
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/glslang.y | 3796
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp | 10468
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h | 509
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/intermOut.cpp | 1518
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/iomapper.cpp | 818
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/iomapper.h | 63
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/limits.cpp | 198
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/linkValidate.cpp | 1686
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/localintermediate.h | 896
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/parseConst.cpp | 204
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/parseVersions.h | 159
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/pch.cpp | 35
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/pch.h | 49
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp | 1320
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp | 181
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp | 119
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h | 702
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp | 1246
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp | 219
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h | 179
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp | 866
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h | 55
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/reflection.cpp | 1256
-rw-r--r--  src/3rdparty/glslang/glslang/MachineIndependent/reflection.h | 203
-rw-r--r--  src/3rdparty/glslang/glslang/OSDependent/Unix/ossource.cpp | 207
-rw-r--r--  src/3rdparty/glslang/glslang/OSDependent/Windows/main.cpp | 74
-rw-r--r--  src/3rdparty/glslang/glslang/OSDependent/Windows/ossource.cpp | 147
-rw-r--r--  src/3rdparty/glslang/glslang/OSDependent/osinclude.h | 63
-rw-r--r--  src/3rdparty/glslang/glslang/Public/ShaderLang.h | 846
-rw-r--r--  src/3rdparty/glslang/glslang/updateGrammar | 3
69 files changed, 67965 insertions, 0 deletions
diff --git a/src/3rdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp b/src/3rdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp
new file mode 100644
index 0000000..b3c7226
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/GenericCodeGen/CodeGen.cpp
@@ -0,0 +1,76 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "../Include/ShHandle.h"
+#include "../MachineIndependent/Versions.h"
+
+//
+// Here is where real machine specific high-level data would be defined.
+//
+class TGenericCompiler : public TCompiler {
+public:
+ TGenericCompiler(EShLanguage l, int dOptions) : TCompiler(l, infoSink), debugOptions(dOptions) { }
+ virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile);
+ TInfoSink infoSink;
+ int debugOptions;
+};
+
+//
+// This function must be provided to create the actual
+// compile object used by higher level code. It returns
+// a subclass of TCompiler.
+//
+TCompiler* ConstructCompiler(EShLanguage language, int debugOptions)
+{
+ return new TGenericCompiler(language, debugOptions);
+}
+
+//
+// Delete the compiler made by ConstructCompiler
+//
+void DeleteCompiler(TCompiler* compiler)
+{
+ delete compiler;
+}
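+
+// Illustrative pairing of the two entry points above (hypothetical caller,
+// shown only as a sketch; the real call sites live in the higher-level
+// ShaderLang code):
+//
+//     TCompiler* compiler = ConstructCompiler(EShLangFragment, 0);
+//     // ... hand the compiler to the Sh* public API ...
+//     DeleteCompiler(compiler);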
+
+//
+// Generate code from the given parse tree
+//
+bool TGenericCompiler::compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/)
+{
+ haveValidObjectCode = true;
+
+ return haveValidObjectCode;
+}
diff --git a/src/3rdparty/glslang/glslang/GenericCodeGen/Link.cpp b/src/3rdparty/glslang/glslang/GenericCodeGen/Link.cpp
new file mode 100644
index 0000000..c38db0f
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/GenericCodeGen/Link.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// The top level algorithms for linking multiple
+// shaders together.
+//
+#include "../Include/Common.h"
+#include "../Include/ShHandle.h"
+
+//
+// Actual link object, derived from the shader handle base classes.
+//
+class TGenericLinker : public TLinker {
+public:
+ TGenericLinker(EShExecutable e, int dOptions) : TLinker(e, infoSink), debugOptions(dOptions) { }
+ bool link(TCompilerList&, TUniformMap*) { return true; }
+ void getAttributeBindings(ShBindingTable const **) const { }
+ TInfoSink infoSink;
+ int debugOptions;
+};
+
+//
+// The internal view of a uniform/float object exchanged with the driver.
+//
+class TUniformLinkedMap : public TUniformMap {
+public:
+ TUniformLinkedMap() { }
+ virtual int getLocation(const char*) { return 0; }
+};
+
+TShHandleBase* ConstructLinker(EShExecutable executable, int debugOptions)
+{
+ return new TGenericLinker(executable, debugOptions);
+}
+
+void DeleteLinker(TShHandleBase* linker)
+{
+ delete linker;
+}
+
+TUniformMap* ConstructUniformMap()
+{
+ return new TUniformLinkedMap();
+}
+
+void DeleteUniformMap(TUniformMap* map)
+{
+ delete map;
+}
+
+TShHandleBase* ConstructBindings()
+{
+ return 0;
+}
+
+void DeleteBindingList(TShHandleBase* bindingList)
+{
+ delete bindingList;
+}
diff --git a/src/3rdparty/glslang/glslang/Include/BaseTypes.h b/src/3rdparty/glslang/glslang/Include/BaseTypes.h
new file mode 100644
index 0000000..1827c49
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/BaseTypes.h
@@ -0,0 +1,545 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _BASICTYPES_INCLUDED_
+#define _BASICTYPES_INCLUDED_
+
+namespace glslang {
+
+//
+// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this.
+//
+enum TBasicType {
+ EbtVoid,
+ EbtFloat,
+ EbtDouble,
+ EbtFloat16,
+ EbtInt8,
+ EbtUint8,
+ EbtInt16,
+ EbtUint16,
+ EbtInt,
+ EbtUint,
+ EbtInt64,
+ EbtUint64,
+ EbtBool,
+ EbtAtomicUint,
+ EbtSampler,
+ EbtStruct,
+ EbtBlock,
+
+#ifdef NV_EXTENSIONS
+ EbtAccStructNV,
+#endif
+
+ EbtReference,
+
+ // HLSL types that live only temporarily.
+ EbtString,
+
+ EbtNumTypes
+};
+
+//
+// Storage qualifiers. Should align with different kinds of storage or
+// resource or GLSL storage qualifier. Expansion is deprecated.
+//
+// N.B.: You probably DON'T want to add anything here, but rather just add it
+// to the built-in variables. See the comment above TBuiltInVariable.
+//
+// A new built-in variable will normally use an existing qualifier, like 'in' or 'out'.
+// DO NOT follow the design pattern of, say, EvqInstanceId.
+//
+enum TStorageQualifier {
+ EvqTemporary, // For temporaries (within a function), read/write
+ EvqGlobal, // For globals read/write
+ EvqConst, // User-defined constant values, will be semantically constant and constant folded
+ EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqUniform, // read only, shared with app
+ EvqBuffer, // read/write, shared with app
+ EvqShared, // compute shader's read/write 'shared' qualifier
+
+#ifdef NV_EXTENSIONS
+ EvqPayloadNV,
+ EvqPayloadInNV,
+ EvqHitAttrNV,
+ EvqCallableDataNV,
+ EvqCallableDataInNV,
+#endif
+
+ // parameters
+ EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
+ EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
+ EvqInOut,
+ EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
+
+ // built-ins read by vertex shader
+ EvqVertexId,
+ EvqInstanceId,
+
+ // built-ins written by vertex shader
+ EvqPosition,
+ EvqPointSize,
+ EvqClipVertex,
+
+ // built-ins read by fragment shader
+ EvqFace,
+ EvqFragCoord,
+ EvqPointCoord,
+
+ // built-ins written by fragment shader
+ EvqFragColor,
+ EvqFragDepth,
+
+ // end of list
+ EvqLast
+};
+
+//
+// Subcategories of the TStorageQualifier, simply to give a direct mapping
+// between built-in variable names and a numerical value (the enum).
+//
+// For backward compatibility, there is some redundancy between the
+// TStorageQualifier and these. Existing members must be maintained accurately in both.
+// However, any new built-in variable (and any existing non-redundant one)
+// must follow the pattern that the specific built-in is here, and only its
+// general qualifier is in TStorageQualifier.
+//
+// Something like gl_Position, which is sometimes 'in' and sometimes 'out'
+// shows up as two different built-in variables in a single stage, but
+// only has a single enum in TBuiltInVariable, so both the
+// TStorageQualifier and the TBuiltInVariable are needed to distinguish
+// between them.
+//
+enum TBuiltInVariable {
+ EbvNone,
+ EbvNumWorkGroups,
+ EbvWorkGroupSize,
+ EbvWorkGroupId,
+ EbvLocalInvocationId,
+ EbvGlobalInvocationId,
+ EbvLocalInvocationIndex,
+ EbvNumSubgroups,
+ EbvSubgroupID,
+ EbvSubGroupSize,
+ EbvSubGroupInvocation,
+ EbvSubGroupEqMask,
+ EbvSubGroupGeMask,
+ EbvSubGroupGtMask,
+ EbvSubGroupLeMask,
+ EbvSubGroupLtMask,
+ EbvSubgroupSize2,
+ EbvSubgroupInvocation2,
+ EbvSubgroupEqMask2,
+ EbvSubgroupGeMask2,
+ EbvSubgroupGtMask2,
+ EbvSubgroupLeMask2,
+ EbvSubgroupLtMask2,
+ EbvVertexId,
+ EbvInstanceId,
+ EbvVertexIndex,
+ EbvInstanceIndex,
+ EbvBaseVertex,
+ EbvBaseInstance,
+ EbvDrawId,
+ EbvPosition,
+ EbvPointSize,
+ EbvClipVertex,
+ EbvClipDistance,
+ EbvCullDistance,
+ EbvNormal,
+ EbvVertex,
+ EbvMultiTexCoord0,
+ EbvMultiTexCoord1,
+ EbvMultiTexCoord2,
+ EbvMultiTexCoord3,
+ EbvMultiTexCoord4,
+ EbvMultiTexCoord5,
+ EbvMultiTexCoord6,
+ EbvMultiTexCoord7,
+ EbvFrontColor,
+ EbvBackColor,
+ EbvFrontSecondaryColor,
+ EbvBackSecondaryColor,
+ EbvTexCoord,
+ EbvFogFragCoord,
+ EbvInvocationId,
+ EbvPrimitiveId,
+ EbvLayer,
+ EbvViewportIndex,
+ EbvPatchVertices,
+ EbvTessLevelOuter,
+ EbvTessLevelInner,
+ EbvBoundingBox,
+ EbvTessCoord,
+ EbvColor,
+ EbvSecondaryColor,
+ EbvFace,
+ EbvFragCoord,
+ EbvPointCoord,
+ EbvFragColor,
+ EbvFragData,
+ EbvFragDepth,
+ EbvFragStencilRef,
+ EbvSampleId,
+ EbvSamplePosition,
+ EbvSampleMask,
+ EbvHelperInvocation,
+
+#ifdef AMD_EXTENSIONS
+ EbvBaryCoordNoPersp,
+ EbvBaryCoordNoPerspCentroid,
+ EbvBaryCoordNoPerspSample,
+ EbvBaryCoordSmooth,
+ EbvBaryCoordSmoothCentroid,
+ EbvBaryCoordSmoothSample,
+ EbvBaryCoordPullModel,
+#endif
+
+ EbvViewIndex,
+ EbvDeviceIndex,
+
+ EbvFragSizeEXT,
+ EbvFragInvocationCountEXT,
+
+#ifdef NV_EXTENSIONS
+ EbvViewportMaskNV,
+ EbvSecondaryPositionNV,
+ EbvSecondaryViewportMaskNV,
+ EbvPositionPerViewNV,
+ EbvViewportMaskPerViewNV,
+ EbvFragFullyCoveredNV,
+ EbvFragmentSizeNV,
+ EbvInvocationsPerPixelNV,
+ // raytracing
+ EbvLaunchIdNV,
+ EbvLaunchSizeNV,
+ EbvInstanceCustomIndexNV,
+ EbvWorldRayOriginNV,
+ EbvWorldRayDirectionNV,
+ EbvObjectRayOriginNV,
+ EbvObjectRayDirectionNV,
+ EbvRayTminNV,
+ EbvRayTmaxNV,
+ EbvHitTNV,
+ EbvHitKindNV,
+ EbvObjectToWorldNV,
+ EbvWorldToObjectNV,
+ EbvIncomingRayFlagsNV,
+ EbvBaryCoordNV,
+ EbvBaryCoordNoPerspNV,
+ EbvTaskCountNV,
+ EbvPrimitiveCountNV,
+ EbvPrimitiveIndicesNV,
+ EbvClipDistancePerViewNV,
+ EbvCullDistancePerViewNV,
+ EbvLayerPerViewNV,
+ EbvMeshViewCountNV,
+ EbvMeshViewIndicesNV,
+#endif
+
+ // HLSL built-ins that live only temporarily, until they get remapped
+ // to one of the above.
+ EbvFragDepthGreater,
+ EbvFragDepthLesser,
+ EbvGsOutputStream,
+ EbvOutputPatch,
+ EbvInputPatch,
+
+ // structbuffer types
+ EbvAppendConsume, // no need to differentiate append and consume
+ EbvRWStructuredBuffer,
+ EbvStructuredBuffer,
+ EbvByteAddressBuffer,
+ EbvRWByteAddressBuffer,
+
+ EbvLast
+};
+
+// These will show up in error messages
+__inline const char* GetStorageQualifierString(TStorageQualifier q)
+{
+ switch (q) {
+ case EvqTemporary: return "temp"; break;
+ case EvqGlobal: return "global"; break;
+ case EvqConst: return "const"; break;
+ case EvqConstReadOnly: return "const (read only)"; break;
+ case EvqVaryingIn: return "in"; break;
+ case EvqVaryingOut: return "out"; break;
+ case EvqUniform: return "uniform"; break;
+ case EvqBuffer: return "buffer"; break;
+ case EvqShared: return "shared"; break;
+ case EvqIn: return "in"; break;
+ case EvqOut: return "out"; break;
+ case EvqInOut: return "inout"; break;
+ case EvqVertexId: return "gl_VertexId"; break;
+ case EvqInstanceId: return "gl_InstanceId"; break;
+ case EvqPosition: return "gl_Position"; break;
+ case EvqPointSize: return "gl_PointSize"; break;
+ case EvqClipVertex: return "gl_ClipVertex"; break;
+ case EvqFace: return "gl_FrontFacing"; break;
+ case EvqFragCoord: return "gl_FragCoord"; break;
+ case EvqPointCoord: return "gl_PointCoord"; break;
+ case EvqFragColor: return "fragColor"; break;
+ case EvqFragDepth: return "gl_FragDepth"; break;
+#ifdef NV_EXTENSIONS
+ case EvqPayloadNV: return "rayPayloadNV"; break;
+ case EvqPayloadInNV: return "rayPayloadInNV"; break;
+ case EvqHitAttrNV: return "hitAttributeNV"; break;
+ case EvqCallableDataNV: return "callableDataNV"; break;
+ case EvqCallableDataInNV: return "callableDataInNV"; break;
+#endif
+ default: return "unknown qualifier";
+ }
+}
+
+__inline const char* GetBuiltInVariableString(TBuiltInVariable v)
+{
+ switch (v) {
+ case EbvNone: return "";
+ case EbvNumWorkGroups: return "NumWorkGroups";
+ case EbvWorkGroupSize: return "WorkGroupSize";
+ case EbvWorkGroupId: return "WorkGroupID";
+ case EbvLocalInvocationId: return "LocalInvocationID";
+ case EbvGlobalInvocationId: return "GlobalInvocationID";
+ case EbvLocalInvocationIndex: return "LocalInvocationIndex";
+ case EbvSubGroupSize: return "SubGroupSize";
+ case EbvSubGroupInvocation: return "SubGroupInvocation";
+ case EbvSubGroupEqMask: return "SubGroupEqMask";
+ case EbvSubGroupGeMask: return "SubGroupGeMask";
+ case EbvSubGroupGtMask: return "SubGroupGtMask";
+ case EbvSubGroupLeMask: return "SubGroupLeMask";
+ case EbvSubGroupLtMask: return "SubGroupLtMask";
+ case EbvVertexId: return "VertexId";
+ case EbvInstanceId: return "InstanceId";
+ case EbvVertexIndex: return "VertexIndex";
+ case EbvInstanceIndex: return "InstanceIndex";
+ case EbvBaseVertex: return "BaseVertex";
+ case EbvBaseInstance: return "BaseInstance";
+ case EbvDrawId: return "DrawId";
+ case EbvPosition: return "Position";
+ case EbvPointSize: return "PointSize";
+ case EbvClipVertex: return "ClipVertex";
+ case EbvClipDistance: return "ClipDistance";
+ case EbvCullDistance: return "CullDistance";
+ case EbvNormal: return "Normal";
+ case EbvVertex: return "Vertex";
+ case EbvMultiTexCoord0: return "MultiTexCoord0";
+ case EbvMultiTexCoord1: return "MultiTexCoord1";
+ case EbvMultiTexCoord2: return "MultiTexCoord2";
+ case EbvMultiTexCoord3: return "MultiTexCoord3";
+ case EbvMultiTexCoord4: return "MultiTexCoord4";
+ case EbvMultiTexCoord5: return "MultiTexCoord5";
+ case EbvMultiTexCoord6: return "MultiTexCoord6";
+ case EbvMultiTexCoord7: return "MultiTexCoord7";
+ case EbvFrontColor: return "FrontColor";
+ case EbvBackColor: return "BackColor";
+ case EbvFrontSecondaryColor: return "FrontSecondaryColor";
+ case EbvBackSecondaryColor: return "BackSecondaryColor";
+ case EbvTexCoord: return "TexCoord";
+ case EbvFogFragCoord: return "FogFragCoord";
+ case EbvInvocationId: return "InvocationID";
+ case EbvPrimitiveId: return "PrimitiveID";
+ case EbvLayer: return "Layer";
+ case EbvViewportIndex: return "ViewportIndex";
+ case EbvPatchVertices: return "PatchVertices";
+ case EbvTessLevelOuter: return "TessLevelOuter";
+ case EbvTessLevelInner: return "TessLevelInner";
+ case EbvBoundingBox: return "BoundingBox";
+ case EbvTessCoord: return "TessCoord";
+ case EbvColor: return "Color";
+ case EbvSecondaryColor: return "SecondaryColor";
+ case EbvFace: return "Face";
+ case EbvFragCoord: return "FragCoord";
+ case EbvPointCoord: return "PointCoord";
+ case EbvFragColor: return "FragColor";
+ case EbvFragData: return "FragData";
+ case EbvFragDepth: return "FragDepth";
+ case EbvFragStencilRef: return "FragStencilRef";
+ case EbvSampleId: return "SampleId";
+ case EbvSamplePosition: return "SamplePosition";
+ case EbvSampleMask: return "SampleMaskIn";
+ case EbvHelperInvocation: return "HelperInvocation";
+
+#ifdef AMD_EXTENSIONS
+ case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
+ case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
+ case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
+ case EbvBaryCoordSmooth: return "BaryCoordSmooth";
+ case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
+ case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
+ case EbvBaryCoordPullModel: return "BaryCoordPullModel";
+#endif
+
+ case EbvViewIndex: return "ViewIndex";
+ case EbvDeviceIndex: return "DeviceIndex";
+
+ case EbvFragSizeEXT: return "FragSizeEXT";
+ case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
+
+#ifdef NV_EXTENSIONS
+ case EbvViewportMaskNV: return "ViewportMaskNV";
+ case EbvSecondaryPositionNV: return "SecondaryPositionNV";
+ case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
+ case EbvPositionPerViewNV: return "PositionPerViewNV";
+ case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
+ case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
+ case EbvFragmentSizeNV: return "FragmentSizeNV";
+ case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV";
+ case EbvLaunchIdNV: return "LaunchIdNV";
+ case EbvLaunchSizeNV: return "LaunchSizeNV";
+ case EbvInstanceCustomIndexNV: return "InstanceCustomIndexNV";
+ case EbvWorldRayOriginNV: return "WorldRayOriginNV";
+ case EbvWorldRayDirectionNV: return "WorldRayDirectionNV";
+ case EbvObjectRayOriginNV: return "ObjectRayOriginNV";
+ case EbvObjectRayDirectionNV: return "ObjectRayDirectionNV";
+ case EbvRayTminNV: return "ObjectRayTminNV";
+ case EbvRayTmaxNV: return "ObjectRayTmaxNV";
+ case EbvHitTNV: return "HitTNV";
+ case EbvHitKindNV: return "HitKindNV";
+ case EbvIncomingRayFlagsNV: return "IncomingRayFlagsNV";
+ case EbvObjectToWorldNV: return "ObjectToWorldNV";
+ case EbvWorldToObjectNV: return "WorldToObjectNV";
+
+ case EbvBaryCoordNV: return "BaryCoordNV";
+ case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
+ case EbvTaskCountNV: return "TaskCountNV";
+ case EbvPrimitiveCountNV: return "PrimitiveCountNV";
+ case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV";
+ case EbvClipDistancePerViewNV: return "ClipDistancePerViewNV";
+ case EbvCullDistancePerViewNV: return "CullDistancePerViewNV";
+ case EbvLayerPerViewNV: return "LayerPerViewNV";
+ case EbvMeshViewCountNV: return "MeshViewCountNV";
+ case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
+#endif
+ default: return "unknown built-in variable";
+ }
+}
+
+// In this enum, order matters; users can assume higher precision is a bigger value
+// and EpqNone is 0.
+enum TPrecisionQualifier {
+ EpqNone = 0,
+ EpqLow,
+ EpqMedium,
+ EpqHigh
+};
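+
+// Since the enum is ordered by increasing precision, qualifiers can be
+// compared numerically; an illustrative (hypothetical) use:
+//
+//     TPrecisionQualifier widest = std::max(EpqMedium, EpqHigh); // EpqHigh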
+
+__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
+{
+ switch (p) {
+ case EpqNone: return ""; break;
+ case EpqLow: return "lowp"; break;
+ case EpqMedium: return "mediump"; break;
+ case EpqHigh: return "highp"; break;
+ default: return "unknown precision qualifier";
+ }
+}
+
+__inline bool isTypeSignedInt(TBasicType type)
+{
+ switch (type) {
+ case EbtInt8:
+ case EbtInt16:
+ case EbtInt:
+ case EbtInt64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+__inline bool isTypeUnsignedInt(TBasicType type)
+{
+ switch (type) {
+ case EbtUint8:
+ case EbtUint16:
+ case EbtUint:
+ case EbtUint64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+__inline bool isTypeInt(TBasicType type)
+{
+ return isTypeSignedInt(type) || isTypeUnsignedInt(type);
+}
+
+__inline bool isTypeFloat(TBasicType type)
+{
+ switch (type) {
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ return true;
+ default:
+ return false;
+ }
+}
+
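+// Rank the integer basic types by bit width: 0 for the 8-bit types up to
+// 3 for the 64-bit types, so callers can pick the wider of two integer
+// types by comparing ranks. Non-integer types assert.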
+__inline int getTypeRank(TBasicType type) {
+ int res = -1;
+ switch(type) {
+ case EbtInt8:
+ case EbtUint8:
+ res = 0;
+ break;
+ case EbtInt16:
+ case EbtUint16:
+ res = 1;
+ break;
+ case EbtInt:
+ case EbtUint:
+ res = 2;
+ break;
+ case EbtInt64:
+ case EbtUint64:
+ res = 3;
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ return res;
+}
+
+} // end namespace glslang
+
+#endif // _BASICTYPES_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/Common.h b/src/3rdparty/glslang/glslang/Include/Common.h
new file mode 100644
index 0000000..98e5a1a
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/Common.h
@@ -0,0 +1,291 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _COMMON_INCLUDED_
+#define _COMMON_INCLUDED_
+
+
+#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
+#include <sstream>
+namespace std {
+template<typename T>
+std::string to_string(const T& val) {
+ std::ostringstream os;
+ os << val;
+ return os.str();
+}
+}
+#endif
+
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+ #include <basetsd.h>
+ #ifndef snprintf
+ #define snprintf sprintf_s
+ #endif
+ #define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
+#elif defined (solaris)
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <sys/int_types.h>
+ #define UINT_PTR uintptr_t
+#else
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <stdint.h>
+ #define UINT_PTR uintptr_t
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+ #include <stdlib.h>
+ inline long long int strtoll (const char* str, char** endptr, int base)
+ {
+ return _strtoi64(str, endptr, base);
+ }
+ inline unsigned long long int strtoull (const char* str, char** endptr, int base)
+ {
+ return _strtoui64(str, endptr, base);
+ }
+ inline long long int atoll (const char* str)
+ {
+ return strtoll(str, NULL, 10);
+ }
+#endif
+
+#if defined(_MSC_VER)
+#define strdup _strdup
+#endif
+
+/* windows only pragma */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4786) // Don't warn about too long identifiers
+ #pragma warning(disable : 4514) // unused inline method
+ #pragma warning(disable : 4201) // nameless union
+#endif
+
+#include <set>
+#include <unordered_set>
+#include <vector>
+#include <map>
+#include <unordered_map>
+#include <list>
+#include <algorithm>
+#include <string>
+#include <cstdio>
+#include <cassert>
+
+#include "PoolAlloc.h"
+
+//
+// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
+//
+#define POOL_ALLOCATOR_NEW_DELETE(A) \
+ void* operator new(size_t s) { return (A).allocate(s); } \
+ void* operator new(size_t, void *_Where) { return (_Where); } \
+ void operator delete(void*) { } \
+ void operator delete(void *, void *) { } \
+ void* operator new[](size_t s) { return (A).allocate(s); } \
+ void* operator new[](size_t, void *_Where) { return (_Where); } \
+ void operator delete[](void*) { } \
+ void operator delete[](void *, void *) { }
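+
+// Illustrative opt-in (hypothetical class, shown only as a sketch):
+//
+//     class TExample {
+//     public:
+//         POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+//     };
+//
+// 'new TExample' then draws storage from the thread's pool and 'delete'
+// becomes a no-op; memory is reclaimed in bulk when the pool is popped.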
+
+namespace glslang {
+
+ //
+ // Pool version of string.
+ //
+ typedef pool_allocator<char> TStringAllocator;
+ typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
+
+} // end namespace glslang
+
+// Repackage the std::hash for use by unordered map/set with a TString key.
+namespace std {
+
+ template<> struct hash<glslang::TString> {
+ std::size_t operator()(const glslang::TString& s) const
+ {
+ const unsigned _FNV_offset_basis = 2166136261U;
+ const unsigned _FNV_prime = 16777619U;
+ unsigned _Val = _FNV_offset_basis;
+ size_t _Count = s.size();
+ const char* _First = s.c_str();
+ for (size_t _Next = 0; _Next < _Count; ++_Next)
+ {
+ _Val ^= (unsigned)_First[_Next];
+ _Val *= _FNV_prime;
+ }
+
+ return _Val;
+ }
+ };
+}
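+
+// With this specialization in place, pool strings can key the unordered
+// containers defined below; an illustrative (hypothetical) use:
+//
+//     glslang::TUnorderedMap<glslang::TString, int> symbolIds;
+//     symbolIds[glslang::TString("main")] = 0;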
+
+namespace glslang {
+
+inline TString* NewPoolTString(const char* s)
+{
+ void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
+ return new(memory) TString(s);
+}
+
+template<class T> inline T* NewPoolObject(T*)
+{
+ return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
+}
+
+template<class T> inline T* NewPoolObject(T, int instances)
+{
+ return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
+}
+
+//
+// Pool allocator versions of vectors, lists, and maps
+//
+template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
+ TVector() : std::vector<T, pool_allocator<T> >() {}
+ TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
+ TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
+ TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
+};
+
+template <class T> class TList : public std::list<T, pool_allocator<T> > {
+};
+
+template <class K, class D, class CMP = std::less<K> >
+class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
+};
+
+template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
+class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
+};
+
+//
+// Persistent string memory. Should only be used for strings that survive
+// across compiles/links.
+//
+typedef std::basic_string<char> TPersistString;
+
+//
+// templatized min and max functions.
+//
+template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
+template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
+
+//
+// Create a TString object from an integer.
+//
+#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
+inline const TString String(const int i, const int base = 10)
+{
+ char text[16]; // 32 bit ints are at most 10 digits in base 10
+ _itoa_s(i, text, sizeof(text), base);
+ return text;
+}
+#else
+inline const TString String(const int i, const int /*base*/ = 10)
+{
+ char text[16]; // 32 bit ints are at most 10 digits in base 10
+
+ // we assume base 10 for all cases
+ snprintf(text, sizeof(text), "%d", i);
+
+ return text;
+}
+#endif
+
+struct TSourceLoc {
+ void init()
+ {
+ name = nullptr; string = 0; line = 0; column = 0;
+ }
+ void init(int stringNum) { init(); string = stringNum; }
+ // Returns the name if it exists. Otherwise, returns the string number.
+ std::string getStringNameOrNum(bool quoteStringName = true) const
+ {
+ if (name != nullptr) {
+ TString qstr = quoteStringName ? ("\"" + *name + "\"") : *name;
+ std::string ret_str(qstr.c_str());
+ return ret_str;
+ }
+ return std::to_string((long long)string);
+ }
+ const char* getFilename() const
+ {
+ if (name == nullptr)
+ return nullptr;
+ return name->c_str();
+ }
+ const char* getFilenameStr() const { return name == nullptr ? "" : name->c_str(); }
+ TString* name; // descriptive name for this string, when a textual name is available, otherwise nullptr
+ int string;
+ int line;
+ int column;
+};
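+
+// For example, a location whose 'name' is "shader.vert" reports
+// "\"shader.vert\"" from getStringNameOrNum(), while an unnamed source
+// string with 'string' == 2 reports "2".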
+
+class TPragmaTable : public TMap<TString, TString> {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+};
+
+const int MaxTokenLength = 1024;
+
+template <class T> bool IsPow2(T powerOf2)
+{
+ if (powerOf2 <= 0)
+ return false;
+
+ return (powerOf2 & (powerOf2 - 1)) == 0;
+}
+
+// Round number up to a multiple of the given powerOf2. Note that powerOf2
+// is the multiple itself, not an exponent; it must be a power of 2.
+template <class T> void RoundToPow2(T& number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
+}
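+
+// For example, RoundToPow2(number, 8) takes 13 to 16 and leaves 16 at 16,
+// since (13 + 7) & ~7 == 16 and (16 + 7) & ~7 == 16.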
+
+template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ return ! (number & (powerOf2 - 1));
+}
+
+} // end namespace glslang
+
+#endif // _COMMON_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/ConstantUnion.h b/src/3rdparty/glslang/glslang/Include/ConstantUnion.h
new file mode 100644
index 0000000..3e93340
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/ConstantUnion.h
@@ -0,0 +1,938 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _CONSTANT_UNION_INCLUDED_
+#define _CONSTANT_UNION_INCLUDED_
+
+#include "../Include/Common.h"
+#include "../Include/BaseTypes.h"
+
+namespace glslang {
+
+class TConstUnion {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TConstUnion() : iConst(0), type(EbtInt) { }
+
+ void setI8Const(signed char i)
+ {
+ i8Const = i;
+ type = EbtInt8;
+ }
+
+ void setU8Const(unsigned char u)
+ {
+ u8Const = u;
+ type = EbtUint8;
+ }
+
+ void setI16Const(signed short i)
+ {
+ i16Const = i;
+ type = EbtInt16;
+ }
+
+ void setU16Const(unsigned short u)
+ {
+ u16Const = u;
+ type = EbtUint16;
+ }
+
+ void setIConst(int i)
+ {
+ iConst = i;
+ type = EbtInt;
+ }
+
+ void setUConst(unsigned int u)
+ {
+ uConst = u;
+ type = EbtUint;
+ }
+
+ void setI64Const(long long i64)
+ {
+ i64Const = i64;
+ type = EbtInt64;
+ }
+
+ void setU64Const(unsigned long long u64)
+ {
+ u64Const = u64;
+ type = EbtUint64;
+ }
+
+ void setDConst(double d)
+ {
+ dConst = d;
+ type = EbtDouble;
+ }
+
+ void setBConst(bool b)
+ {
+ bConst = b;
+ type = EbtBool;
+ }
+
+ void setSConst(const TString* s)
+ {
+ sConst = s;
+ type = EbtString;
+ }
+
+ signed char getI8Const() const { return i8Const; }
+ unsigned char getU8Const() const { return u8Const; }
+ signed short getI16Const() const { return i16Const; }
+ unsigned short getU16Const() const { return u16Const; }
+ int getIConst() const { return iConst; }
+ unsigned int getUConst() const { return uConst; }
+ long long getI64Const() const { return i64Const; }
+ unsigned long long getU64Const() const { return u64Const; }
+ double getDConst() const { return dConst; }
+ bool getBConst() const { return bConst; }
+ const TString* getSConst() const { return sConst; }
+
+ bool operator==(const signed char i) const
+ {
+ if (i == i8Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned char u) const
+ {
+ if (u == u8Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const signed short i) const
+ {
+ if (i == i16Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned short u) const
+ {
+ if (u == u16Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const int i) const
+ {
+ if (i == iConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned int u) const
+ {
+ if (u == uConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const long long i64) const
+ {
+ if (i64 == i64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned long long u64) const
+ {
+ if (u64 == u64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const double d) const
+ {
+ if (d == dConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const bool b) const
+ {
+ if (b == bConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const TConstUnion& constant) const
+ {
+ if (constant.type != type)
+ return false;
+
+ switch (type) {
+ case EbtInt16:
+ if (constant.i16Const == i16Const)
+ return true;
+
+ break;
+ case EbtUint16:
+ if (constant.u16Const == u16Const)
+ return true;
+
+ break;
+ case EbtInt8:
+ if (constant.i8Const == i8Const)
+ return true;
+
+ break;
+ case EbtUint8:
+ if (constant.u8Const == u8Const)
+ return true;
+
+ break;
+ case EbtInt:
+ if (constant.iConst == iConst)
+ return true;
+
+ break;
+ case EbtUint:
+ if (constant.uConst == uConst)
+ return true;
+
+ break;
+ case EbtInt64:
+ if (constant.i64Const == i64Const)
+ return true;
+
+ break;
+ case EbtUint64:
+ if (constant.u64Const == u64Const)
+ return true;
+
+ break;
+ case EbtDouble:
+ if (constant.dConst == dConst)
+ return true;
+
+ break;
+ case EbtBool:
+ if (constant.bConst == bConst)
+ return true;
+
+ break;
+ default:
+ assert(false && "Default missing");
+ }
+
+ return false;
+ }
+
+ bool operator!=(const signed char i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned char u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const signed short i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned short u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const int i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned int u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const long long i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned long long u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const float f) const
+ {
+ return !operator==(f);
+ }
+
+ bool operator!=(const bool b) const
+ {
+ return !operator==(b);
+ }
+
+ bool operator!=(const TConstUnion& constant) const
+ {
+ return !operator==(constant);
+ }
+
+ bool operator>(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8:
+ if (i8Const > constant.i8Const)
+ return true;
+
+ return false;
+ case EbtUint8:
+ if (u8Const > constant.u8Const)
+ return true;
+
+ return false;
+ case EbtInt16:
+ if (i16Const > constant.i16Const)
+ return true;
+
+ return false;
+ case EbtUint16:
+ if (u16Const > constant.u16Const)
+ return true;
+
+ return false;
+ case EbtInt:
+ if (iConst > constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst > constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const > constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const > constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst > constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ bool operator<(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8:
+ if (i8Const < constant.i8Const)
+ return true;
+
+ return false;
+ case EbtUint8:
+ if (u8Const < constant.u8Const)
+ return true;
+
+ return false;
+ case EbtInt16:
+ if (i16Const < constant.i16Const)
+ return true;
+
+ return false;
+ case EbtUint16:
+ if (u16Const < constant.u16Const)
+ return true;
+
+ return false;
+ case EbtInt:
+ if (iConst < constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst < constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const < constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const < constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst < constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ TConstUnion operator+(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const + constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
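+
+    // Illustrative constant-fold step (hypothetical values, a sketch only):
+    //
+    //     TConstUnion a, b;
+    //     a.setIConst(2); b.setIConst(3);
+    //     TConstUnion sum = a + b;   // sum.getIConst() == 5, type EbtInt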
+
+ TConstUnion operator-(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator*(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator%(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break;
+    case EbtInt16:  returnValue.setI16Const(i16Const % constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator>>(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI8Const(i8Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI8Const(i8Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI8Const(i8Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI8Const(i8Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI8Const(i8Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI8Const(i8Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI8Const(i8Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU8Const(u8Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU8Const(u8Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU8Const(u8Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU8Const(u8Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU8Const(u8Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU8Const(u8Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU8Const(u8Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI16Const(i16Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI16Const(i16Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI16Const(i16Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI16Const(i16Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI16Const(i16Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI16Const(i16Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI16Const(i16Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU16Const(u16Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU16Const(u16Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU16Const(u16Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU16Const(u16Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU16Const(u16Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU16Const(u16Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU16Const(u16Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break;
+ case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break;
+ case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break;
+ case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
+ case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break;
+ case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break;
+ case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break;
+ case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break;
+ case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI64Const(i64Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI64Const(i64Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI64Const(i64Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU64Const(u64Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU64Const(u64Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU64Const(u64Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU64Const(u64Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator<<(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI8Const(i8Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI8Const(i8Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI8Const(i8Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI8Const(i8Const << constant.iConst); break;
+ case EbtUint: returnValue.setI8Const(i8Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI8Const(i8Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI8Const(i8Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU8Const(u8Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU8Const(u8Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU8Const(u8Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU8Const(u8Const << constant.iConst); break;
+ case EbtUint: returnValue.setU8Const(u8Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU8Const(u8Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU8Const(u8Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI16Const(i16Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI16Const(i16Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI16Const(i16Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI16Const(i16Const << constant.iConst); break;
+ case EbtUint: returnValue.setI16Const(i16Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI16Const(i16Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI16Const(i16Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU16Const(u16Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU16Const(u16Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU16Const(u16Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU16Const(u16Const << constant.iConst); break;
+ case EbtUint: returnValue.setU16Const(u16Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU16Const(u16Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU16Const(u16Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
+ case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
+ case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
+ case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
+ case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
+ case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
+ case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
+ case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
+ case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
+ case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
+ case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
+ case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
+ case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI64Const(i64Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI64Const(i64Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI64Const(i64Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break;
+ case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU64Const(u64Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU64Const(u64Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU64Const(u64Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU64Const(u64Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break;
+ case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator&(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator|(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator^(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator~() const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(~i8Const); break;
+ case EbtUint8: returnValue.setU8Const(~u8Const); break;
+ case EbtInt16: returnValue.setI16Const(~i16Const); break;
+ case EbtUint16: returnValue.setU16Const(~u16Const); break;
+ case EbtInt: returnValue.setIConst(~iConst); break;
+ case EbtUint: returnValue.setUConst(~uConst); break;
+ case EbtInt64: returnValue.setI64Const(~i64Const); break;
+ case EbtUint64: returnValue.setU64Const(~u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator&&(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtBool: returnValue.setBConst(bConst && constant.bConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator||(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtBool: returnValue.setBConst(bConst || constant.bConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TBasicType getType() const { return type; }
+
+private:
+ union {
+ signed char i8Const; // used for i8vec, scalar int8s
+ unsigned char u8Const; // used for u8vec, scalar uint8s
+ signed short i16Const; // used for i16vec, scalar int16s
+ unsigned short u16Const; // used for u16vec, scalar uint16s
+ int iConst; // used for ivec, scalar ints
+ unsigned int uConst; // used for uvec, scalar uints
+ long long i64Const; // used for i64vec, scalar int64s
+ unsigned long long u64Const; // used for u64vec, scalar uint64s
+ bool bConst; // used for bvec, scalar bools
+ double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles
+ const TString* sConst; // string constant
+ };
+
+ TBasicType type;
+};
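+
+// Illustrative use of the operators above (a sketch, not part of the upstream
+// header): shift counts may have a different width than the left operand, and
+// the result keeps the left operand's type.
+//
+//     TConstUnion a, n;
+//     a.setUConst(0x80u);
+//     n.setI8Const(3);
+//     TConstUnion r = a >> n;  // r.getUConst() == 0x10, r.getType() == EbtUint
+//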
+
+// Encapsulate having a pointer to an array of TConstUnion,
+// which only needs to be allocated if its size is going to be
+// bigger than 0.
+//
+// One convenience is being able to use [] to go inside the array, instead
+// of C++ treating it as a pointer to a vector that must be dereferenced first.
+//
+// General usage is that the size is known up front, and it is
+// created once with the proper size.
+//
+class TConstUnionArray {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TConstUnionArray() : unionArray(nullptr) { }
+ virtual ~TConstUnionArray() { }
+
+ explicit TConstUnionArray(int size)
+ {
+ if (size == 0)
+ unionArray = nullptr;
+ else
+ unionArray = new TConstUnionVector(size);
+ }
+ TConstUnionArray(const TConstUnionArray& a) : unionArray(a.unionArray) { }
+ TConstUnionArray(const TConstUnionArray& a, int start, int size)
+ {
+ unionArray = new TConstUnionVector(size);
+ for (int i = 0; i < size; ++i)
+ (*unionArray)[i] = a[start + i];
+ }
+
+ // Use this constructor for a smear operation
+ TConstUnionArray(int size, const TConstUnion& val)
+ {
+ unionArray = new TConstUnionVector(size, val);
+ }
+
+ int size() const { return unionArray ? (int)unionArray->size() : 0; }
+ TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
+ const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
+ bool operator==(const TConstUnionArray& rhs) const
+ {
+ // this includes the case that both are unallocated
+ if (unionArray == rhs.unionArray)
+ return true;
+
+ if (! unionArray || ! rhs.unionArray)
+ return false;
+
+ return *unionArray == *rhs.unionArray;
+ }
+ bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
+
+ double dot(const TConstUnionArray& rhs)
+ {
+ assert(rhs.unionArray->size() == unionArray->size());
+ double sum = 0.0;
+
+ for (size_t comp = 0; comp < unionArray->size(); ++comp)
+ sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
+
+ return sum;
+ }
+
+ bool empty() const { return unionArray == nullptr; }
+
+protected:
+ typedef TVector<TConstUnion> TConstUnionVector;
+ TConstUnionVector* unionArray;
+};
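+
+// Illustrative usage (a sketch, not part of the upstream header): the smear
+// constructor replicates one value, and dot() folds two arrays of doubles.
+// Assumes a pool allocator is installed for this thread (see PoolAlloc.h).
+//
+//     TConstUnion half;
+//     half.setDConst(0.5);
+//     TConstUnionArray a(3, half);   // { 0.5, 0.5, 0.5 }
+//     TConstUnionArray b(3, half);
+//     double d = a.dot(b);           // 0.75
+//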
+
+} // end namespace glslang
+
+#endif // _CONSTANT_UNION_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/InfoSink.h b/src/3rdparty/glslang/glslang/Include/InfoSink.h
new file mode 100644
index 0000000..dceb603
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/InfoSink.h
@@ -0,0 +1,144 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _INFOSINK_INCLUDED_
+#define _INFOSINK_INCLUDED_
+
+#include "../Include/Common.h"
+#include <cmath>
+
+namespace glslang {
+
+//
+// TPrefixType is used to centralize how info log messages start.
+// See below.
+//
+enum TPrefixType {
+ EPrefixNone,
+ EPrefixWarning,
+ EPrefixError,
+ EPrefixInternalError,
+ EPrefixUnimplemented,
+ EPrefixNote
+};
+
+enum TOutputStream {
+ ENull = 0,
+ EDebugger = 0x01,
+ EStdOut = 0x02,
+ EString = 0x04,
+};
+//
+// Encapsulate info logs for all objects that have them.
+//
+// The methods are a general set of tools for getting a variety of
+// messages and types inserted into the log.
+//
+class TInfoSinkBase {
+public:
+    TInfoSinkBase() : outputStream(EString) {}
+ void erase() { sink.erase(); }
+ TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; }
+ TInfoSinkBase& operator<<(char c) { append(1, c); return *this; }
+ TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
+ TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
+ TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
+    TInfoSinkBase& operator<<(float n) {
+        const int size = 40;
+        char buf[size];
+        snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
+        append(buf);
+        return *this;
+    }
+ TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
+ TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
+ TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; }
+ TInfoSinkBase& operator+(const char* s) { append(s); return *this; }
+ const char* c_str() const { return sink.c_str(); }
+ void prefix(TPrefixType message) {
+ switch(message) {
+ case EPrefixNone: break;
+ case EPrefixWarning: append("WARNING: "); break;
+ case EPrefixError: append("ERROR: "); break;
+ case EPrefixInternalError: append("INTERNAL ERROR: "); break;
+ case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break;
+ case EPrefixNote: append("NOTE: "); break;
+ default: append("UNKNOWN ERROR: "); break;
+ }
+ }
+ void location(const TSourceLoc& loc) {
+ const int maxSize = 24;
+ char locText[maxSize];
+ snprintf(locText, maxSize, ":%d", loc.line);
+ append(loc.getStringNameOrNum(false).c_str());
+ append(locText);
+ append(": ");
+ }
+ void message(TPrefixType message, const char* s) {
+ prefix(message);
+ append(s);
+ append("\n");
+ }
+ void message(TPrefixType message, const char* s, const TSourceLoc& loc) {
+ prefix(message);
+ location(loc);
+ append(s);
+ append("\n");
+ }
+
+    void setOutputStream(int output = EString)
+ {
+ outputStream = output;
+ }
+
+protected:
+ void append(const char* s);
+
+ void append(int count, char c);
+ void append(const TPersistString& t);
+ void append(const TString& t);
+
+    void checkMem(size_t growth) {
+        if (sink.capacity() < sink.size() + growth + 2)
+            sink.reserve(sink.capacity() + sink.capacity() / 2);
+    }
+ void appendToStream(const char* s);
+ TPersistString sink;
+ int outputStream;
+};
+
+} // end namespace glslang
+
+class TInfoSink {
+public:
+ glslang::TInfoSinkBase info;
+ glslang::TInfoSinkBase debug;
+};
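+
+// Illustrative usage (a sketch; 'loc' is a hypothetical TSourceLoc):
+//
+//     TInfoSink sinks;
+//     sinks.info.message(glslang::EPrefixWarning, "unused variable 'x'", loc);
+//     sinks.info << "compiled " << 3 << " shaders\n";
+//     const char* log = sinks.info.c_str();
+//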
+
+#endif // _INFOSINK_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/InitializeGlobals.h b/src/3rdparty/glslang/glslang/Include/InitializeGlobals.h
new file mode 100644
index 0000000..95d0a40
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/InitializeGlobals.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef __INITIALIZE_GLOBALS_INCLUDED_
+#define __INITIALIZE_GLOBALS_INCLUDED_
+
+namespace glslang {
+
+bool InitializePoolIndex();
+
+} // end namespace glslang
+
+#endif // __INITIALIZE_GLOBALS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/PoolAlloc.h b/src/3rdparty/glslang/glslang/Include/PoolAlloc.h
new file mode 100644
index 0000000..0e237a6
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/PoolAlloc.h
@@ -0,0 +1,317 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _POOLALLOC_INCLUDED_
+#define _POOLALLOC_INCLUDED_
+
+#ifdef _DEBUG
+# define GUARD_BLOCKS // define to enable guard block sanity checking
+#endif
+
+//
+// This header defines an allocator that can be used to efficiently
+// allocate a large number of small requests for heap memory, with the
+// intention that they are not individually deallocated, but rather
+// collectively deallocated at one time.
+//
+// This simultaneously
+//
+// * Makes each individual allocation much more efficient; the
+// typical allocation is trivial.
+// * Completely avoids the cost of doing individual deallocation.
+// * Saves the trouble of tracking down and plugging a large class of leaks.
+//
+// Individual classes can use this allocator by supplying their own
+// new and delete methods.
+//
+// STL containers can use this allocator by using the pool_allocator
+// class as the allocator (second) template argument.
+//
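+
+// A minimal sketch of the class-level style ('MyNode' is hypothetical, and
+// POOL_ALLOCATOR_NEW_DELETE comes from Common.h):
+//
+//     class MyNode {
+//     public:
+//         POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+//         int payload;
+//     };
+//
+// 'new MyNode' then draws from the thread's pool and needs no matching delete;
+// the memory is reclaimed when the pool is popped. The container style is
+// sketched after the pool_allocator class below.
+//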
+
+#include <cstddef>
+#include <cstring>
+#include <vector>
+
+namespace glslang {
+
+// If we are using guard blocks, we must track each individual
+// allocation. If we aren't using guard blocks, these
+// never get instantiated, so won't have any impact.
+//
+
+class TAllocation {
+public:
+ TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
+ size(size), mem(mem), prevAlloc(prev) {
+ // Allocations are bracketed:
+ // [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
+ // This would be cleaner with if (guardBlockSize)..., but that
+ // makes the compiler print warnings about 0 length memsets,
+ // even with the if() protecting them.
+# ifdef GUARD_BLOCKS
+ memset(preGuard(), guardBlockBeginVal, guardBlockSize);
+ memset(data(), userDataFill, size);
+ memset(postGuard(), guardBlockEndVal, guardBlockSize);
+# endif
+ }
+
+ void check() const {
+ checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
+ checkGuardBlock(postGuard(), guardBlockEndVal, "after");
+ }
+
+ void checkAllocList() const;
+
+ // Return total size needed to accommodate user buffer of 'size',
+ // plus our tracking data.
+ inline static size_t allocationSize(size_t size) {
+ return size + 2 * guardBlockSize + headerSize();
+ }
+
+ // Offset from surrounding buffer to get to user data buffer.
+ inline static unsigned char* offsetAllocation(unsigned char* m) {
+ return m + guardBlockSize + headerSize();
+ }
+
+private:
+ void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
+
+ // Find offsets to pre and post guard blocks, and user data buffer
+ unsigned char* preGuard() const { return mem + headerSize(); }
+ unsigned char* data() const { return preGuard() + guardBlockSize; }
+ unsigned char* postGuard() const { return data() + size; }
+
+ size_t size; // size of the user data area
+ unsigned char* mem; // beginning of our allocation (pts to header)
+ TAllocation* prevAlloc; // prior allocation in the chain
+
+ const static unsigned char guardBlockBeginVal;
+ const static unsigned char guardBlockEndVal;
+ const static unsigned char userDataFill;
+
+ const static size_t guardBlockSize;
+# ifdef GUARD_BLOCKS
+ inline static size_t headerSize() { return sizeof(TAllocation); }
+# else
+ inline static size_t headerSize() { return 0; }
+# endif
+};
+
+//
+// There are several stacks. One is to track the user's pushing and
+// popping, and is not yet implemented. The others are simply
+// repositories of free pages or used pages.
+//
+// Page stacks are linked together with a simple header at the beginning
+// of each allocation obtained from the underlying OS. Multi-page allocations
+// are returned to the OS. Individual page allocations are kept for future
+// re-use.
+//
+// The "page size" used is not, nor must it match, the underlying OS
+// page size. But, having it be about that size or equal to a set of
+// pages is likely most optimal.
+//
+class TPoolAllocator {
+public:
+ TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
+
+ //
+ // Don't call the destructor just to free up the memory, call pop()
+ //
+ ~TPoolAllocator();
+
+ //
+    // Call push() to establish a new place to pop memory to. It does not
+    // have to be called to get things started.
+ //
+ void push();
+
+ //
+    // Call pop() to free all memory allocated since the last call to push(),
+    // or, if push() was never called, all memory since the first allocation.
+ //
+ void pop();
+
+ //
+ // Call popAll() to free all memory allocated.
+ //
+ void popAll();
+
+ //
+ // Call allocate() to actually acquire memory. Returns 0 if no memory
+ // available, otherwise a properly aligned pointer to 'numBytes' of memory.
+ //
+ void* allocate(size_t numBytes);
+
+ //
+ // There is no deallocate. The point of this class is that
+ // deallocation can be skipped by the user of it, as the model
+ // of use is to simultaneously deallocate everything at once
+ // by calling pop(), and to not have to solve memory leak problems.
+ //
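+    //
+    // Typical lifecycle (a sketch): bracket a batch of allocations with
+    // push()/pop() so that they are all released at once.
+    //
+    //     TPoolAllocator pool;
+    //     pool.push();
+    //     void* scratch = pool.allocate(256);  // released collectively by pop()
+    //     pool.pop();
+    //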
+
+protected:
+ friend struct tHeader;
+
+ struct tHeader {
+ tHeader(tHeader* nextPage, size_t pageCount) :
+#ifdef GUARD_BLOCKS
+ lastAllocation(0),
+#endif
+ nextPage(nextPage), pageCount(pageCount) { }
+
+ ~tHeader() {
+#ifdef GUARD_BLOCKS
+ if (lastAllocation)
+ lastAllocation->checkAllocList();
+#endif
+ }
+
+#ifdef GUARD_BLOCKS
+ TAllocation* lastAllocation;
+#endif
+ tHeader* nextPage;
+ size_t pageCount;
+ };
+
+ struct tAllocState {
+ size_t offset;
+ tHeader* page;
+ };
+ typedef std::vector<tAllocState> tAllocStack;
+
+ // Track allocations if and only if we're using guard blocks
+#ifndef GUARD_BLOCKS
+ void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
+#else
+ void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
+ new(memory) TAllocation(numBytes, memory, block->lastAllocation);
+ block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
+#endif
+
+ // This is optimized entirely away if GUARD_BLOCKS is not defined.
+ return TAllocation::offsetAllocation(memory);
+ }
+
+ size_t pageSize; // granularity of allocation from the OS
+ size_t alignment; // all returned allocations will be aligned at
+ // this granularity, which will be a power of 2
+ size_t alignmentMask;
+ size_t headerSkip; // amount of memory to skip to make room for the
+ // header (basically, size of header, rounded
+                              // up to make it aligned)
+ size_t currentPageOffset; // next offset in top of inUseList to allocate from
+ tHeader* freeList; // list of popped memory
+ tHeader* inUseList; // list of all memory currently being used
+ tAllocStack stack; // stack of where to allocate from, to partition pool
+
+ int numCalls; // just an interesting statistic
+ size_t totalBytes; // just an interesting statistic
+private:
+ TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator
+ TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor
+};
+
+//
+// There could potentially be many pools, with pops happening at
+// different times. But a simple use is to have one global pool,
+// with everyone using the same global allocator.
+//
+extern TPoolAllocator& GetThreadPoolAllocator();
+void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
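+
+// For example (a sketch): install a pool for the current thread before any
+// pool-backed allocation happens.
+//
+//     static TPoolAllocator* perThreadPool = new TPoolAllocator;
+//     SetThreadPoolAllocator(perThreadPool);
+//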
+
+//
+// This STL compatible allocator is intended to be used as the allocator
+// parameter to templatized STL containers, like vector and map.
+//
+// It will use the pools for allocation, and not
+// do any deallocation, but will still do destruction.
+//
+template<class T>
+class pool_allocator {
+public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef T *pointer;
+ typedef const T *const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+ template<class Other>
+ struct rebind {
+ typedef pool_allocator<Other> other;
+ };
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+
+ pool_allocator() : allocator(GetThreadPoolAllocator()) { }
+ pool_allocator(TPoolAllocator& a) : allocator(a) { }
+ pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
+
+ template<class Other>
+ pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
+
+ pointer allocate(size_type n) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+ pointer allocate(size_type n, const void*) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+
+ void deallocate(void*, size_type) { }
+ void deallocate(pointer, size_type) { }
+
+ pointer _Charalloc(size_t n) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
+
+ void construct(pointer p, const T& val) { new ((void *)p) T(val); }
+ void destroy(pointer p) { p->T::~T(); }
+
+ bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
+ bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
+
+ size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
+ size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
+
+ void setAllocator(TPoolAllocator* a) { allocator = *a; }
+ TPoolAllocator& getAllocator() const { return allocator; }
+
+protected:
+ pool_allocator& operator=(const pool_allocator&) { return *this; }
+ TPoolAllocator& allocator;
+};
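+
+// Illustrative container usage (a sketch): pass pool_allocator as the
+// allocator template argument so element storage comes from the pool.
+//
+//     std::vector<int, pool_allocator<int>> v;
+//     v.push_back(42);  // storage obtained from GetThreadPoolAllocator()
+//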
+
+} // end namespace glslang
+
+#endif // _POOLALLOC_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/ResourceLimits.h b/src/3rdparty/glslang/glslang/Include/ResourceLimits.h
new file mode 100644
index 0000000..106b21d
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/ResourceLimits.h
@@ -0,0 +1,149 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _RESOURCE_LIMITS_INCLUDED_
+#define _RESOURCE_LIMITS_INCLUDED_
+
+struct TLimits {
+ bool nonInductiveForLoops;
+ bool whileLoops;
+ bool doWhileLoops;
+ bool generalUniformIndexing;
+ bool generalAttributeMatrixVectorIndexing;
+ bool generalVaryingIndexing;
+ bool generalSamplerIndexing;
+ bool generalVariableIndexing;
+ bool generalConstantMatrixVectorIndexing;
+};
+
+struct TBuiltInResource {
+ int maxLights;
+ int maxClipPlanes;
+ int maxTextureUnits;
+ int maxTextureCoords;
+ int maxVertexAttribs;
+ int maxVertexUniformComponents;
+ int maxVaryingFloats;
+ int maxVertexTextureImageUnits;
+ int maxCombinedTextureImageUnits;
+ int maxTextureImageUnits;
+ int maxFragmentUniformComponents;
+ int maxDrawBuffers;
+ int maxVertexUniformVectors;
+ int maxVaryingVectors;
+ int maxFragmentUniformVectors;
+ int maxVertexOutputVectors;
+ int maxFragmentInputVectors;
+ int minProgramTexelOffset;
+ int maxProgramTexelOffset;
+ int maxClipDistances;
+ int maxComputeWorkGroupCountX;
+ int maxComputeWorkGroupCountY;
+ int maxComputeWorkGroupCountZ;
+ int maxComputeWorkGroupSizeX;
+ int maxComputeWorkGroupSizeY;
+ int maxComputeWorkGroupSizeZ;
+ int maxComputeUniformComponents;
+ int maxComputeTextureImageUnits;
+ int maxComputeImageUniforms;
+ int maxComputeAtomicCounters;
+ int maxComputeAtomicCounterBuffers;
+ int maxVaryingComponents;
+ int maxVertexOutputComponents;
+ int maxGeometryInputComponents;
+ int maxGeometryOutputComponents;
+ int maxFragmentInputComponents;
+ int maxImageUnits;
+ int maxCombinedImageUnitsAndFragmentOutputs;
+ int maxCombinedShaderOutputResources;
+ int maxImageSamples;
+ int maxVertexImageUniforms;
+ int maxTessControlImageUniforms;
+ int maxTessEvaluationImageUniforms;
+ int maxGeometryImageUniforms;
+ int maxFragmentImageUniforms;
+ int maxCombinedImageUniforms;
+ int maxGeometryTextureImageUnits;
+ int maxGeometryOutputVertices;
+ int maxGeometryTotalOutputComponents;
+ int maxGeometryUniformComponents;
+ int maxGeometryVaryingComponents;
+ int maxTessControlInputComponents;
+ int maxTessControlOutputComponents;
+ int maxTessControlTextureImageUnits;
+ int maxTessControlUniformComponents;
+ int maxTessControlTotalOutputComponents;
+ int maxTessEvaluationInputComponents;
+ int maxTessEvaluationOutputComponents;
+ int maxTessEvaluationTextureImageUnits;
+ int maxTessEvaluationUniformComponents;
+ int maxTessPatchComponents;
+ int maxPatchVertices;
+ int maxTessGenLevel;
+ int maxViewports;
+ int maxVertexAtomicCounters;
+ int maxTessControlAtomicCounters;
+ int maxTessEvaluationAtomicCounters;
+ int maxGeometryAtomicCounters;
+ int maxFragmentAtomicCounters;
+ int maxCombinedAtomicCounters;
+ int maxAtomicCounterBindings;
+ int maxVertexAtomicCounterBuffers;
+ int maxTessControlAtomicCounterBuffers;
+ int maxTessEvaluationAtomicCounterBuffers;
+ int maxGeometryAtomicCounterBuffers;
+ int maxFragmentAtomicCounterBuffers;
+ int maxCombinedAtomicCounterBuffers;
+ int maxAtomicCounterBufferSize;
+ int maxTransformFeedbackBuffers;
+ int maxTransformFeedbackInterleavedComponents;
+ int maxCullDistances;
+ int maxCombinedClipAndCullDistances;
+ int maxSamples;
+ int maxMeshOutputVerticesNV;
+ int maxMeshOutputPrimitivesNV;
+ int maxMeshWorkGroupSizeX_NV;
+ int maxMeshWorkGroupSizeY_NV;
+ int maxMeshWorkGroupSizeZ_NV;
+ int maxTaskWorkGroupSizeX_NV;
+ int maxTaskWorkGroupSizeY_NV;
+ int maxTaskWorkGroupSizeZ_NV;
+ int maxMeshViewCountNV;
+
+ TLimits limits;
+};
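+
+// For example (a sketch): a consumer zero-initializes the aggregate and then
+// sets only the limits it cares about.
+//
+//     TBuiltInResource res = {};
+//     res.maxDrawBuffers = 8;
+//     res.limits.whileLoops = true;
+//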
+
+#endif // _RESOURCE_LIMITS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/ShHandle.h b/src/3rdparty/glslang/glslang/Include/ShHandle.h
new file mode 100644
index 0000000..df07bd8
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/ShHandle.h
@@ -0,0 +1,176 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _SHHANDLE_INCLUDED_
+#define _SHHANDLE_INCLUDED_
+
+//
+// Machine independent part of the compiler private objects
+// sent as ShHandle to the driver.
+//
+// This should not be included by driver code.
+//
+
+#define SH_EXPORTING
+#include "../Public/ShaderLang.h"
+#include "../MachineIndependent/Versions.h"
+#include "InfoSink.h"
+
+class TCompiler;
+class TLinker;
+class TUniformMap;
+
+//
+// The base class used to back handles returned to the driver.
+//
+class TShHandleBase {
+public:
+ TShHandleBase() { pool = new glslang::TPoolAllocator; }
+ virtual ~TShHandleBase() { delete pool; }
+ virtual TCompiler* getAsCompiler() { return 0; }
+ virtual TLinker* getAsLinker() { return 0; }
+ virtual TUniformMap* getAsUniformMap() { return 0; }
+ virtual glslang::TPoolAllocator* getPool() const { return pool; }
+private:
+ glslang::TPoolAllocator* pool;
+};
+
+//
+// The base class for the machine dependent linker to derive from
+// for managing where uniforms live.
+//
+class TUniformMap : public TShHandleBase {
+public:
+ TUniformMap() { }
+ virtual ~TUniformMap() { }
+ virtual TUniformMap* getAsUniformMap() { return this; }
+ virtual int getLocation(const char* name) = 0;
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink infoSink;
+};
+
+class TIntermNode;
+
+//
+// The base class for the machine dependent compiler to derive from
+// for managing object code from the compile.
+//
+class TCompiler : public TShHandleBase {
+public:
+ TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { }
+ virtual ~TCompiler() { }
+ EShLanguage getLanguage() { return language; }
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+
+ virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
+
+ virtual TCompiler* getAsCompiler() { return this; }
+ virtual bool linkable() { return haveValidObjectCode; }
+
+ TInfoSink& infoSink;
+protected:
+ TCompiler& operator=(TCompiler&);
+
+ EShLanguage language;
+ bool haveValidObjectCode;
+};
+
+//
+// Link operations are based on a list of compile results...
+//
+typedef glslang::TVector<TCompiler*> TCompilerList;
+typedef glslang::TVector<TShHandleBase*> THandleList;
+
+//
+// The base class for the machine dependent linker to derive from
+// to manage the resulting executable.
+//
+
+class TLinker : public TShHandleBase {
+public:
+ TLinker(EShExecutable e, TInfoSink& iSink) :
+ infoSink(iSink),
+ executable(e),
+ haveReturnableObjectCode(false),
+ appAttributeBindings(0),
+ fixedAttributeBindings(0),
+ excludedAttributes(0),
+ excludedCount(0),
+ uniformBindings(0) { }
+ virtual TLinker* getAsLinker() { return this; }
+ virtual ~TLinker() { }
+ virtual bool link(TCompilerList&, TUniformMap*) = 0;
+ virtual bool link(THandleList&) { return false; }
+ virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
+ virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
+ virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
+ virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
+ virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
+ virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink& infoSink;
+protected:
+ TLinker& operator=(TLinker&);
+ EShExecutable executable;
+ bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
+
+ const ShBindingTable* appAttributeBindings;
+ const ShBindingTable* fixedAttributeBindings;
+ const int* excludedAttributes;
+ int excludedCount;
+ ShBindingTable* uniformBindings; // created by the linker
+};
+
+//
+// This is the interface between the machine independent code
+// and the machine dependent code.
+//
+// The machine dependent code should derive from the classes
+// above. Then Construct*() and Delete*() will create and
+// destroy the machine dependent objects, which contain the
+// above machine independent information.
+//
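+// For example (a sketch; 'TGenericCompiler' is hypothetical):
+//
+//     class TGenericCompiler : public TCompiler {
+//     public:
+//         TGenericCompiler(EShLanguage l, TInfoSink& sink) : TCompiler(l, sink) { }
+//         bool compile(TIntermNode*, int, EProfile) override
+//             { haveValidObjectCode = true; return linkable(); }
+//     };
+//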
+TCompiler* ConstructCompiler(EShLanguage, int);
+
+TShHandleBase* ConstructLinker(EShExecutable, int);
+TShHandleBase* ConstructBindings();
+void DeleteLinker(TShHandleBase*);
+void DeleteBindingList(TShHandleBase* bindingList);
+
+TUniformMap* ConstructUniformMap();
+void DeleteCompiler(TCompiler*);
+
+void DeleteUniformMap(TUniformMap*);
+
+#endif // _SHHANDLE_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/Types.h b/src/3rdparty/glslang/glslang/Include/Types.h
new file mode 100644
index 0000000..d0d9b60
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/Types.h
@@ -0,0 +1,2276 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2015-2016 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _TYPES_INCLUDED
+#define _TYPES_INCLUDED
+
+#include "../Include/Common.h"
+#include "../Include/BaseTypes.h"
+#include "../Public/ShaderLang.h"
+#include "arrays.h"
+
+#include <algorithm>
+
+namespace glslang {
+
+const int GlslangMaxTypeLength = 200; // TODO: need to print block/struct one member per line, so this can stay bounded
+
+const char* const AnonymousPrefix = "anon@"; // for something like a block whose members can be directly accessed
+inline bool IsAnonymous(const TString& name)
+{
+ return name.compare(0, 5, AnonymousPrefix) == 0;
+}
+
+//
+// Details within a sampler type
+//
+enum TSamplerDim {
+ EsdNone,
+ Esd1D,
+ Esd2D,
+ Esd3D,
+ EsdCube,
+ EsdRect,
+ EsdBuffer,
+ EsdSubpass, // goes only with non-sampled image (image is true)
+ EsdNumDims
+};
+
+struct TSampler { // misnomer now; includes images, textures without sampler, and textures with sampler
+ TBasicType type : 8; // type returned by sampler
+ TSamplerDim dim : 8;
+ bool arrayed : 1;
+ bool shadow : 1;
+ bool ms : 1;
+ bool image : 1; // image, combined should be false
+ bool combined : 1; // true means texture is combined with a sampler, false means texture with no sampler
+ bool sampler : 1; // true means a pure sampler, other fields should be clear()
+ bool external : 1; // GL_OES_EGL_image_external
+ bool yuv : 1; // GL_EXT_YUV_target
+ unsigned int vectorSize : 3; // vector return type size.
+
+    // Some languages support structures as sample results. The whole structure is
+    // too large to store in the TSampler, so there is an index to a separate table.
+ static const unsigned structReturnIndexBits = 4; // number of index bits to use.
+ static const unsigned structReturnSlots = (1<<structReturnIndexBits)-1; // number of valid values
+ static const unsigned noReturnStruct = structReturnSlots; // value if no return struct type.
+
+ // Index into a language specific table of texture return structures.
+ unsigned int structReturnIndex : structReturnIndexBits;
+
+ // Encapsulate getting members' vector sizes packed into the vectorSize bitfield.
+ unsigned int getVectorSize() const { return vectorSize; }
+
+ bool isImage() const { return image && dim != EsdSubpass; }
+ bool isSubpass() const { return dim == EsdSubpass; }
+ bool isCombined() const { return combined; }
+ bool isPureSampler() const { return sampler; }
+ bool isTexture() const { return !sampler && !image; }
+ bool isShadow() const { return shadow; }
+ bool isArrayed() const { return arrayed; }
+ bool isMultiSample() const { return ms; }
+ bool hasReturnStruct() const { return structReturnIndex != noReturnStruct; }
+
+ void clear()
+ {
+ type = EbtVoid;
+ dim = EsdNone;
+ arrayed = false;
+ shadow = false;
+ ms = false;
+ image = false;
+ combined = false;
+ sampler = false;
+ external = false;
+ yuv = false;
+ structReturnIndex = noReturnStruct;
+
+ // by default, returns a single vec4;
+ vectorSize = 4;
+ }
+
+ // make a combined sampler and texture
+ void set(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ combined = true;
+ }
+
+ // make an image
+ void setImage(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ image = true;
+ }
+
+ // make a texture with no sampler
+ void setTexture(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ }
+
+ // make a subpass input attachment
+ void setSubpass(TBasicType t, bool m = false)
+ {
+ clear();
+ type = t;
+ image = true;
+ dim = EsdSubpass;
+ ms = m;
+ }
+
+ // make a pure sampler, no texture, no image, nothing combined, the 'sampler' keyword
+ void setPureSampler(bool s)
+ {
+ clear();
+ sampler = true;
+ shadow = s;
+ }
+
+ bool operator==(const TSampler& right) const
+ {
+ return type == right.type &&
+ dim == right.dim &&
+ arrayed == right.arrayed &&
+ shadow == right.shadow &&
+ ms == right.ms &&
+ image == right.image &&
+ combined == right.combined &&
+ sampler == right.sampler &&
+ external == right.external &&
+ yuv == right.yuv &&
+ vectorSize == right.vectorSize &&
+ structReturnIndex == right.structReturnIndex;
+ }
+
+ bool operator!=(const TSampler& right) const
+ {
+ return ! operator==(right);
+ }
+
+ TString getString() const
+ {
+ TString s;
+
+ if (sampler) {
+ s.append("sampler");
+ return s;
+ }
+
+ switch (type) {
+ case EbtFloat: break;
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: s.append("f16"); break;
+#endif
+        case EbtInt8:    s.append("i8");  break;
+        case EbtUint8:   s.append("u8");  break;
+        case EbtInt16:   s.append("i16"); break;
+        case EbtUint16:  s.append("u16"); break;
+ case EbtInt: s.append("i"); break;
+ case EbtUint: s.append("u"); break;
+ case EbtInt64: s.append("i64"); break;
+ case EbtUint64: s.append("u64"); break;
+ default: break; // some compilers want this
+ }
+ if (image) {
+ if (dim == EsdSubpass)
+ s.append("subpass");
+ else
+ s.append("image");
+ } else if (combined) {
+ s.append("sampler");
+ } else {
+ s.append("texture");
+ }
+ if (external) {
+ s.append("ExternalOES");
+ return s;
+ }
+ if (yuv) {
+ return "__" + s + "External2DY2YEXT";
+ }
+ switch (dim) {
+ case Esd1D: s.append("1D"); break;
+ case Esd2D: s.append("2D"); break;
+ case Esd3D: s.append("3D"); break;
+ case EsdCube: s.append("Cube"); break;
+ case EsdRect: s.append("2DRect"); break;
+ case EsdBuffer: s.append("Buffer"); break;
+ case EsdSubpass: s.append("Input"); break;
+ default: break; // some compilers want this
+ }
+ if (ms)
+ s.append("MS");
+ if (arrayed)
+ s.append("Array");
+ if (shadow)
+ s.append("Shadow");
+
+ return s;
+ }
+};
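+
+// Illustrative use of the setters and getString() above (a sketch):
+//
+//     TSampler s;
+//     s.set(EbtFloat, Esd2D, /*arrayed*/ true, /*shadow*/ true);
+//     // s.getString() == "sampler2DArrayShadow"
+//
+//     TSampler t;
+//     t.setTexture(EbtFloat, Esd2D);
+//     // t.getString() == "texture2D"
+//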
+
+//
+// Need to have association of line numbers to types in a list for building structs.
+//
+class TType;
+struct TTypeLoc {
+ TType* type;
+ TSourceLoc loc;
+};
+typedef TVector<TTypeLoc> TTypeList;
+
+typedef TVector<TString*> TIdentifierList;
+
+//
+// Following are a series of helper enums for managing layouts and qualifiers,
+// used for TPublicType, TType, others.
+//
+
+enum TLayoutPacking {
+ ElpNone,
+ ElpShared, // default, but different than saying nothing
+ ElpStd140,
+ ElpStd430,
+ ElpPacked,
+ ElpScalar,
+ ElpCount // If expanding, see bitfield width below
+};
+
+enum TLayoutMatrix {
+ ElmNone,
+ ElmRowMajor,
+ ElmColumnMajor, // default, but different than saying nothing
+ ElmCount // If expanding, see bitfield width below
+};
+
+// Union of geometry shader and tessellation shader geometry types.
+// They don't go into TType, but rather have current state per shader or
+// active parser type (TPublicType).
+enum TLayoutGeometry {
+ ElgNone,
+ ElgPoints,
+ ElgLines,
+ ElgLinesAdjacency,
+ ElgLineStrip,
+ ElgTriangles,
+ ElgTrianglesAdjacency,
+ ElgTriangleStrip,
+ ElgQuads,
+ ElgIsolines,
+};
+
+enum TVertexSpacing {
+ EvsNone,
+ EvsEqual,
+ EvsFractionalEven,
+ EvsFractionalOdd
+};
+
+enum TVertexOrder {
+ EvoNone,
+ EvoCw,
+ EvoCcw
+};
+
+// Note: order matters; a format's category is determined by comparing it
+// against the guard enumerants below (see the illustrative check after the enum).
+enum TLayoutFormat {
+ ElfNone,
+
+ // Float image
+ ElfRgba32f,
+ ElfRgba16f,
+ ElfR32f,
+ ElfRgba8,
+ ElfRgba8Snorm,
+
+ ElfEsFloatGuard, // to help with comparisons
+
+ ElfRg32f,
+ ElfRg16f,
+ ElfR11fG11fB10f,
+ ElfR16f,
+ ElfRgba16,
+ ElfRgb10A2,
+ ElfRg16,
+ ElfRg8,
+ ElfR16,
+ ElfR8,
+ ElfRgba16Snorm,
+ ElfRg16Snorm,
+ ElfRg8Snorm,
+ ElfR16Snorm,
+ ElfR8Snorm,
+
+ ElfFloatGuard, // to help with comparisons
+
+ // Int image
+ ElfRgba32i,
+ ElfRgba16i,
+ ElfRgba8i,
+ ElfR32i,
+
+ ElfEsIntGuard, // to help with comparisons
+
+ ElfRg32i,
+ ElfRg16i,
+ ElfRg8i,
+ ElfR16i,
+ ElfR8i,
+
+ ElfIntGuard, // to help with comparisons
+
+ // Uint image
+ ElfRgba32ui,
+ ElfRgba16ui,
+ ElfRgba8ui,
+ ElfR32ui,
+
+ ElfEsUintGuard, // to help with comparisons
+
+ ElfRg32ui,
+ ElfRg16ui,
+ ElfRgb10a2ui,
+ ElfRg8ui,
+ ElfR16ui,
+ ElfR8ui,
+
+ ElfCount
+};
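+
+// For example (a sketch; 'IsEsFloatFormat' is hypothetical): category tests
+// compare against the guard enumerants, which is why the order above matters.
+//
+//     inline bool IsEsFloatFormat(TLayoutFormat f)
+//         { return f > ElfNone && f < ElfEsFloatGuard; }
+//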
+
+enum TLayoutDepth {
+ EldNone,
+ EldAny,
+ EldGreater,
+ EldLess,
+ EldUnchanged,
+
+ EldCount
+};
+
+enum TBlendEquationShift {
+ // No 'EBlendNone':
+    // These are used as bit-shift amounts. A mask built from them has type 'int';
+    // in that mask space, 0 means no bits set. In this enum itself, however, 0 is a
+    // valid value (EBlendMultiply), meaning the bit (1 << 0) is set.
+ EBlendMultiply,
+ EBlendScreen,
+ EBlendOverlay,
+ EBlendDarken,
+ EBlendLighten,
+ EBlendColordodge,
+ EBlendColorburn,
+ EBlendHardlight,
+ EBlendSoftlight,
+ EBlendDifference,
+ EBlendExclusion,
+ EBlendHslHue,
+ EBlendHslSaturation,
+ EBlendHslColor,
+ EBlendHslLuminosity,
+ EBlendAllEquations,
+
+ EBlendCount
+};
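+
+// For example (a sketch): the shifts combine into an 'int' mask.
+//
+//     int blendMask = (1 << EBlendMultiply) | (1 << EBlendScreen);
+//     bool usesScreen = (blendMask & (1 << EBlendScreen)) != 0;
+//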
+
+class TQualifier {
+public:
+ static const int layoutNotSet = -1;
+
+ void clear()
+ {
+ precision = EpqNone;
+ invariant = false;
+ noContraction = false;
+ makeTemporary();
+ declaredBuiltIn = EbvNone;
+ }
+
+ // drop qualifiers that don't belong in a temporary variable
+ void makeTemporary()
+ {
+ semanticName = nullptr;
+ storage = EvqTemporary;
+ builtIn = EbvNone;
+ clearInterstage();
+ clearMemory();
+ specConstant = false;
+ nonUniform = false;
+ clearLayout();
+ }
+
+ void clearInterstage()
+ {
+ clearInterpolation();
+ patch = false;
+ sample = false;
+ }
+
+ void clearInterpolation()
+ {
+ centroid = false;
+ smooth = false;
+ flat = false;
+ nopersp = false;
+#ifdef AMD_EXTENSIONS
+ explicitInterp = false;
+#endif
+#ifdef NV_EXTENSIONS
+ pervertexNV = false;
+ perPrimitiveNV = false;
+ perViewNV = false;
+ perTaskNV = false;
+#endif
+ }
+
+ void clearMemory()
+ {
+ coherent = false;
+ devicecoherent = false;
+ queuefamilycoherent = false;
+ workgroupcoherent = false;
+ subgroupcoherent = false;
+ nonprivate = false;
+ volatil = false;
+ restrict = false;
+ readonly = false;
+ writeonly = false;
+ }
+
+    // Drop just the storage qualification. Perhaps this should never be
+    // done, as it is fundamentally inconsistent, but what downstream
+    // consumers need still has to be explored.
+ // E.g., in a dereference, it is an inconsistency between:
+ // A) partially dereferenced resource is still in the storage class it started in
+ // B) partially dereferenced resource is a new temporary object
+ // If A, then nothing should change, if B, then everything should change, but this is half way.
+ void makePartialTemporary()
+ {
+ storage = EvqTemporary;
+ specConstant = false;
+ nonUniform = false;
+ }
+
+ const char* semanticName;
+ TStorageQualifier storage : 6;
+ TBuiltInVariable builtIn : 8;
+ TBuiltInVariable declaredBuiltIn : 8;
+ TPrecisionQualifier precision : 3;
+ bool invariant : 1; // require canonical treatment for cross-shader invariance
+ bool noContraction: 1; // prevent contraction and reassociation, e.g., for 'precise' keyword, and expressions it affects
+ bool centroid : 1;
+ bool smooth : 1;
+ bool flat : 1;
+ bool nopersp : 1;
+#ifdef AMD_EXTENSIONS
+ bool explicitInterp : 1;
+#endif
+#ifdef NV_EXTENSIONS
+ bool pervertexNV : 1;
+ bool perPrimitiveNV : 1;
+ bool perViewNV : 1;
+ bool perTaskNV : 1;
+#endif
+ bool patch : 1;
+ bool sample : 1;
+ bool coherent : 1;
+ bool devicecoherent : 1;
+ bool queuefamilycoherent : 1;
+ bool workgroupcoherent : 1;
+ bool subgroupcoherent : 1;
+ bool nonprivate : 1;
+ bool volatil : 1;
+ bool restrict : 1;
+ bool readonly : 1;
+ bool writeonly : 1;
+ bool specConstant : 1; // having a constant_id is not sufficient: expressions have no id, but are still specConstant
+ bool nonUniform : 1;
+
+ bool isMemory() const
+ {
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate;
+ }
+ bool isMemoryQualifierImageAndSSBOOnly() const
+ {
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly;
+ }
+ bool bufferReferenceNeedsVulkanMemoryModel() const
+ {
+ // include qualifiers that map to load/store availability/visibility/nonprivate memory access operands
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || nonprivate;
+ }
+
+ bool isInterpolation() const
+ {
+#ifdef AMD_EXTENSIONS
+ return flat || smooth || nopersp || explicitInterp;
+#else
+ return flat || smooth || nopersp;
+#endif
+ }
+
+#ifdef AMD_EXTENSIONS
+ bool isExplicitInterpolation() const
+ {
+ return explicitInterp;
+ }
+#endif
+
+ bool isAuxiliary() const
+ {
+#ifdef NV_EXTENSIONS
+ return centroid || patch || sample || pervertexNV;
+#else
+ return centroid || patch || sample;
+#endif
+ }
+
+ bool isPipeInput() const
+ {
+ switch (storage) {
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPipeOutput() const
+ {
+ switch (storage) {
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamInput() const
+ {
+ switch (storage) {
+ case EvqIn:
+ case EvqInOut:
+ case EvqConstReadOnly:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamOutput() const
+ {
+ switch (storage) {
+ case EvqOut:
+ case EvqInOut:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isUniformOrBuffer() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPerPrimitive() const
+ {
+#ifdef NV_EXTENSIONS
+ return perPrimitiveNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isPerView() const
+ {
+#ifdef NV_EXTENSIONS
+ return perViewNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isTaskMemory() const
+ {
+#ifdef NV_EXTENSIONS
+ return perTaskNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isIo() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // non-built-in symbols that might link between compilation units
+ bool isLinkable() const
+ {
+ switch (storage) {
+ case EvqGlobal:
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ case EvqUniform:
+ case EvqBuffer:
+ case EvqShared:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // True if this type of IO is supposed to be arrayed with an extra level for per-vertex data
+ bool isArrayedIo(EShLanguage language) const
+ {
+ switch (language) {
+ case EShLangGeometry:
+ return isPipeInput();
+ case EShLangTessControl:
+ return ! patch && (isPipeInput() || isPipeOutput());
+ case EShLangTessEvaluation:
+ return ! patch && isPipeInput();
+#ifdef NV_EXTENSIONS
+ case EShLangFragment:
+ return pervertexNV && isPipeInput();
+ case EShLangMeshNV:
+ return ! perTaskNV && isPipeOutput();
+#endif
+
+ default:
+ return false;
+ }
+ }
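+
+ // Illustrative (hypothetical values, not part of the upstream header):
+ //
+ //     TQualifier q;
+ //     q.clear();
+ //     q.storage = EvqVaryingOut;
+ //     q.isArrayedIo(EShLangTessControl);  // true: per-vertex, gets the extra array level
+ //     q.patch = true;
+ //     q.isArrayedIo(EShLangTessControl);  // false: 'patch' IO is not per-vertex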
+
+ // Implementing an embedded layout-qualifier class here, since C++ can't have a real class bitfield
+ void clearLayout() // all layout
+ {
+ clearUniformLayout();
+
+ layoutPushConstant = false;
+ layoutBufferReference = false;
+#ifdef NV_EXTENSIONS
+ layoutPassthrough = false;
+ layoutViewportRelative = false;
+ // -2048 as the default value indicating layoutSecondaryViewportRelative is not set
+ layoutSecondaryViewportRelativeOffset = -2048;
+ layoutShaderRecordNV = false;
+#endif
+
+ layoutBufferReferenceAlign = layoutBufferReferenceAlignEnd;
+
+ clearInterstageLayout();
+
+ layoutSpecConstantId = layoutSpecConstantIdEnd;
+
+ layoutFormat = ElfNone;
+ }
+ void clearInterstageLayout()
+ {
+ layoutLocation = layoutLocationEnd;
+ layoutComponent = layoutComponentEnd;
+ layoutIndex = layoutIndexEnd;
+ clearStreamLayout();
+ clearXfbLayout();
+ }
+ void clearStreamLayout()
+ {
+ layoutStream = layoutStreamEnd;
+ }
+ void clearXfbLayout()
+ {
+ layoutXfbBuffer = layoutXfbBufferEnd;
+ layoutXfbStride = layoutXfbStrideEnd;
+ layoutXfbOffset = layoutXfbOffsetEnd;
+ }
+
+ bool hasNonXfbLayout() const
+ {
+ return hasUniformLayout() ||
+ hasAnyLocation() ||
+ hasStream() ||
+ hasFormat() ||
+#ifdef NV_EXTENSIONS
+ layoutShaderRecordNV ||
+#endif
+ layoutPushConstant ||
+ layoutBufferReference;
+ }
+ bool hasLayout() const
+ {
+ return hasNonXfbLayout() ||
+ hasXfb();
+ }
+ TLayoutMatrix layoutMatrix : 3;
+ TLayoutPacking layoutPacking : 4;
+ int layoutOffset;
+ int layoutAlign;
+
+ unsigned int layoutLocation : 12;
+ static const unsigned int layoutLocationEnd = 0xFFF;
+
+ unsigned int layoutComponent : 3;
+ static const unsigned int layoutComponentEnd = 4;
+
+ unsigned int layoutSet : 7;
+ static const unsigned int layoutSetEnd = 0x3F;
+
+ unsigned int layoutBinding : 16;
+ static const unsigned int layoutBindingEnd = 0xFFFF;
+
+ unsigned int layoutIndex : 8;
+ static const unsigned int layoutIndexEnd = 0xFF;
+
+ unsigned int layoutStream : 8;
+ static const unsigned int layoutStreamEnd = 0xFF;
+
+ unsigned int layoutXfbBuffer : 4;
+ static const unsigned int layoutXfbBufferEnd = 0xF;
+
+ unsigned int layoutXfbStride : 14;
+ static const unsigned int layoutXfbStrideEnd = 0x3FFF;
+
+ unsigned int layoutXfbOffset : 13;
+ static const unsigned int layoutXfbOffsetEnd = 0x1FFF;
+
+ unsigned int layoutAttachment : 8; // for input_attachment_index
+ static const unsigned int layoutAttachmentEnd = 0xFF;
+
+ unsigned int layoutSpecConstantId : 11;
+ static const unsigned int layoutSpecConstantIdEnd = 0x7FF;
+
+ // stored as log2 of the actual alignment value
+ unsigned int layoutBufferReferenceAlign : 6;
+ static const unsigned int layoutBufferReferenceAlignEnd = 0x3F;
+
+ TLayoutFormat layoutFormat : 8;
+
+ bool layoutPushConstant;
+ bool layoutBufferReference;
+
+#ifdef NV_EXTENSIONS
+ bool layoutPassthrough;
+ bool layoutViewportRelative;
+ int layoutSecondaryViewportRelativeOffset;
+ bool layoutShaderRecordNV;
+#endif
+
+ bool hasUniformLayout() const
+ {
+ return hasMatrix() ||
+ hasPacking() ||
+ hasOffset() ||
+ hasBinding() ||
+ hasSet() ||
+ hasAlign();
+ }
+ void clearUniformLayout() // only uniform specific
+ {
+ layoutMatrix = ElmNone;
+ layoutPacking = ElpNone;
+ layoutOffset = layoutNotSet;
+ layoutAlign = layoutNotSet;
+
+ layoutSet = layoutSetEnd;
+ layoutBinding = layoutBindingEnd;
+ layoutAttachment = layoutAttachmentEnd;
+ }
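+
+ // Illustrative sketch of the sentinel convention used throughout this class:
+ // each layout field is a bitfield whose all-ones '...End' value means "not
+ // set", so the clear*() methods write the sentinel and the has*() methods
+ // test against it:
+ //
+ //     TQualifier q;
+ //     q.clear();           // assumed to write the sentinels via clearLayout()
+ //     q.hasBinding();      // false: layoutBinding == layoutBindingEnd
+ //     q.layoutBinding = 3;
+ //     q.hasBinding();      // true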
+
+ bool hasMatrix() const
+ {
+ return layoutMatrix != ElmNone;
+ }
+ bool hasPacking() const
+ {
+ return layoutPacking != ElpNone;
+ }
+ bool hasOffset() const
+ {
+ return layoutOffset != layoutNotSet;
+ }
+ bool hasAlign() const
+ {
+ return layoutAlign != layoutNotSet;
+ }
+ bool hasAnyLocation() const
+ {
+ return hasLocation() ||
+ hasComponent() ||
+ hasIndex();
+ }
+ bool hasLocation() const
+ {
+ return layoutLocation != layoutLocationEnd;
+ }
+ bool hasComponent() const
+ {
+ return layoutComponent != layoutComponentEnd;
+ }
+ bool hasIndex() const
+ {
+ return layoutIndex != layoutIndexEnd;
+ }
+ bool hasSet() const
+ {
+ return layoutSet != layoutSetEnd;
+ }
+ bool hasBinding() const
+ {
+ return layoutBinding != layoutBindingEnd;
+ }
+ bool hasStream() const
+ {
+ return layoutStream != layoutStreamEnd;
+ }
+ bool hasFormat() const
+ {
+ return layoutFormat != ElfNone;
+ }
+ bool hasXfb() const
+ {
+ return hasXfbBuffer() ||
+ hasXfbStride() ||
+ hasXfbOffset();
+ }
+ bool hasXfbBuffer() const
+ {
+ return layoutXfbBuffer != layoutXfbBufferEnd;
+ }
+ bool hasXfbStride() const
+ {
+ return layoutXfbStride != layoutXfbStrideEnd;
+ }
+ bool hasXfbOffset() const
+ {
+ return layoutXfbOffset != layoutXfbOffsetEnd;
+ }
+ bool hasAttachment() const
+ {
+ return layoutAttachment != layoutAttachmentEnd;
+ }
+ bool hasSpecConstantId() const
+ {
+ // Not the same thing as being a specialization constant, this
+ // is just whether or not it was declared with an ID.
+ return layoutSpecConstantId != layoutSpecConstantIdEnd;
+ }
+ bool hasBufferReferenceAlign() const
+ {
+ return layoutBufferReferenceAlign != layoutBufferReferenceAlignEnd;
+ }
+ bool isSpecConstant() const
+ {
+ // True if type is a specialization constant, whether or not it
+ // had a specialization-constant ID, and false if it is not a
+ // true front-end constant.
+ return specConstant;
+ }
+ bool isNonUniform() const
+ {
+ return nonUniform;
+ }
+ bool isFrontEndConstant() const
+ {
+ // True if the front-end knows the final constant value.
+ // This allows front-end constant folding.
+ return storage == EvqConst && ! specConstant;
+ }
+ bool isConstant() const
+ {
+ // True if is either kind of constant; specialization or regular.
+ return isFrontEndConstant() || isSpecConstant();
+ }
+ void makeSpecConstant()
+ {
+ storage = EvqConst;
+ specConstant = true;
+ }
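+
+ // Illustrative (not part of the upstream header): the three constant flavors
+ // the methods above distinguish.
+ //
+ //     TQualifier q;
+ //     q.clear();
+ //     q.storage = EvqConst;
+ //     q.isFrontEndConstant();  // true: plain 'const', foldable at compile time
+ //     q.makeSpecConstant();
+ //     q.isSpecConstant();      // true
+ //     q.isFrontEndConstant();  // false: value fixed only at pipeline creation
+ //     q.isConstant();          // true before and after
+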
+ static const char* getLayoutPackingString(TLayoutPacking packing)
+ {
+ switch (packing) {
+ case ElpPacked: return "packed";
+ case ElpShared: return "shared";
+ case ElpStd140: return "std140";
+ case ElpStd430: return "std430";
+ case ElpScalar: return "scalar";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutMatrixString(TLayoutMatrix m)
+ {
+ switch (m) {
+ case ElmColumnMajor: return "column_major";
+ case ElmRowMajor: return "row_major";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutFormatString(TLayoutFormat f)
+ {
+ switch (f) {
+ case ElfRgba32f: return "rgba32f";
+ case ElfRgba16f: return "rgba16f";
+ case ElfRg32f: return "rg32f";
+ case ElfRg16f: return "rg16f";
+ case ElfR11fG11fB10f: return "r11f_g11f_b10f";
+ case ElfR32f: return "r32f";
+ case ElfR16f: return "r16f";
+ case ElfRgba16: return "rgba16";
+ case ElfRgb10A2: return "rgb10_a2";
+ case ElfRgba8: return "rgba8";
+ case ElfRg16: return "rg16";
+ case ElfRg8: return "rg8";
+ case ElfR16: return "r16";
+ case ElfR8: return "r8";
+ case ElfRgba16Snorm: return "rgba16_snorm";
+ case ElfRgba8Snorm: return "rgba8_snorm";
+ case ElfRg16Snorm: return "rg16_snorm";
+ case ElfRg8Snorm: return "rg8_snorm";
+ case ElfR16Snorm: return "r16_snorm";
+ case ElfR8Snorm: return "r8_snorm";
+
+ case ElfRgba32i: return "rgba32i";
+ case ElfRgba16i: return "rgba16i";
+ case ElfRgba8i: return "rgba8i";
+ case ElfRg32i: return "rg32i";
+ case ElfRg16i: return "rg16i";
+ case ElfRg8i: return "rg8i";
+ case ElfR32i: return "r32i";
+ case ElfR16i: return "r16i";
+ case ElfR8i: return "r8i";
+
+ case ElfRgba32ui: return "rgba32ui";
+ case ElfRgba16ui: return "rgba16ui";
+ case ElfRgba8ui: return "rgba8ui";
+ case ElfRg32ui: return "rg32ui";
+ case ElfRg16ui: return "rg16ui";
+ case ElfRgb10a2ui: return "rgb10_a2ui";
+ case ElfRg8ui: return "rg8ui";
+ case ElfR32ui: return "r32ui";
+ case ElfR16ui: return "r16ui";
+ case ElfR8ui: return "r8ui";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutDepthString(TLayoutDepth d)
+ {
+ switch (d) {
+ case EldAny: return "depth_any";
+ case EldGreater: return "depth_greater";
+ case EldLess: return "depth_less";
+ case EldUnchanged: return "depth_unchanged";
+ default: return "none";
+ }
+ }
+ static const char* getBlendEquationString(TBlendEquationShift e)
+ {
+ switch (e) {
+ case EBlendMultiply: return "blend_support_multiply";
+ case EBlendScreen: return "blend_support_screen";
+ case EBlendOverlay: return "blend_support_overlay";
+ case EBlendDarken: return "blend_support_darken";
+ case EBlendLighten: return "blend_support_lighten";
+ case EBlendColordodge: return "blend_support_colordodge";
+ case EBlendColorburn: return "blend_support_colorburn";
+ case EBlendHardlight: return "blend_support_hardlight";
+ case EBlendSoftlight: return "blend_support_softlight";
+ case EBlendDifference: return "blend_support_difference";
+ case EBlendExclusion: return "blend_support_exclusion";
+ case EBlendHslHue: return "blend_support_hsl_hue";
+ case EBlendHslSaturation: return "blend_support_hsl_saturation";
+ case EBlendHslColor: return "blend_support_hsl_color";
+ case EBlendHslLuminosity: return "blend_support_hsl_luminosity";
+ case EBlendAllEquations: return "blend_support_all_equations";
+ default: return "unknown";
+ }
+ }
+ static const char* getGeometryString(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return "points";
+ case ElgLines: return "lines";
+ case ElgLinesAdjacency: return "lines_adjacency";
+ case ElgLineStrip: return "line_strip";
+ case ElgTriangles: return "triangles";
+ case ElgTrianglesAdjacency: return "triangles_adjacency";
+ case ElgTriangleStrip: return "triangle_strip";
+ case ElgQuads: return "quads";
+ case ElgIsolines: return "isolines";
+ default: return "none";
+ }
+ }
+ static const char* getVertexSpacingString(TVertexSpacing spacing)
+ {
+ switch (spacing) {
+ case EvsEqual: return "equal_spacing";
+ case EvsFractionalEven: return "fractional_even_spacing";
+ case EvsFractionalOdd: return "fractional_odd_spacing";
+ default: return "none";
+ }
+ }
+ static const char* getVertexOrderString(TVertexOrder order)
+ {
+ switch (order) {
+ case EvoCw: return "cw";
+ case EvoCcw: return "ccw";
+ default: return "none";
+ }
+ }
+ static int mapGeometryToSize(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return 1;
+ case ElgLines: return 2;
+ case ElgLinesAdjacency: return 4;
+ case ElgTriangles: return 3;
+ case ElgTrianglesAdjacency: return 6;
+ default: return 0;
+ }
+ }
+};
+
+// Qualifiers that don't need to be kept per object. They have shader scope, not object scope.
+// So, they will not be part of TType, TQualifier, etc.
+struct TShaderQualifiers {
+ TLayoutGeometry geometry; // geometry/tessellation shader in/out primitives
+ bool pixelCenterInteger; // fragment shader
+ bool originUpperLeft; // fragment shader
+ int invocations;
+ int vertices; // for tessellation "vertices", geometry & mesh "max_vertices"
+ TVertexSpacing spacing;
+ TVertexOrder order;
+ bool pointMode;
+ int localSize[3]; // compute shader
+ int localSizeSpecId[3]; // compute shader specialization id for gl_WorkGroupSize
+ bool earlyFragmentTests; // fragment input
+ bool postDepthCoverage; // fragment input
+ TLayoutDepth layoutDepth;
+ bool blendEquation; // true if any blend equation was specified
+ int numViews; // multiview extensions
+
+#ifdef NV_EXTENSIONS
+ bool layoutOverrideCoverage; // true if layout override_coverage set
+ bool layoutDerivativeGroupQuads; // true if layout derivative_group_quadsNV set
+ bool layoutDerivativeGroupLinear; // true if layout derivative_group_linearNV set
+ int primitives; // mesh shader "max_primitives"
+#endif
+
+ void init()
+ {
+ geometry = ElgNone;
+ originUpperLeft = false;
+ pixelCenterInteger = false;
+ invocations = TQualifier::layoutNotSet;
+ vertices = TQualifier::layoutNotSet;
+ spacing = EvsNone;
+ order = EvoNone;
+ pointMode = false;
+ localSize[0] = 1;
+ localSize[1] = 1;
+ localSize[2] = 1;
+ localSizeSpecId[0] = TQualifier::layoutNotSet;
+ localSizeSpecId[1] = TQualifier::layoutNotSet;
+ localSizeSpecId[2] = TQualifier::layoutNotSet;
+ earlyFragmentTests = false;
+ postDepthCoverage = false;
+ layoutDepth = EldNone;
+ blendEquation = false;
+ numViews = TQualifier::layoutNotSet;
+#ifdef NV_EXTENSIONS
+ layoutOverrideCoverage = false;
+ layoutDerivativeGroupQuads = false;
+ layoutDerivativeGroupLinear = false;
+ primitives = TQualifier::layoutNotSet;
+#endif
+ }
+
+ // Merge in characteristics from the 'src' qualifier. They can override when
+ // set, but never erase when not set.
+ void merge(const TShaderQualifiers& src)
+ {
+ if (src.geometry != ElgNone)
+ geometry = src.geometry;
+ if (src.pixelCenterInteger)
+ pixelCenterInteger = src.pixelCenterInteger;
+ if (src.originUpperLeft)
+ originUpperLeft = src.originUpperLeft;
+ if (src.invocations != TQualifier::layoutNotSet)
+ invocations = src.invocations;
+ if (src.vertices != TQualifier::layoutNotSet)
+ vertices = src.vertices;
+ if (src.spacing != EvsNone)
+ spacing = src.spacing;
+ if (src.order != EvoNone)
+ order = src.order;
+ if (src.pointMode)
+ pointMode = true;
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSize[i] > 1)
+ localSize[i] = src.localSize[i];
+ }
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSizeSpecId[i] != TQualifier::layoutNotSet)
+ localSizeSpecId[i] = src.localSizeSpecId[i];
+ }
+ if (src.earlyFragmentTests)
+ earlyFragmentTests = true;
+ if (src.postDepthCoverage)
+ postDepthCoverage = true;
+ if (src.layoutDepth)
+ layoutDepth = src.layoutDepth;
+ if (src.blendEquation)
+ blendEquation = src.blendEquation;
+ if (src.numViews != TQualifier::layoutNotSet)
+ numViews = src.numViews;
+#ifdef NV_EXTENSIONS
+ if (src.layoutOverrideCoverage)
+ layoutOverrideCoverage = src.layoutOverrideCoverage;
+ if (src.layoutDerivativeGroupQuads)
+ layoutDerivativeGroupQuads = src.layoutDerivativeGroupQuads;
+ if (src.layoutDerivativeGroupLinear)
+ layoutDerivativeGroupLinear = src.layoutDerivativeGroupLinear;
+ if (src.primitives != TQualifier::layoutNotSet)
+ primitives = src.primitives;
+#endif
+ }
+};
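+
+// Illustrative sketch (not part of the upstream header): merge() implements
+// override-when-set semantics, so unset fields never erase merged-in state:
+//
+//     TShaderQualifiers a, b;
+//     a.init(); b.init();
+//     a.invocations = 2;
+//     b.vertices = 3;
+//     a.merge(b);   // a.invocations == 2 and a.vertices == 3; b's unset
+//                   // fields (layoutNotSet / ElgNone) overwrite nothing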
+
+//
+// TPublicType is just temporarily used while parsing and not quite the same
+// information kept per node in TType. Due to the bison stack, it can't have
+// types that it thinks have non-trivial constructors. It should
+// just be used while recognizing the grammar, not anything else.
+// Once enough is known about the situation, the proper information
+// is moved into a TType, or the parse context, etc.
+//
+class TPublicType {
+public:
+ TBasicType basicType;
+ TSampler sampler;
+ TQualifier qualifier;
+ TShaderQualifiers shaderQualifiers;
+ int vectorSize : 4;
+ int matrixCols : 4;
+ int matrixRows : 4;
+ bool coopmat : 1;
+ TArraySizes* arraySizes;
+ const TType* userDef;
+ TSourceLoc loc;
+ TArraySizes* typeParameters;
+
+ void initType(const TSourceLoc& l)
+ {
+ basicType = EbtVoid;
+ vectorSize = 1;
+ matrixRows = 0;
+ matrixCols = 0;
+ arraySizes = nullptr;
+ userDef = nullptr;
+ loc = l;
+ typeParameters = nullptr;
+ coopmat = false;
+ }
+
+ void initQualifiers(bool global = false)
+ {
+ qualifier.clear();
+ if (global)
+ qualifier.storage = EvqGlobal;
+ }
+
+ void init(const TSourceLoc& l, bool global = false)
+ {
+ initType(l);
+ sampler.clear();
+ initQualifiers(global);
+ shaderQualifiers.init();
+ }
+
+ void setVector(int s)
+ {
+ matrixRows = 0;
+ matrixCols = 0;
+ vectorSize = s;
+ }
+
+ void setMatrix(int c, int r)
+ {
+ matrixRows = r;
+ matrixCols = c;
+ vectorSize = 0;
+ }
+
+ bool isScalar() const
+ {
+ return matrixCols == 0 && vectorSize == 1 && arraySizes == nullptr && userDef == nullptr;
+ }
+
+ // "Image" is a superset of "Subpass"
+ bool isImage() const { return basicType == EbtSampler && sampler.isImage(); }
+ bool isSubpass() const { return basicType == EbtSampler && sampler.isSubpass(); }
+};
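+
+// Illustrative parse-time flow (hypothetical 'loc'; not part of the upstream
+// header): a TPublicType is filled in piecewise, then converted to a TType:
+//
+//     TPublicType pt;
+//     pt.init(loc, /*global*/ true);  // clears type, sampler, and qualifiers
+//     pt.basicType = EbtFloat;
+//     pt.setVector(4);                // a vec4
+//     TType type(pt);                 // shallow conversion into a real TType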
+
+//
+// Base class for things that have a type.
+//
+class TType {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ // for "empty" type (no args) or simple scalar/vector/matrix
+ explicit TType(TBasicType t = EbtVoid, TStorageQualifier q = EvqTemporary, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices
+ }
+ // for explicit precision qualifier
+ TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ qualifier.precision = p;
+ assert(p >= EpqNone && p <= EpqHigh);
+ assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices
+ }
+ // for turning a TPublicType into a TType, using a shallow copy
+ explicit TType(const TPublicType& p) :
+ basicType(p.basicType),
+ vectorSize(p.vectorSize), matrixCols(p.matrixCols), matrixRows(p.matrixRows), vector1(false), coopmat(p.coopmat),
+ arraySizes(p.arraySizes), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(p.typeParameters)
+ {
+ if (basicType == EbtSampler)
+ sampler = p.sampler;
+ else
+ sampler.clear();
+ qualifier = p.qualifier;
+ if (p.userDef) {
+ if (p.userDef->basicType == EbtReference) {
+ basicType = EbtReference;
+ referentType = p.userDef->referentType;
+ } else {
+ structure = p.userDef->getWritableStruct(); // public type is short-lived; there are no sharing issues
+ }
+ typeName = NewPoolTString(p.userDef->getTypeName().c_str());
+ }
+ if (p.coopmat && p.basicType == EbtFloat &&
+ p.typeParameters && p.typeParameters->getNumDims() > 0 &&
+ p.typeParameters->getDimSize(0) == 16) {
+ basicType = EbtFloat16;
+ qualifier.precision = EpqNone;
+ }
+ }
+ // for construction of sampler types
+ TType(const TSampler& sampler, TStorageQualifier q = EvqUniform, TArraySizes* as = nullptr) :
+ basicType(EbtSampler), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ arraySizes(as), structure(nullptr), fieldName(nullptr), typeName(nullptr),
+ sampler(sampler), typeParameters(nullptr)
+ {
+ qualifier.clear();
+ qualifier.storage = q;
+ }
+ // to efficiently make a dereferenced type
+ // without ever duplicating the outer structure that will be thrown away
+ // and using only shallow copy
+ TType(const TType& type, int derefIndex, bool rowMajor = false)
+ {
+ if (type.isArray()) {
+ shallowCopy(type);
+ if (type.getArraySizes()->getNumDims() == 1) {
+ arraySizes = nullptr;
+ } else {
+ // want our own copy of the array, so we can edit it
+ arraySizes = new TArraySizes;
+ arraySizes->copyDereferenced(*type.arraySizes);
+ }
+ } else if (type.basicType == EbtStruct || type.basicType == EbtBlock) {
+ // do a structure dereference
+ const TTypeList& memberList = *type.getStruct();
+ shallowCopy(*memberList[derefIndex].type);
+ return;
+ } else {
+ // do a vector/matrix dereference
+ shallowCopy(type);
+ if (matrixCols > 0) {
+ // dereference from matrix to vector
+ if (rowMajor)
+ vectorSize = matrixCols;
+ else
+ vectorSize = matrixRows;
+ matrixCols = 0;
+ matrixRows = 0;
+ if (vectorSize == 1)
+ vector1 = true;
+ } else if (isVector()) {
+ // dereference from vector to scalar
+ vectorSize = 1;
+ vector1 = false;
+ } else if (isCoopMat()) {
+ coopmat = false;
+ typeParameters = nullptr;
+ }
+ }
+ }
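+
+ // Illustrative (not part of the upstream header): dereferencing walks
+ // matrix -> column vector -> scalar, one constructor call per level:
+ //
+ //     TType mat(EbtFloat, EvqTemporary, 0, 4, 4); // mat4 (vectorSize must be 0)
+ //     TType col(mat, 0);    // column: a 4-component vector
+ //     TType elem(col, 0);   // element: a float scalar
+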
+ // for making structures, ...
+ TType(TTypeList* userDef, const TString& n) :
+ basicType(EbtStruct), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ // For interface blocks
+ TType(TTypeList* userDef, const TString& n, const TQualifier& q) :
+ basicType(EbtBlock), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ qualifier(q), arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ // for block reference (first parameter must be EbtReference)
+ explicit TType(TBasicType t, const TType &p, const TString& n) :
+ basicType(t), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ assert(t == EbtReference);
+ typeName = NewPoolTString(n.c_str());
+ qualifier.clear();
+ qualifier.storage = p.qualifier.storage;
+ referentType = p.clone();
+ }
+ virtual ~TType() {}
+
+ // Not for use across pool pops; it will cause multiple instances of TType to point to the same information.
+ // This only works if that information (like a structure's list of types) does not change and
+ // the instances are sharing the same pool.
+ void shallowCopy(const TType& copyOf)
+ {
+ basicType = copyOf.basicType;
+ sampler = copyOf.sampler;
+ qualifier = copyOf.qualifier;
+ vectorSize = copyOf.vectorSize;
+ matrixCols = copyOf.matrixCols;
+ matrixRows = copyOf.matrixRows;
+ vector1 = copyOf.vector1;
+ arraySizes = copyOf.arraySizes; // copying the pointer only, not the contents
+ fieldName = copyOf.fieldName;
+ typeName = copyOf.typeName;
+ if (isStruct()) {
+ structure = copyOf.structure;
+ } else {
+ referentType = copyOf.referentType;
+ }
+ typeParameters = copyOf.typeParameters;
+ coopmat = copyOf.coopmat;
+ }
+
+ // Make complete copy of the whole type graph rooted at 'copyOf'.
+ void deepCopy(const TType& copyOf)
+ {
+ TMap<TTypeList*,TTypeList*> copied; // to enable copying a type graph as a graph, not a tree
+ deepCopy(copyOf, copied);
+ }
+
+ // Recursively make temporary
+ void makeTemporary()
+ {
+ getQualifier().makeTemporary();
+
+ if (isStruct())
+ for (unsigned int i = 0; i < structure->size(); ++i)
+ (*structure)[i].type->makeTemporary();
+ }
+
+ TType* clone() const
+ {
+ TType *newType = new TType();
+ newType->deepCopy(*this);
+
+ return newType;
+ }
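+
+ // Illustrative (assumes an existing TType 'original'; not part of the
+ // upstream header): shallowCopy() aliases the shared descriptors, while
+ // clone()/deepCopy() duplicate the whole type graph:
+ //
+ //     TType a; a.deepCopy(original);     // independent copy, safe to edit
+ //     TType b; b.shallowCopy(original);  // shares arraySizes/structure pointers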
+
+ void makeVector() { vector1 = true; }
+
+ virtual void hideMember() { basicType = EbtVoid; vectorSize = 1; }
+ virtual bool hiddenMember() const { return basicType == EbtVoid; }
+
+ virtual void setFieldName(const TString& n) { fieldName = NewPoolTString(n.c_str()); }
+ virtual const TString& getTypeName() const
+ {
+ assert(typeName);
+ return *typeName;
+ }
+
+ virtual const TString& getFieldName() const
+ {
+ assert(fieldName);
+ return *fieldName;
+ }
+
+ virtual TBasicType getBasicType() const { return basicType; }
+ virtual const TSampler& getSampler() const { return sampler; }
+ virtual TSampler& getSampler() { return sampler; }
+
+ virtual TQualifier& getQualifier() { return qualifier; }
+ virtual const TQualifier& getQualifier() const { return qualifier; }
+
+ virtual int getVectorSize() const { return vectorSize; } // returns 1 for either scalar or vector of size 1, valid for both
+ virtual int getMatrixCols() const { return matrixCols; }
+ virtual int getMatrixRows() const { return matrixRows; }
+ virtual int getOuterArraySize() const { return arraySizes->getOuterSize(); }
+ virtual TIntermTyped* getOuterArrayNode() const { return arraySizes->getOuterNode(); }
+ virtual int getCumulativeArraySize() const { return arraySizes->getCumulativeSize(); }
+ virtual bool isArrayOfArrays() const { return arraySizes != nullptr && arraySizes->getNumDims() > 1; }
+ virtual int getImplicitArraySize() const { return arraySizes->getImplicitSize(); }
+ virtual const TArraySizes* getArraySizes() const { return arraySizes; }
+ virtual TArraySizes* getArraySizes() { return arraySizes; }
+ virtual TType* getReferentType() const { return referentType; }
+ virtual const TArraySizes* getTypeParameters() const { return typeParameters; }
+ virtual TArraySizes* getTypeParameters() { return typeParameters; }
+
+ virtual bool isScalar() const { return ! isVector() && ! isMatrix() && ! isStruct() && ! isArray(); }
+ virtual bool isScalarOrVec1() const { return isScalar() || vector1; }
+ virtual bool isVector() const { return vectorSize > 1 || vector1; }
+ virtual bool isMatrix() const { return matrixCols ? true : false; }
+ virtual bool isArray() const { return arraySizes != nullptr; }
+ virtual bool isSizedArray() const { return isArray() && arraySizes->isSized(); }
+ virtual bool isUnsizedArray() const { return isArray() && !arraySizes->isSized(); }
+ virtual bool isArrayVariablyIndexed() const { assert(isArray()); return arraySizes->isVariablyIndexed(); }
+ virtual void setArrayVariablyIndexed() { assert(isArray()); arraySizes->setVariablyIndexed(); }
+ virtual void updateImplicitArraySize(int size) { assert(isArray()); arraySizes->updateImplicitSize(size); }
+ virtual bool isStruct() const { return basicType == EbtStruct || basicType == EbtBlock; }
+ virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble || basicType == EbtFloat16; }
+ virtual bool isIntegerDomain() const
+ {
+ switch (basicType) {
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtAtomicUint:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+ virtual bool isOpaque() const { return basicType == EbtSampler || basicType == EbtAtomicUint
+#ifdef NV_EXTENSIONS
+ || basicType == EbtAccStructNV
+#endif
+ ; }
+ virtual bool isBuiltIn() const { return getQualifier().builtIn != EbvNone; }
+
+ // "Image" is a superset of "Subpass"
+ virtual bool isImage() const { return basicType == EbtSampler && getSampler().isImage(); }
+ virtual bool isSubpass() const { return basicType == EbtSampler && getSampler().isSubpass(); }
+ virtual bool isTexture() const { return basicType == EbtSampler && getSampler().isTexture(); }
+ virtual bool isParameterized() const { return typeParameters != nullptr; }
+ virtual bool isCoopMat() const { return coopmat; }
+
+ // return true if this type contains any subtype which satisfies the given predicate.
+ template <typename P>
+ bool contains(P predicate) const
+ {
+ if (predicate(this))
+ return true;
+
+ const auto hasa = [predicate](const TTypeLoc& tl) { return tl.type->contains(predicate); };
+
+ return isStruct() && std::any_of(structure->begin(), structure->end(), hasa);
+ }
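+
+ // Illustrative (not part of the upstream header): 'contains' accepts any
+ // predicate over 'const TType*', e.g. a 64-bit-integer check:
+ //
+ //     bool has64BitInt = type.contains([](const TType* t) {
+ //         return t->getBasicType() == EbtInt64 || t->getBasicType() == EbtUint64;
+ //     });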
+
+ // Recursively checks if the type contains the given basic type
+ virtual bool containsBasicType(TBasicType checkType) const
+ {
+ return contains([checkType](const TType* t) { return t->basicType == checkType; } );
+ }
+
+ // Recursively check the structure for any arrays, needed for some error checks
+ virtual bool containsArray() const
+ {
+ return contains([](const TType* t) { return t->isArray(); } );
+ }
+
+ // Check the structure for any structures, needed for some error checks
+ virtual bool containsStructure() const
+ {
+ return contains([this](const TType* t) { return t != this && t->isStruct(); } );
+ }
+
+ // Recursively check the structure for any unsized arrays, needed for triggering a copyUp().
+ virtual bool containsUnsizedArray() const
+ {
+ return contains([](const TType* t) { return t->isUnsizedArray(); } );
+ }
+
+ virtual bool containsOpaque() const
+ {
+ return contains([](const TType* t) { return t->isOpaque(); } );
+ }
+
+ // Recursively checks if the type contains a built-in variable
+ virtual bool containsBuiltIn() const
+ {
+ return contains([](const TType* t) { return t->isBuiltIn(); } );
+ }
+
+ virtual bool containsNonOpaque() const
+ {
+ const auto nonOpaque = [](const TType* t) {
+ switch (t->basicType) {
+ case EbtVoid:
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtBool:
+ case EbtReference:
+ return true;
+ default:
+ return false;
+ }
+ };
+
+ return contains(nonOpaque);
+ }
+
+ virtual bool containsSpecializationSize() const
+ {
+ return contains([](const TType* t) { return t->isArray() && t->arraySizes->isOuterSpecialization(); } );
+ }
+
+ virtual bool contains16BitInt() const
+ {
+ return containsBasicType(EbtInt16) || containsBasicType(EbtUint16);
+ }
+
+ virtual bool contains8BitInt() const
+ {
+ return containsBasicType(EbtInt8) || containsBasicType(EbtUint8);
+ }
+
+ virtual bool containsCoopMat() const
+ {
+ return contains([](const TType* t) { return t->coopmat; } );
+ }
+
+ // Array editing methods. Array descriptors can be shared across
+ // type instances. This allows all uses of the same array
+ // to be updated at once. E.g., all nodes can be explicitly sized
+ // by tracking and correcting one implicit size. Or, all nodes
+ // can get the explicit size on a redeclaration that gives size.
+ //
+ // N.B.: Don't share with the shared symbol tables (symbols are
+ // marked as isReadOnly()). Such symbols with arrays that will be
+ // edited need to copyUp() on first use, so that
+ // A) the edits don't affect the shared symbol table, and
+ // B) the edits are shared across all users.
+ void updateArraySizes(const TType& type)
+ {
+ // For when we may already be sharing existing array descriptors,
+ // keeping the pointers the same, just updating the contents.
+ assert(arraySizes != nullptr);
+ assert(type.arraySizes != nullptr);
+ *arraySizes = *type.arraySizes;
+ }
+ void copyArraySizes(const TArraySizes& s)
+ {
+ // For setting a fresh new set of array sizes, not yet worrying about sharing.
+ arraySizes = new TArraySizes;
+ *arraySizes = s;
+ }
+ void transferArraySizes(TArraySizes* s)
+ {
+ // For setting an already allocated set of sizes that this type can use
+ // (no copy made).
+ arraySizes = s;
+ }
+ void clearArraySizes()
+ {
+ arraySizes = nullptr;
+ }
+
+ // Add inner array sizes, to any existing sizes, via copy; the
+ // sizes passed in can still be reused for other purposes.
+ void copyArrayInnerSizes(const TArraySizes* s)
+ {
+ if (s != nullptr) {
+ if (arraySizes == nullptr)
+ copyArraySizes(*s);
+ else
+ arraySizes->addInnerSizes(*s);
+ }
+ }
+ void changeOuterArraySize(int s) { arraySizes->changeOuterSize(s); }
+
+ // Recursively make the implicit array size the explicit array size.
+ // Explicit arrays are compile-time or link-time sized, never run-time sized.
+ // Sometimes, policy calls for an array to be run-time sized even if it was
+ // never variably indexed: Don't turn a 'skipNonvariablyIndexed' array into
+ // an explicit array.
+ void adoptImplicitArraySizes(bool skipNonvariablyIndexed)
+ {
+ if (isUnsizedArray() && !(skipNonvariablyIndexed || isArrayVariablyIndexed()))
+ changeOuterArraySize(getImplicitArraySize());
+#ifdef NV_EXTENSIONS
+ // For multi-dim per-view arrays, set unsized inner dimension size to 1
+ if (qualifier.isPerView() && arraySizes && arraySizes->isInnerUnsized())
+ arraySizes->clearInnerUnsized();
+#endif
+ if (isStruct() && structure->size() > 0) {
+ int lastMember = (int)structure->size() - 1;
+ for (int i = 0; i < lastMember; ++i)
+ (*structure)[i].type->adoptImplicitArraySizes(false);
+ // implement the "last member of an SSBO" policy
+ (*structure)[lastMember].type->adoptImplicitArraySizes(getQualifier().storage == EvqBuffer);
+ }
+ }
+
+
+ void updateTypeParameters(const TType& type)
+ {
+ // For when we may already be sharing existing array descriptors,
+ // keeping the pointers the same, just updating the contents.
+ assert(typeParameters != nullptr);
+ assert(type.typeParameters != nullptr);
+ *typeParameters = *type.typeParameters;
+ }
+ void copyTypeParameters(const TArraySizes& s)
+ {
+ // For setting a fresh new set of type parameters, not yet worrying about sharing.
+ typeParameters = new TArraySizes;
+ *typeParameters = s;
+ }
+ void transferTypeParameters(TArraySizes* s)
+ {
+ // For setting an already allocated set of sizes that this type can use
+ // (no copy made).
+ typeParameters = s;
+ }
+ void clearTypeParameters()
+ {
+ typeParameters = nullptr;
+ }
+
+ // Add inner array sizes, to any existing sizes, via copy; the
+ // sizes passed in can still be reused for other purposes.
+ void copyTypeParametersInnerSizes(const TArraySizes* s)
+ {
+ if (s != nullptr) {
+ if (typeParameters == nullptr)
+ copyTypeParameters(*s);
+ else
+ typeParameters->addInnerSizes(*s);
+ }
+ }
+
+
+
+ const char* getBasicString() const
+ {
+ return TType::getBasicString(basicType);
+ }
+
+ static const char* getBasicString(TBasicType t)
+ {
+ switch (t) {
+ case EbtVoid: return "void";
+ case EbtFloat: return "float";
+ case EbtDouble: return "double";
+ case EbtFloat16: return "float16_t";
+ case EbtInt8: return "int8_t";
+ case EbtUint8: return "uint8_t";
+ case EbtInt16: return "int16_t";
+ case EbtUint16: return "uint16_t";
+ case EbtInt: return "int";
+ case EbtUint: return "uint";
+ case EbtInt64: return "int64_t";
+ case EbtUint64: return "uint64_t";
+ case EbtBool: return "bool";
+ case EbtAtomicUint: return "atomic_uint";
+ case EbtSampler: return "sampler/image";
+ case EbtStruct: return "structure";
+ case EbtBlock: return "block";
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV: return "accelerationStructureNV";
+#endif
+ case EbtReference: return "reference";
+ default: return "unknown type";
+ }
+ }
+
+ TString getCompleteString() const
+ {
+ TString typeString;
+
+ const auto appendStr = [&](const char* s) { typeString.append(s); };
+ const auto appendUint = [&](unsigned int u) { typeString.append(std::to_string(u).c_str()); };
+ const auto appendInt = [&](int i) { typeString.append(std::to_string(i).c_str()); };
+
+ if (qualifier.hasLayout()) {
+ // To reduce noise, skip this if the only layout is an xfb_buffer
+ // with no triggering xfb_offset.
+ TQualifier noXfbBuffer = qualifier;
+ noXfbBuffer.layoutXfbBuffer = TQualifier::layoutXfbBufferEnd;
+ if (noXfbBuffer.hasLayout()) {
+ appendStr("layout(");
+ if (qualifier.hasAnyLocation()) {
+ appendStr(" location=");
+ appendUint(qualifier.layoutLocation);
+ if (qualifier.hasComponent()) {
+ appendStr(" component=");
+ appendUint(qualifier.layoutComponent);
+ }
+ if (qualifier.hasIndex()) {
+ appendStr(" index=");
+ appendUint(qualifier.layoutIndex);
+ }
+ }
+ if (qualifier.hasSet()) {
+ appendStr(" set=");
+ appendUint(qualifier.layoutSet);
+ }
+ if (qualifier.hasBinding()) {
+ appendStr(" binding=");
+ appendUint(qualifier.layoutBinding);
+ }
+ if (qualifier.hasStream()) {
+ appendStr(" stream=");
+ appendUint(qualifier.layoutStream);
+ }
+ if (qualifier.hasMatrix()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutMatrixString(qualifier.layoutMatrix));
+ }
+ if (qualifier.hasPacking()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutPackingString(qualifier.layoutPacking));
+ }
+ if (qualifier.hasOffset()) {
+ appendStr(" offset=");
+ appendInt(qualifier.layoutOffset);
+ }
+ if (qualifier.hasAlign()) {
+ appendStr(" align=");
+ appendInt(qualifier.layoutAlign);
+ }
+ if (qualifier.hasFormat()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutFormatString(qualifier.layoutFormat));
+ }
+ if (qualifier.hasXfbBuffer() && qualifier.hasXfbOffset()) {
+ appendStr(" xfb_buffer=");
+ appendUint(qualifier.layoutXfbBuffer);
+ }
+ if (qualifier.hasXfbOffset()) {
+ appendStr(" xfb_offset=");
+ appendUint(qualifier.layoutXfbOffset);
+ }
+ if (qualifier.hasXfbStride()) {
+ appendStr(" xfb_stride=");
+ appendUint(qualifier.layoutXfbStride);
+ }
+ if (qualifier.hasAttachment()) {
+ appendStr(" input_attachment_index=");
+ appendUint(qualifier.layoutAttachment);
+ }
+ if (qualifier.hasSpecConstantId()) {
+ appendStr(" constant_id=");
+ appendUint(qualifier.layoutSpecConstantId);
+ }
+ if (qualifier.layoutPushConstant)
+ appendStr(" push_constant");
+ if (qualifier.layoutBufferReference)
+ appendStr(" buffer_reference");
+ if (qualifier.hasBufferReferenceAlign()) {
+ appendStr(" buffer_reference_align=");
+ appendUint(1u << qualifier.layoutBufferReferenceAlign);
+ }
+
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutPassthrough)
+ appendStr(" passthrough");
+ if (qualifier.layoutViewportRelative)
+ appendStr(" layoutViewportRelative");
+ if (qualifier.layoutSecondaryViewportRelativeOffset != -2048) {
+ appendStr(" layoutSecondaryViewportRelativeOffset=");
+ appendInt(qualifier.layoutSecondaryViewportRelativeOffset);
+ }
+ if (qualifier.layoutShaderRecordNV)
+ appendStr(" shaderRecordNV");
+#endif
+
+ appendStr(")");
+ }
+ }
+
+ if (qualifier.invariant)
+ appendStr(" invariant");
+ if (qualifier.noContraction)
+ appendStr(" noContraction");
+ if (qualifier.centroid)
+ appendStr(" centroid");
+ if (qualifier.smooth)
+ appendStr(" smooth");
+ if (qualifier.flat)
+ appendStr(" flat");
+ if (qualifier.nopersp)
+ appendStr(" noperspective");
+#ifdef AMD_EXTENSIONS
+ if (qualifier.explicitInterp)
+ appendStr(" __explicitInterpAMD");
+#endif
+#ifdef NV_EXTENSIONS
+ if (qualifier.pervertexNV)
+ appendStr(" pervertexNV");
+ if (qualifier.perPrimitiveNV)
+ appendStr(" perprimitiveNV");
+ if (qualifier.perViewNV)
+ appendStr(" perviewNV");
+ if (qualifier.perTaskNV)
+ appendStr(" taskNV");
+#endif
+ if (qualifier.patch)
+ appendStr(" patch");
+ if (qualifier.sample)
+ appendStr(" sample");
+ if (qualifier.coherent)
+ appendStr(" coherent");
+ if (qualifier.devicecoherent)
+ appendStr(" devicecoherent");
+ if (qualifier.queuefamilycoherent)
+ appendStr(" queuefamilycoherent");
+ if (qualifier.workgroupcoherent)
+ appendStr(" workgroupcoherent");
+ if (qualifier.subgroupcoherent)
+ appendStr(" subgroupcoherent");
+ if (qualifier.nonprivate)
+ appendStr(" nonprivate");
+ if (qualifier.volatil)
+ appendStr(" volatile");
+ if (qualifier.restrict)
+ appendStr(" restrict");
+ if (qualifier.readonly)
+ appendStr(" readonly");
+ if (qualifier.writeonly)
+ appendStr(" writeonly");
+ if (qualifier.specConstant)
+ appendStr(" specialization-constant");
+ if (qualifier.nonUniform)
+ appendStr(" nonuniform");
+ appendStr(" ");
+ appendStr(getStorageQualifierString());
+ if (isArray()) {
+ for(int i = 0; i < (int)arraySizes->getNumDims(); ++i) {
+ int size = arraySizes->getDimSize(i);
+ if (size == UnsizedArraySize && i == 0 && arraySizes->isVariablyIndexed())
+ appendStr(" runtime-sized array of");
+ else {
+ if (size == UnsizedArraySize) {
+ appendStr(" unsized");
+ if (i == 0) {
+ appendStr(" ");
+ appendInt(arraySizes->getImplicitSize());
+ }
+ } else {
+ appendStr(" ");
+ appendInt(arraySizes->getDimSize(i));
+ }
+ appendStr("-element array of");
+ }
+ }
+ }
+ if (isParameterized()) {
+ appendStr("<");
+ for(int i = 0; i < (int)typeParameters->getNumDims(); ++i) {
+ appendInt(typeParameters->getDimSize(i));
+ if (i != (int)typeParameters->getNumDims() - 1)
+ appendStr(", ");
+ }
+ appendStr(">");
+ }
+ if (qualifier.precision != EpqNone) {
+ appendStr(" ");
+ appendStr(getPrecisionQualifierString());
+ }
+ if (isMatrix()) {
+ appendStr(" ");
+ appendInt(matrixCols);
+ appendStr("X");
+ appendInt(matrixRows);
+ appendStr(" matrix of");
+ } else if (isVector()) {
+ appendStr(" ");
+ appendInt(vectorSize);
+ appendStr("-component vector of");
+ }
+
+ appendStr(" ");
+ typeString.append(getBasicTypeString());
+
+ if (qualifier.builtIn != EbvNone) {
+ appendStr(" ");
+ appendStr(getBuiltInVariableString());
+ }
+
+ // Add struct/block members
+ if (isStruct()) {
+ appendStr("{");
+ for (size_t i = 0; i < structure->size(); ++i) {
+ if (! (*structure)[i].type->hiddenMember()) {
+ typeString.append((*structure)[i].type->getCompleteString());
+ typeString.append(" ");
+ typeString.append((*structure)[i].type->getFieldName());
+ if (i < structure->size() - 1)
+ appendStr(", ");
+ }
+ }
+ appendStr("}");
+ }
+
+ return typeString;
+ }
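+
+ // Illustrative (not normative output): for a 'layout(location=0) smooth out
+ // highp vec4', the string built above reads roughly
+ //
+ //     layout( location=0) smooth out highp 4-component vector of float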
+
+ TString getBasicTypeString() const
+ {
+ if (basicType == EbtSampler)
+ return sampler.getString();
+ else
+ return getBasicString();
+ }
+
+ const char* getStorageQualifierString() const { return GetStorageQualifierString(qualifier.storage); }
+ const char* getBuiltInVariableString() const { return GetBuiltInVariableString(qualifier.builtIn); }
+ const char* getPrecisionQualifierString() const { return GetPrecisionQualifierString(qualifier.precision); }
+ const TTypeList* getStruct() const { assert(isStruct()); return structure; }
+ void setStruct(TTypeList* s) { assert(isStruct()); structure = s; }
+ TTypeList* getWritableStruct() const { assert(isStruct()); return structure; } // This should only be used when known to not be sharing with other threads
+
+ int computeNumComponents() const
+ {
+ int components = 0;
+
+ if (getBasicType() == EbtStruct || getBasicType() == EbtBlock) {
+ for (TTypeList::const_iterator tl = getStruct()->begin(); tl != getStruct()->end(); tl++)
+ components += ((*tl).type)->computeNumComponents();
+ } else if (matrixCols)
+ components = matrixCols * matrixRows;
+ else
+ components = vectorSize;
+
+ if (arraySizes != nullptr) {
+ components *= arraySizes->getCumulativeSize();
+ }
+
+ return components;
+ }
+
+ // append this type's mangled name to the passed in 'name'
+ void appendMangledName(TString& name) const
+ {
+ buildMangledName(name);
+ name += ';' ;
+ }
+
+ // Do two structure types match? They could be declared independently,
+ // in different places, but still might satisfy the definition of matching.
+ // From the spec:
+ //
+ // "Structures must have the same name, sequence of type names, and
+ // type definitions, and member names to be considered the same type.
+ // This rule applies recursively for nested or embedded types."
+ //
+ bool sameStructType(const TType& right) const
+ {
+ // Most commonly, they are both nullptr, or the same pointer to the same actual structure
+ if ((!isStruct() && !right.isStruct()) ||
+ (isStruct() && right.isStruct() && structure == right.structure))
+ return true;
+
+ // Both being nullptr was caught above, now they both have to be structures of the same number of elements
+ if (!isStruct() || !right.isStruct() ||
+ structure->size() != right.structure->size())
+ return false;
+
+ // Structure names have to match
+ if (*typeName != *right.typeName)
+ return false;
+
+ // Compare the names and types of all the members, which have to match
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->getFieldName() != (*right.structure)[i].type->getFieldName())
+ return false;
+
+ if (*(*structure)[i].type != *(*right.structure)[i].type)
+ return false;
+ }
+
+ return true;
+ }
+
+ bool sameReferenceType(const TType& right) const
+ {
+ if ((basicType == EbtReference) != (right.basicType == EbtReference))
+ return false;
+
+ if ((basicType != EbtReference) && (right.basicType != EbtReference))
+ return true;
+
+ assert(referentType != nullptr);
+ assert(right.referentType != nullptr);
+
+ if (referentType == right.referentType)
+ return true;
+
+ return *referentType == *right.referentType;
+ }
+
+ // See if two types match, in all aspects except arrayness
+ bool sameElementType(const TType& right) const
+ {
+ return basicType == right.basicType && sameElementShape(right);
+ }
+
+ // See if two type's arrayness match
+ bool sameArrayness(const TType& right) const
+ {
+ return ((arraySizes == nullptr && right.arraySizes == nullptr) ||
+ (arraySizes != nullptr && right.arraySizes != nullptr && *arraySizes == *right.arraySizes));
+ }
+
+ // See if two type's arrayness match in everything except their outer dimension
+ bool sameInnerArrayness(const TType& right) const
+ {
+ assert(arraySizes != nullptr && right.arraySizes != nullptr);
+ return arraySizes->sameInnerArrayness(*right.arraySizes);
+ }
+
+ // See if two type's parameters match
+ bool sameTypeParameters(const TType& right) const
+ {
+ return ((typeParameters == nullptr && right.typeParameters == nullptr) ||
+ (typeParameters != nullptr && right.typeParameters != nullptr && *typeParameters == *right.typeParameters));
+ }
+
+ // See if two type's elements match in all ways except basic type
+ bool sameElementShape(const TType& right) const
+ {
+ return sampler == right.sampler &&
+ vectorSize == right.vectorSize &&
+ matrixCols == right.matrixCols &&
+ matrixRows == right.matrixRows &&
+ vector1 == right.vector1 &&
+ coopmat == right.coopmat &&
+ sameStructType(right) &&
+ sameReferenceType(right);
+ }
+
+ // See if a cooperative matrix type parameter with unspecified parameters is
+ // an OK function parameter
+ bool coopMatParameterOK(const TType& right) const
+ {
+ return coopmat && right.coopmat &&
+ typeParameters == nullptr && right.typeParameters != nullptr;
+ }
+
+ // See if two types match in all ways (just the actual type, not qualification)
+ bool operator==(const TType& right) const
+ {
+ return sameElementType(right) && sameArrayness(right) && sameTypeParameters(right);
+ }
+
+ bool operator!=(const TType& right) const
+ {
+ return ! operator==(right);
+ }
+
+ unsigned int getBufferReferenceAlignment() const
+ {
+ if (getBasicType() == glslang::EbtReference) {
+ return getReferentType()->getQualifier().hasBufferReferenceAlign() ?
+ (1u << getReferentType()->getQualifier().layoutBufferReferenceAlign) : 16u;
+ } else {
+ return 0;
+ }
+ }
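+
+ // Illustrative: layoutBufferReferenceAlign stores log2 of the alignment, so
+ // a declared 'layout(buffer_reference_align = 32)' is held as 5 and recovered
+ // here as 1u << 5 == 32; unqualified buffer references default to 16 bytes.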
+
+protected:
+ // Require consumer to pick between deep copy and shallow copy.
+ TType(const TType& type);
+ TType& operator=(const TType& type);
+
+ // Recursively copy a type graph, while preserving the graph-like
+ // quality. That is, don't make more than one copy of a structure that
+ // gets reused multiple times in the type graph.
+ void deepCopy(const TType& copyOf, TMap<TTypeList*,TTypeList*>& copiedMap)
+ {
+ shallowCopy(copyOf);
+
+ if (copyOf.arraySizes) {
+ arraySizes = new TArraySizes;
+ *arraySizes = *copyOf.arraySizes;
+ }
+
+ if (copyOf.typeParameters) {
+ typeParameters = new TArraySizes;
+ *typeParameters = *copyOf.typeParameters;
+ }
+
+ if (copyOf.isStruct() && copyOf.structure) {
+ auto prevCopy = copiedMap.find(copyOf.structure);
+ if (prevCopy != copiedMap.end())
+ structure = prevCopy->second;
+ else {
+ structure = new TTypeList;
+ copiedMap[copyOf.structure] = structure;
+ for (unsigned int i = 0; i < copyOf.structure->size(); ++i) {
+ TTypeLoc typeLoc;
+ typeLoc.loc = (*copyOf.structure)[i].loc;
+ typeLoc.type = new TType();
+ typeLoc.type->deepCopy(*(*copyOf.structure)[i].type, copiedMap);
+ structure->push_back(typeLoc);
+ }
+ }
+ }
+
+ if (copyOf.fieldName)
+ fieldName = NewPoolTString(copyOf.fieldName->c_str());
+ if (copyOf.typeName)
+ typeName = NewPoolTString(copyOf.typeName->c_str());
+ }
+
+
+ void buildMangledName(TString&) const;
+
+ TBasicType basicType : 8;
+ int vectorSize : 4; // 1 means either scalar or 1-component vector; see vector1 to disambiguate.
+ int matrixCols : 4;
+ int matrixRows : 4;
+ bool vector1 : 1; // Backward-compatible tracking of a 1-component vector distinguished from a scalar.
+ // GLSL 4.5 never has a 1-component vector, so this will always be false until such
+ // functionality is added.
+ // HLSL does have 1-component vectors, so this will be true to disambiguate
+ // from a scalar.
+ bool coopmat : 1;
+ TQualifier qualifier;
+
+ TArraySizes* arraySizes; // nullptr unless an array; can be shared across types
+ // A type can't be both a structure (EbtStruct/EbtBlock) and a reference (EbtReference), so
+ // conserve space by making these a union
+ union {
+ TTypeList* structure; // invalid unless this is a struct; can be shared across types
+ TType *referentType; // invalid unless this is an EbtReference
+ };
+ TString *fieldName; // for structure field names
+ TString *typeName; // for structure type name
+ TSampler sampler;
+ TArraySizes* typeParameters;// nullptr unless a parameterized type; can be shared across types
+};
+
+} // end namespace glslang
+
+#endif // _TYPES_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/arrays.h b/src/3rdparty/glslang/glslang/Include/arrays.h
new file mode 100644
index 0000000..7f047d9
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/arrays.h
@@ -0,0 +1,341 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement types for tracking GLSL arrays, arrays of arrays, etc.
+//
+
+#ifndef _ARRAYS_INCLUDED
+#define _ARRAYS_INCLUDED
+
+#include <algorithm>
+
+namespace glslang {
+
+// This is used to mean there is no size yet (unsized); it is waiting to get a size from somewhere else.
+const int UnsizedArraySize = 0;
+
+class TIntermTyped;
+extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
+
+// Specialization constants need both a nominal size and a node that defines
+// the specialization constant being used. Array types are the same when their
+// size and specialization constant nodes are the same.
+struct TArraySize {
+ unsigned int size;
+ TIntermTyped* node; // nullptr means no specialization constant node
+ bool operator==(const TArraySize& rhs) const
+ {
+ if (size != rhs.size)
+ return false;
+ if (node == nullptr || rhs.node == nullptr)
+ return node == rhs.node;
+
+ return SameSpecializationConstants(node, rhs.node);
+ }
+};
+
+//
+// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
+// It has generic-container semantics, while TArraySizes has array-of-array semantics.
+// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
+//
+struct TSmallArrayVector {
+ //
+ // TODO: memory: TSmallArrayVector is intended to be smaller.
+ // Almost all arrays could be handled by two sizes each fitting
+ // in 16 bits, needing a real vector only in the cases where there
+ // are more than 3 sizes or a size needing more than 16 bits.
+ //
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TSmallArrayVector() : sizes(nullptr) { }
+ virtual ~TSmallArrayVector() { dealloc(); }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TSmallArrayVector& operator=(const TSmallArrayVector& from)
+ {
+ if (from.sizes == nullptr)
+ sizes = nullptr;
+ else {
+ alloc();
+ *sizes = *from.sizes;
+ }
+
+ return *this;
+ }
+
+ int size() const
+ {
+ if (sizes == nullptr)
+ return 0;
+ return (int)sizes->size();
+ }
+
+ unsigned int frontSize() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().size;
+ }
+
+ TIntermTyped* frontNode() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().node;
+ }
+
+ void changeFront(unsigned int s)
+ {
+ assert(sizes != nullptr);
+ // this should only happen for implicitly sized arrays, not specialization constants
+ assert(sizes->front().node == nullptr);
+ sizes->front().size = s;
+ }
+
+ void push_back(unsigned int e, TIntermTyped* n)
+ {
+ alloc();
+ TArraySize pair = { e, n };
+ sizes->push_back(pair);
+ }
+
+ void push_back(const TSmallArrayVector& newDims)
+ {
+ alloc();
+ sizes->insert(sizes->end(), newDims.sizes->begin(), newDims.sizes->end());
+ }
+
+ void pop_front()
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ if (sizes->size() == 1)
+ dealloc();
+ else
+ sizes->erase(sizes->begin());
+ }
+
+ // 'this' should currently not be holding anything, and copyNonFront
+ // will make it hold a copy of all but the first element of rhs.
+ // (This would be useful for making a type that is dereferenced by
+ // one dimension.)
+ void copyNonFront(const TSmallArrayVector& rhs)
+ {
+ assert(sizes == nullptr);
+ if (rhs.size() > 1) {
+ alloc();
+ sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
+ }
+ }
+
+ unsigned int getDimSize(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].size;
+ }
+
+ void setDimSize(int i, unsigned int size) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ assert((*sizes)[i].node == nullptr);
+ (*sizes)[i].size = size;
+ }
+
+ TIntermTyped* getDimNode(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].node;
+ }
+
+ bool operator==(const TSmallArrayVector& rhs) const
+ {
+ if (sizes == nullptr && rhs.sizes == nullptr)
+ return true;
+ if (sizes == nullptr || rhs.sizes == nullptr)
+ return false;
+ return *sizes == *rhs.sizes;
+ }
+ bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
+
+protected:
+ TSmallArrayVector(const TSmallArrayVector&);
+
+ void alloc()
+ {
+ if (sizes == nullptr)
+ sizes = new TVector<TArraySize>;
+ }
+ void dealloc()
+ {
+ delete sizes;
+ sizes = nullptr;
+ }
+
+ TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
+};
+
+//
+// Represent an array, or array of arrays, to arbitrary depth. This is not
+// done through a hierarchy of types in a type tree; rather, all contiguous arrayness
+// in the type hierarchy is localized into this single cumulative object.
+//
+// The arrayness in TType is a pointer, so that it can be non-allocated and zero
+// for the vast majority of types that are non-array types.
+//
+// Order Policy: these are all identical:
+// - left to right order within a contiguous set of ...[..][..][..]... in the source language
+// - index order 0, 1, 2, ... within the 'sizes' member below
+// - outer-most to inner-most
+//
+struct TArraySizes {
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TArraySizes() : implicitArraySize(1), variablyIndexed(false) { }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TArraySizes& operator=(const TArraySizes& from)
+ {
+ implicitArraySize = from.implicitArraySize;
+ variablyIndexed = from.variablyIndexed;
+ sizes = from.sizes;
+
+ return *this;
+ }
+
+ // translate from array-of-array semantics to container semantics
+ int getNumDims() const { return sizes.size(); }
+ int getDimSize(int dim) const { return sizes.getDimSize(dim); }
+ TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); }
+ void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); }
+ int getOuterSize() const { return sizes.frontSize(); }
+ TIntermTyped* getOuterNode() const { return sizes.frontNode(); }
+ int getCumulativeSize() const
+ {
+ int size = 1;
+ for (int d = 0; d < sizes.size(); ++d) {
+ // this only makes sense in paths that have a known array size
+ assert(sizes.getDimSize(d) != UnsizedArraySize);
+ size *= sizes.getDimSize(d);
+ }
+ return size;
+ }
+ void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
+ void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
+ void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
+ void addInnerSize(TArraySize pair) {
+ sizes.push_back(pair.size, pair.node);
+ }
+ void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); }
+ void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
+ int getImplicitSize() const { return implicitArraySize; }
+ void updateImplicitSize(int s) { implicitArraySize = std::max(implicitArraySize, s); }
+ bool isInnerUnsized() const
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
+ return true;
+ }
+
+ return false;
+ }
+ bool clearInnerUnsized()
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
+ setDimSize(d, 1);
+ }
+
+ return false;
+ }
+ bool isInnerSpecialization() const
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimNode(d) != nullptr)
+ return true;
+ }
+
+ return false;
+ }
+ bool isOuterSpecialization()
+ {
+ return sizes.getDimNode(0) != nullptr;
+ }
+
+ bool hasUnsized() const { return getOuterSize() == UnsizedArraySize || isInnerUnsized(); }
+ bool isSized() const { return getOuterSize() != UnsizedArraySize; }
+ void dereference() { sizes.pop_front(); }
+ void copyDereferenced(const TArraySizes& rhs)
+ {
+ assert(sizes.size() == 0);
+ if (rhs.sizes.size() > 1)
+ sizes.copyNonFront(rhs.sizes);
+ }
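+ // (Illustrative note for dereference()/copyDereferenced() above: indexing once into
+ // "float a[2][3]" strips the outer dimension, leaving one dimension of size 3.)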
+
+ bool sameInnerArrayness(const TArraySizes& rhs) const
+ {
+ if (sizes.size() != rhs.sizes.size())
+ return false;
+
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) ||
+ sizes.getDimNode(d) != rhs.sizes.getDimNode(d))
+ return false;
+ }
+
+ return true;
+ }
+
+ void setVariablyIndexed() { variablyIndexed = true; }
+ bool isVariablyIndexed() const { return variablyIndexed; }
+
+ bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; }
+ bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; }
+
+protected:
+ TSmallArrayVector sizes;
+
+ TArraySizes(const TArraySizes&);
+
+ // For tracking maximum referenced compile-time constant index.
+ // Applies only to the outer-most dimension. Potentially becomes
+ // the implicit size of the array, if not variably indexed and
+ // otherwise legal.
+ int implicitArraySize;
+ bool variablyIndexed; // true if array is indexed with a non compile-time constant
+};
+
+} // end namespace glslang
+
+#endif // _ARRAYS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/intermediate.h b/src/3rdparty/glslang/glslang/Include/intermediate.h
new file mode 100644
index 0000000..32e684c
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/intermediate.h
@@ -0,0 +1,1730 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Definition of the in-memory high-level intermediate representation
+// of shaders. This is a tree that the parser creates.
+//
+// Nodes in the tree are defined as a hierarchy of classes derived from
+// TIntermNode. Each is a node in a tree. There is no preset branching factor;
+// each node can have its own type of list of children.
+//
+
+#ifndef __INTERMEDIATE_H
+#define __INTERMEDIATE_H
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+ #pragma warning(disable : 4464) // relative include path contains '..'
+ #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
+#endif
+
+#include "../Include/Common.h"
+#include "../Include/Types.h"
+#include "../Include/ConstantUnion.h"
+
+namespace glslang {
+
+class TIntermediate;
+
+//
+// Operators used by the high-level (parse tree) representation.
+//
+enum TOperator {
+ EOpNull, // if in a node, should only mean a node is still being built
+ EOpSequence, // denotes a list of statements, or parameters, etc.
+ EOpLinkerObjects, // for an aggregate node of objects the linker may need, if not referenced by the rest of the AST
+ EOpFunctionCall,
+ EOpFunction, // For function definition
+ EOpParameters, // an aggregate listing the parameters to a function
+
+ //
+ // Unary operators
+ //
+
+ EOpNegative,
+ EOpLogicalNot,
+ EOpVectorLogicalNot,
+ EOpBitwiseNot,
+
+ EOpPostIncrement,
+ EOpPostDecrement,
+ EOpPreIncrement,
+ EOpPreDecrement,
+
+ // (u)int* -> bool
+ EOpConvInt8ToBool,
+ EOpConvUint8ToBool,
+ EOpConvInt16ToBool,
+ EOpConvUint16ToBool,
+ EOpConvIntToBool,
+ EOpConvUintToBool,
+ EOpConvInt64ToBool,
+ EOpConvUint64ToBool,
+
+ // float* -> bool
+ EOpConvFloat16ToBool,
+ EOpConvFloatToBool,
+ EOpConvDoubleToBool,
+
+ // bool -> (u)int*
+ EOpConvBoolToInt8,
+ EOpConvBoolToUint8,
+ EOpConvBoolToInt16,
+ EOpConvBoolToUint16,
+ EOpConvBoolToInt,
+ EOpConvBoolToUint,
+ EOpConvBoolToInt64,
+ EOpConvBoolToUint64,
+
+ // bool -> float*
+ EOpConvBoolToFloat16,
+ EOpConvBoolToFloat,
+ EOpConvBoolToDouble,
+
+ // int8_t -> (u)int*
+ EOpConvInt8ToInt16,
+ EOpConvInt8ToInt,
+ EOpConvInt8ToInt64,
+ EOpConvInt8ToUint8,
+ EOpConvInt8ToUint16,
+ EOpConvInt8ToUint,
+ EOpConvInt8ToUint64,
+
+ // uint8_t -> (u)int*
+ EOpConvUint8ToInt8,
+ EOpConvUint8ToInt16,
+ EOpConvUint8ToInt,
+ EOpConvUint8ToInt64,
+ EOpConvUint8ToUint16,
+ EOpConvUint8ToUint,
+ EOpConvUint8ToUint64,
+
+ // int8_t -> float*
+ EOpConvInt8ToFloat16,
+ EOpConvInt8ToFloat,
+ EOpConvInt8ToDouble,
+
+ // uint8_t -> float*
+ EOpConvUint8ToFloat16,
+ EOpConvUint8ToFloat,
+ EOpConvUint8ToDouble,
+
+ // int16_t -> (u)int*
+ EOpConvInt16ToInt8,
+ EOpConvInt16ToInt,
+ EOpConvInt16ToInt64,
+ EOpConvInt16ToUint8,
+ EOpConvInt16ToUint16,
+ EOpConvInt16ToUint,
+ EOpConvInt16ToUint64,
+
+ // uint16_t -> (u)int*
+ EOpConvUint16ToInt8,
+ EOpConvUint16ToInt16,
+ EOpConvUint16ToInt,
+ EOpConvUint16ToInt64,
+ EOpConvUint16ToUint8,
+ EOpConvUint16ToUint,
+ EOpConvUint16ToUint64,
+
+ // int16_t -> float*
+ EOpConvInt16ToFloat16,
+ EOpConvInt16ToFloat,
+ EOpConvInt16ToDouble,
+
+ // uint16_t -> float*
+ EOpConvUint16ToFloat16,
+ EOpConvUint16ToFloat,
+ EOpConvUint16ToDouble,
+
+ // int32_t -> (u)int*
+ EOpConvIntToInt8,
+ EOpConvIntToInt16,
+ EOpConvIntToInt64,
+ EOpConvIntToUint8,
+ EOpConvIntToUint16,
+ EOpConvIntToUint,
+ EOpConvIntToUint64,
+
+ // uint32_t -> (u)int*
+ EOpConvUintToInt8,
+ EOpConvUintToInt16,
+ EOpConvUintToInt,
+ EOpConvUintToInt64,
+ EOpConvUintToUint8,
+ EOpConvUintToUint16,
+ EOpConvUintToUint64,
+
+ // int32_t -> float*
+ EOpConvIntToFloat16,
+ EOpConvIntToFloat,
+ EOpConvIntToDouble,
+
+ // uint32_t -> float*
+ EOpConvUintToFloat16,
+ EOpConvUintToFloat,
+ EOpConvUintToDouble,
+
+ // int64_t -> (u)int*
+ EOpConvInt64ToInt8,
+ EOpConvInt64ToInt16,
+ EOpConvInt64ToInt,
+ EOpConvInt64ToUint8,
+ EOpConvInt64ToUint16,
+ EOpConvInt64ToUint,
+ EOpConvInt64ToUint64,
+
+ // uint64_t -> (u)int*
+ EOpConvUint64ToInt8,
+ EOpConvUint64ToInt16,
+ EOpConvUint64ToInt,
+ EOpConvUint64ToInt64,
+ EOpConvUint64ToUint8,
+ EOpConvUint64ToUint16,
+ EOpConvUint64ToUint,
+
+ // int64_t -> float*
+ EOpConvInt64ToFloat16,
+ EOpConvInt64ToFloat,
+ EOpConvInt64ToDouble,
+
+ // uint64_t -> float*
+ EOpConvUint64ToFloat16,
+ EOpConvUint64ToFloat,
+ EOpConvUint64ToDouble,
+
+ // float16_t -> (u)int*
+ EOpConvFloat16ToInt8,
+ EOpConvFloat16ToInt16,
+ EOpConvFloat16ToInt,
+ EOpConvFloat16ToInt64,
+ EOpConvFloat16ToUint8,
+ EOpConvFloat16ToUint16,
+ EOpConvFloat16ToUint,
+ EOpConvFloat16ToUint64,
+
+ // float16_t -> float*
+ EOpConvFloat16ToFloat,
+ EOpConvFloat16ToDouble,
+
+ // float -> (u)int*
+ EOpConvFloatToInt8,
+ EOpConvFloatToInt16,
+ EOpConvFloatToInt,
+ EOpConvFloatToInt64,
+ EOpConvFloatToUint8,
+ EOpConvFloatToUint16,
+ EOpConvFloatToUint,
+ EOpConvFloatToUint64,
+
+ // float -> float*
+ EOpConvFloatToFloat16,
+ EOpConvFloatToDouble,
+
+ // float64_t -> (u)int*
+ EOpConvDoubleToInt8,
+ EOpConvDoubleToInt16,
+ EOpConvDoubleToInt,
+ EOpConvDoubleToInt64,
+ EOpConvDoubleToUint8,
+ EOpConvDoubleToUint16,
+ EOpConvDoubleToUint,
+ EOpConvDoubleToUint64,
+
+ // float64_t -> float*
+ EOpConvDoubleToFloat16,
+ EOpConvDoubleToFloat,
+
+ // uint64_t <-> pointer
+ EOpConvUint64ToPtr,
+ EOpConvPtrToUint64,
+
+ //
+ // binary operations
+ //
+
+ EOpAdd,
+ EOpSub,
+ EOpMul,
+ EOpDiv,
+ EOpMod,
+ EOpRightShift,
+ EOpLeftShift,
+ EOpAnd,
+ EOpInclusiveOr,
+ EOpExclusiveOr,
+ EOpEqual,
+ EOpNotEqual,
+ EOpVectorEqual,
+ EOpVectorNotEqual,
+ EOpLessThan,
+ EOpGreaterThan,
+ EOpLessThanEqual,
+ EOpGreaterThanEqual,
+ EOpComma,
+
+ EOpVectorTimesScalar,
+ EOpVectorTimesMatrix,
+ EOpMatrixTimesVector,
+ EOpMatrixTimesScalar,
+
+ EOpLogicalOr,
+ EOpLogicalXor,
+ EOpLogicalAnd,
+
+ EOpIndexDirect,
+ EOpIndexIndirect,
+ EOpIndexDirectStruct,
+
+ EOpVectorSwizzle,
+
+ EOpMethod,
+ EOpScoping,
+
+ //
+ // Built-in functions mapped to operators
+ //
+
+ EOpRadians,
+ EOpDegrees,
+ EOpSin,
+ EOpCos,
+ EOpTan,
+ EOpAsin,
+ EOpAcos,
+ EOpAtan,
+ EOpSinh,
+ EOpCosh,
+ EOpTanh,
+ EOpAsinh,
+ EOpAcosh,
+ EOpAtanh,
+
+ EOpPow,
+ EOpExp,
+ EOpLog,
+ EOpExp2,
+ EOpLog2,
+ EOpSqrt,
+ EOpInverseSqrt,
+
+ EOpAbs,
+ EOpSign,
+ EOpFloor,
+ EOpTrunc,
+ EOpRound,
+ EOpRoundEven,
+ EOpCeil,
+ EOpFract,
+ EOpModf,
+ EOpMin,
+ EOpMax,
+ EOpClamp,
+ EOpMix,
+ EOpStep,
+ EOpSmoothStep,
+
+ EOpIsNan,
+ EOpIsInf,
+
+ EOpFma,
+
+ EOpFrexp,
+ EOpLdexp,
+
+ EOpFloatBitsToInt,
+ EOpFloatBitsToUint,
+ EOpIntBitsToFloat,
+ EOpUintBitsToFloat,
+ EOpDoubleBitsToInt64,
+ EOpDoubleBitsToUint64,
+ EOpInt64BitsToDouble,
+ EOpUint64BitsToDouble,
+ EOpFloat16BitsToInt16,
+ EOpFloat16BitsToUint16,
+ EOpInt16BitsToFloat16,
+ EOpUint16BitsToFloat16,
+ EOpPackSnorm2x16,
+ EOpUnpackSnorm2x16,
+ EOpPackUnorm2x16,
+ EOpUnpackUnorm2x16,
+ EOpPackSnorm4x8,
+ EOpUnpackSnorm4x8,
+ EOpPackUnorm4x8,
+ EOpUnpackUnorm4x8,
+ EOpPackHalf2x16,
+ EOpUnpackHalf2x16,
+ EOpPackDouble2x32,
+ EOpUnpackDouble2x32,
+ EOpPackInt2x32,
+ EOpUnpackInt2x32,
+ EOpPackUint2x32,
+ EOpUnpackUint2x32,
+ EOpPackFloat2x16,
+ EOpUnpackFloat2x16,
+ EOpPackInt2x16,
+ EOpUnpackInt2x16,
+ EOpPackUint2x16,
+ EOpUnpackUint2x16,
+ EOpPackInt4x16,
+ EOpUnpackInt4x16,
+ EOpPackUint4x16,
+ EOpUnpackUint4x16,
+ EOpPack16,
+ EOpPack32,
+ EOpPack64,
+ EOpUnpack32,
+ EOpUnpack16,
+ EOpUnpack8,
+
+ EOpLength,
+ EOpDistance,
+ EOpDot,
+ EOpCross,
+ EOpNormalize,
+ EOpFaceForward,
+ EOpReflect,
+ EOpRefract,
+
+#ifdef AMD_EXTENSIONS
+ EOpMin3,
+ EOpMax3,
+ EOpMid3,
+#endif
+
+ EOpDPdx, // Fragment only
+ EOpDPdy, // Fragment only
+ EOpFwidth, // Fragment only
+ EOpDPdxFine, // Fragment only
+ EOpDPdyFine, // Fragment only
+ EOpFwidthFine, // Fragment only
+ EOpDPdxCoarse, // Fragment only
+ EOpDPdyCoarse, // Fragment only
+ EOpFwidthCoarse, // Fragment only
+
+ EOpInterpolateAtCentroid, // Fragment only
+ EOpInterpolateAtSample, // Fragment only
+ EOpInterpolateAtOffset, // Fragment only
+
+#ifdef AMD_EXTENSIONS
+ EOpInterpolateAtVertex,
+#endif
+
+ EOpMatrixTimesMatrix,
+ EOpOuterProduct,
+ EOpDeterminant,
+ EOpMatrixInverse,
+ EOpTranspose,
+
+ EOpFtransform,
+
+ EOpNoise,
+
+ EOpEmitVertex, // geometry only
+ EOpEndPrimitive, // geometry only
+ EOpEmitStreamVertex, // geometry only
+ EOpEndStreamPrimitive, // geometry only
+
+ EOpBarrier,
+ EOpMemoryBarrier,
+ EOpMemoryBarrierAtomicCounter,
+ EOpMemoryBarrierBuffer,
+ EOpMemoryBarrierImage,
+ EOpMemoryBarrierShared, // compute only
+ EOpGroupMemoryBarrier, // compute only
+
+ EOpBallot,
+ EOpReadInvocation,
+ EOpReadFirstInvocation,
+
+ EOpAnyInvocation,
+ EOpAllInvocations,
+ EOpAllInvocationsEqual,
+
+ EOpSubgroupGuardStart,
+ EOpSubgroupBarrier,
+ EOpSubgroupMemoryBarrier,
+ EOpSubgroupMemoryBarrierBuffer,
+ EOpSubgroupMemoryBarrierImage,
+ EOpSubgroupMemoryBarrierShared, // compute only
+ EOpSubgroupElect,
+ EOpSubgroupAll,
+ EOpSubgroupAny,
+ EOpSubgroupAllEqual,
+ EOpSubgroupBroadcast,
+ EOpSubgroupBroadcastFirst,
+ EOpSubgroupBallot,
+ EOpSubgroupInverseBallot,
+ EOpSubgroupBallotBitExtract,
+ EOpSubgroupBallotBitCount,
+ EOpSubgroupBallotInclusiveBitCount,
+ EOpSubgroupBallotExclusiveBitCount,
+ EOpSubgroupBallotFindLSB,
+ EOpSubgroupBallotFindMSB,
+ EOpSubgroupShuffle,
+ EOpSubgroupShuffleXor,
+ EOpSubgroupShuffleUp,
+ EOpSubgroupShuffleDown,
+ EOpSubgroupAdd,
+ EOpSubgroupMul,
+ EOpSubgroupMin,
+ EOpSubgroupMax,
+ EOpSubgroupAnd,
+ EOpSubgroupOr,
+ EOpSubgroupXor,
+ EOpSubgroupInclusiveAdd,
+ EOpSubgroupInclusiveMul,
+ EOpSubgroupInclusiveMin,
+ EOpSubgroupInclusiveMax,
+ EOpSubgroupInclusiveAnd,
+ EOpSubgroupInclusiveOr,
+ EOpSubgroupInclusiveXor,
+ EOpSubgroupExclusiveAdd,
+ EOpSubgroupExclusiveMul,
+ EOpSubgroupExclusiveMin,
+ EOpSubgroupExclusiveMax,
+ EOpSubgroupExclusiveAnd,
+ EOpSubgroupExclusiveOr,
+ EOpSubgroupExclusiveXor,
+ EOpSubgroupClusteredAdd,
+ EOpSubgroupClusteredMul,
+ EOpSubgroupClusteredMin,
+ EOpSubgroupClusteredMax,
+ EOpSubgroupClusteredAnd,
+ EOpSubgroupClusteredOr,
+ EOpSubgroupClusteredXor,
+ EOpSubgroupQuadBroadcast,
+ EOpSubgroupQuadSwapHorizontal,
+ EOpSubgroupQuadSwapVertical,
+ EOpSubgroupQuadSwapDiagonal,
+
+#ifdef NV_EXTENSIONS
+ EOpSubgroupPartition,
+ EOpSubgroupPartitionedAdd,
+ EOpSubgroupPartitionedMul,
+ EOpSubgroupPartitionedMin,
+ EOpSubgroupPartitionedMax,
+ EOpSubgroupPartitionedAnd,
+ EOpSubgroupPartitionedOr,
+ EOpSubgroupPartitionedXor,
+ EOpSubgroupPartitionedInclusiveAdd,
+ EOpSubgroupPartitionedInclusiveMul,
+ EOpSubgroupPartitionedInclusiveMin,
+ EOpSubgroupPartitionedInclusiveMax,
+ EOpSubgroupPartitionedInclusiveAnd,
+ EOpSubgroupPartitionedInclusiveOr,
+ EOpSubgroupPartitionedInclusiveXor,
+ EOpSubgroupPartitionedExclusiveAdd,
+ EOpSubgroupPartitionedExclusiveMul,
+ EOpSubgroupPartitionedExclusiveMin,
+ EOpSubgroupPartitionedExclusiveMax,
+ EOpSubgroupPartitionedExclusiveAnd,
+ EOpSubgroupPartitionedExclusiveOr,
+ EOpSubgroupPartitionedExclusiveXor,
+#endif
+
+ EOpSubgroupGuardStop,
+
+#ifdef AMD_EXTENSIONS
+ EOpMinInvocations,
+ EOpMaxInvocations,
+ EOpAddInvocations,
+ EOpMinInvocationsNonUniform,
+ EOpMaxInvocationsNonUniform,
+ EOpAddInvocationsNonUniform,
+ EOpMinInvocationsInclusiveScan,
+ EOpMaxInvocationsInclusiveScan,
+ EOpAddInvocationsInclusiveScan,
+ EOpMinInvocationsInclusiveScanNonUniform,
+ EOpMaxInvocationsInclusiveScanNonUniform,
+ EOpAddInvocationsInclusiveScanNonUniform,
+ EOpMinInvocationsExclusiveScan,
+ EOpMaxInvocationsExclusiveScan,
+ EOpAddInvocationsExclusiveScan,
+ EOpMinInvocationsExclusiveScanNonUniform,
+ EOpMaxInvocationsExclusiveScanNonUniform,
+ EOpAddInvocationsExclusiveScanNonUniform,
+ EOpSwizzleInvocations,
+ EOpSwizzleInvocationsMasked,
+ EOpWriteInvocation,
+ EOpMbcnt,
+
+ EOpCubeFaceIndex,
+ EOpCubeFaceCoord,
+ EOpTime,
+#endif
+
+ EOpAtomicAdd,
+ EOpAtomicMin,
+ EOpAtomicMax,
+ EOpAtomicAnd,
+ EOpAtomicOr,
+ EOpAtomicXor,
+ EOpAtomicExchange,
+ EOpAtomicCompSwap,
+ EOpAtomicLoad,
+ EOpAtomicStore,
+
+ EOpAtomicCounterIncrement, // results in pre-increment value
+ EOpAtomicCounterDecrement, // results in post-decrement value
+ EOpAtomicCounter,
+ EOpAtomicCounterAdd,
+ EOpAtomicCounterSubtract,
+ EOpAtomicCounterMin,
+ EOpAtomicCounterMax,
+ EOpAtomicCounterAnd,
+ EOpAtomicCounterOr,
+ EOpAtomicCounterXor,
+ EOpAtomicCounterExchange,
+ EOpAtomicCounterCompSwap,
+
+ EOpAny,
+ EOpAll,
+
+ EOpCooperativeMatrixLoad,
+ EOpCooperativeMatrixStore,
+ EOpCooperativeMatrixMulAdd,
+
+ //
+ // Branch
+ //
+
+ EOpKill, // Fragment only
+ EOpReturn,
+ EOpBreak,
+ EOpContinue,
+ EOpCase,
+ EOpDefault,
+
+ //
+ // Constructors
+ //
+
+ EOpConstructGuardStart,
+ EOpConstructInt, // these first scalar forms also identify what implicit conversion is needed
+ EOpConstructUint,
+ EOpConstructInt8,
+ EOpConstructUint8,
+ EOpConstructInt16,
+ EOpConstructUint16,
+ EOpConstructInt64,
+ EOpConstructUint64,
+ EOpConstructBool,
+ EOpConstructFloat,
+ EOpConstructDouble,
+ EOpConstructVec2,
+ EOpConstructVec3,
+ EOpConstructVec4,
+ EOpConstructDVec2,
+ EOpConstructDVec3,
+ EOpConstructDVec4,
+ EOpConstructBVec2,
+ EOpConstructBVec3,
+ EOpConstructBVec4,
+ EOpConstructI8Vec2,
+ EOpConstructI8Vec3,
+ EOpConstructI8Vec4,
+ EOpConstructU8Vec2,
+ EOpConstructU8Vec3,
+ EOpConstructU8Vec4,
+ EOpConstructI16Vec2,
+ EOpConstructI16Vec3,
+ EOpConstructI16Vec4,
+ EOpConstructU16Vec2,
+ EOpConstructU16Vec3,
+ EOpConstructU16Vec4,
+ EOpConstructIVec2,
+ EOpConstructIVec3,
+ EOpConstructIVec4,
+ EOpConstructUVec2,
+ EOpConstructUVec3,
+ EOpConstructUVec4,
+ EOpConstructI64Vec2,
+ EOpConstructI64Vec3,
+ EOpConstructI64Vec4,
+ EOpConstructU64Vec2,
+ EOpConstructU64Vec3,
+ EOpConstructU64Vec4,
+ EOpConstructMat2x2,
+ EOpConstructMat2x3,
+ EOpConstructMat2x4,
+ EOpConstructMat3x2,
+ EOpConstructMat3x3,
+ EOpConstructMat3x4,
+ EOpConstructMat4x2,
+ EOpConstructMat4x3,
+ EOpConstructMat4x4,
+ EOpConstructDMat2x2,
+ EOpConstructDMat2x3,
+ EOpConstructDMat2x4,
+ EOpConstructDMat3x2,
+ EOpConstructDMat3x3,
+ EOpConstructDMat3x4,
+ EOpConstructDMat4x2,
+ EOpConstructDMat4x3,
+ EOpConstructDMat4x4,
+ EOpConstructIMat2x2,
+ EOpConstructIMat2x3,
+ EOpConstructIMat2x4,
+ EOpConstructIMat3x2,
+ EOpConstructIMat3x3,
+ EOpConstructIMat3x4,
+ EOpConstructIMat4x2,
+ EOpConstructIMat4x3,
+ EOpConstructIMat4x4,
+ EOpConstructUMat2x2,
+ EOpConstructUMat2x3,
+ EOpConstructUMat2x4,
+ EOpConstructUMat3x2,
+ EOpConstructUMat3x3,
+ EOpConstructUMat3x4,
+ EOpConstructUMat4x2,
+ EOpConstructUMat4x3,
+ EOpConstructUMat4x4,
+ EOpConstructBMat2x2,
+ EOpConstructBMat2x3,
+ EOpConstructBMat2x4,
+ EOpConstructBMat3x2,
+ EOpConstructBMat3x3,
+ EOpConstructBMat3x4,
+ EOpConstructBMat4x2,
+ EOpConstructBMat4x3,
+ EOpConstructBMat4x4,
+ EOpConstructFloat16,
+ EOpConstructF16Vec2,
+ EOpConstructF16Vec3,
+ EOpConstructF16Vec4,
+ EOpConstructF16Mat2x2,
+ EOpConstructF16Mat2x3,
+ EOpConstructF16Mat2x4,
+ EOpConstructF16Mat3x2,
+ EOpConstructF16Mat3x3,
+ EOpConstructF16Mat3x4,
+ EOpConstructF16Mat4x2,
+ EOpConstructF16Mat4x3,
+ EOpConstructF16Mat4x4,
+ EOpConstructStruct,
+ EOpConstructTextureSampler,
+ EOpConstructNonuniform, // expected to be transformed away, not present in final AST
+ EOpConstructReference,
+ EOpConstructCooperativeMatrix,
+ EOpConstructGuardEnd,
+
+ //
+ // moves
+ //
+
+ EOpAssign,
+ EOpAddAssign,
+ EOpSubAssign,
+ EOpMulAssign,
+ EOpVectorTimesMatrixAssign,
+ EOpVectorTimesScalarAssign,
+ EOpMatrixTimesScalarAssign,
+ EOpMatrixTimesMatrixAssign,
+ EOpDivAssign,
+ EOpModAssign,
+ EOpAndAssign,
+ EOpInclusiveOrAssign,
+ EOpExclusiveOrAssign,
+ EOpLeftShiftAssign,
+ EOpRightShiftAssign,
+
+ //
+ // Array operators
+ //
+
+ // Can apply to arrays, vectors, or matrices.
+ // Can be decomposed to a constant at compile time, but this does not always happen,
+ // due to link-time effects. So, the consumer can expect either a link-time-sized or
+ // run-time-sized array.
+ EOpArrayLength,
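+ // (Illustrative example: "a.length()" on "float a[4]" folds to the constant 4, while
+ // on a run-time-sized buffer member the EOpArrayLength node survives to code generation.)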
+
+ //
+ // Image operations
+ //
+
+ EOpImageGuardBegin,
+
+ EOpImageQuerySize,
+ EOpImageQuerySamples,
+ EOpImageLoad,
+ EOpImageStore,
+#ifdef AMD_EXTENSIONS
+ EOpImageLoadLod,
+ EOpImageStoreLod,
+#endif
+ EOpImageAtomicAdd,
+ EOpImageAtomicMin,
+ EOpImageAtomicMax,
+ EOpImageAtomicAnd,
+ EOpImageAtomicOr,
+ EOpImageAtomicXor,
+ EOpImageAtomicExchange,
+ EOpImageAtomicCompSwap,
+ EOpImageAtomicLoad,
+ EOpImageAtomicStore,
+
+ EOpSubpassLoad,
+ EOpSubpassLoadMS,
+ EOpSparseImageLoad,
+#ifdef AMD_EXTENSIONS
+ EOpSparseImageLoadLod,
+#endif
+
+ EOpImageGuardEnd,
+
+ //
+ // Texture operations
+ //
+
+ EOpTextureGuardBegin,
+
+ EOpTextureQuerySize,
+ EOpTextureQueryLod,
+ EOpTextureQueryLevels,
+ EOpTextureQuerySamples,
+
+ EOpSamplingGuardBegin,
+
+ EOpTexture,
+ EOpTextureProj,
+ EOpTextureLod,
+ EOpTextureOffset,
+ EOpTextureFetch,
+ EOpTextureFetchOffset,
+ EOpTextureProjOffset,
+ EOpTextureLodOffset,
+ EOpTextureProjLod,
+ EOpTextureProjLodOffset,
+ EOpTextureGrad,
+ EOpTextureGradOffset,
+ EOpTextureProjGrad,
+ EOpTextureProjGradOffset,
+ EOpTextureGather,
+ EOpTextureGatherOffset,
+ EOpTextureGatherOffsets,
+ EOpTextureClamp,
+ EOpTextureOffsetClamp,
+ EOpTextureGradClamp,
+ EOpTextureGradOffsetClamp,
+#ifdef AMD_EXTENSIONS
+ EOpTextureGatherLod,
+ EOpTextureGatherLodOffset,
+ EOpTextureGatherLodOffsets,
+ EOpFragmentMaskFetch,
+ EOpFragmentFetch,
+#endif
+
+ EOpSparseTextureGuardBegin,
+
+ EOpSparseTexture,
+ EOpSparseTextureLod,
+ EOpSparseTextureOffset,
+ EOpSparseTextureFetch,
+ EOpSparseTextureFetchOffset,
+ EOpSparseTextureLodOffset,
+ EOpSparseTextureGrad,
+ EOpSparseTextureGradOffset,
+ EOpSparseTextureGather,
+ EOpSparseTextureGatherOffset,
+ EOpSparseTextureGatherOffsets,
+ EOpSparseTexelsResident,
+ EOpSparseTextureClamp,
+ EOpSparseTextureOffsetClamp,
+ EOpSparseTextureGradClamp,
+ EOpSparseTextureGradOffsetClamp,
+#ifdef AMD_EXTENSIONS
+ EOpSparseTextureGatherLod,
+ EOpSparseTextureGatherLodOffset,
+ EOpSparseTextureGatherLodOffsets,
+#endif
+
+ EOpSparseTextureGuardEnd,
+
+#ifdef NV_EXTENSIONS
+ EOpImageFootprintGuardBegin,
+ EOpImageSampleFootprintNV,
+ EOpImageSampleFootprintClampNV,
+ EOpImageSampleFootprintLodNV,
+ EOpImageSampleFootprintGradNV,
+ EOpImageSampleFootprintGradClampNV,
+ EOpImageFootprintGuardEnd,
+#endif
+ EOpSamplingGuardEnd,
+ EOpTextureGuardEnd,
+
+ //
+ // Integer operations
+ //
+
+ EOpAddCarry,
+ EOpSubBorrow,
+ EOpUMulExtended,
+ EOpIMulExtended,
+ EOpBitfieldExtract,
+ EOpBitfieldInsert,
+ EOpBitFieldReverse,
+ EOpBitCount,
+ EOpFindLSB,
+ EOpFindMSB,
+
+#ifdef NV_EXTENSIONS
+ EOpTraceNV,
+ EOpReportIntersectionNV,
+ EOpIgnoreIntersectionNV,
+ EOpTerminateRayNV,
+ EOpExecuteCallableNV,
+ EOpWritePackedPrimitiveIndices4x8NV,
+#endif
+ //
+ // HLSL operations
+ //
+
+ EOpClip, // discard if input value < 0
+ EOpIsFinite,
+ EOpLog10, // base 10 log
+ EOpRcp, // 1/x
+ EOpSaturate, // clamp from 0 to 1
+ EOpSinCos, // sin and cos in out parameters
+ EOpGenMul, // mul(x,y) on any of mat/vec/scalars
+ EOpDst, // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
+ EOpInterlockedAdd, // atomic ops, but uses [optional] out arg instead of return
+ EOpInterlockedAnd, // ...
+ EOpInterlockedCompareExchange, // ...
+ EOpInterlockedCompareStore, // ...
+ EOpInterlockedExchange, // ...
+ EOpInterlockedMax, // ...
+ EOpInterlockedMin, // ...
+ EOpInterlockedOr, // ...
+ EOpInterlockedXor, // ...
+ EOpAllMemoryBarrierWithGroupSync, // memory barriers without non-hlsl AST equivalents
+ EOpDeviceMemoryBarrier, // ...
+ EOpDeviceMemoryBarrierWithGroupSync, // ...
+ EOpWorkgroupMemoryBarrier, // ...
+ EOpWorkgroupMemoryBarrierWithGroupSync, // ...
+ EOpEvaluateAttributeSnapped, // InterpolateAtOffset with int position on 16x16 grid
+ EOpF32tof16, // HLSL conversion: half of a PackHalf2x16
+ EOpF16tof32, // HLSL conversion: half of an UnpackHalf2x16
+ EOpLit, // HLSL lighting coefficient vector
+ EOpTextureBias, // HLSL texture bias: will be lowered to EOpTexture
+ EOpAsDouble, // slightly different from EOpUint64BitsToDouble
+ EOpD3DCOLORtoUBYTE4, // convert and swizzle 4-component color to UBYTE4 range
+
+ EOpMethodSample, // Texture object methods. These are translated to existing
+ EOpMethodSampleBias, // AST methods, and exist to represent HLSL semantics until that
+ EOpMethodSampleCmp, // translation is performed. See HlslParseContext::decomposeSampleMethods().
+ EOpMethodSampleCmpLevelZero, // ...
+ EOpMethodSampleGrad, // ...
+ EOpMethodSampleLevel, // ...
+ EOpMethodLoad, // ...
+ EOpMethodGetDimensions, // ...
+ EOpMethodGetSamplePosition, // ...
+ EOpMethodGather, // ...
+ EOpMethodCalculateLevelOfDetail, // ...
+ EOpMethodCalculateLevelOfDetailUnclamped, // ...
+
+ // Load already defined above for textures
+ EOpMethodLoad2, // Structure buffer object methods. These are translated to existing
+ EOpMethodLoad3, // AST methods, and exist to represent HLSL semantics until that
+ EOpMethodLoad4, // translation is performed. See HlslParseContext::decomposeSampleMethods().
+ EOpMethodStore, // ...
+ EOpMethodStore2, // ...
+ EOpMethodStore3, // ...
+ EOpMethodStore4, // ...
+ EOpMethodIncrementCounter, // ...
+ EOpMethodDecrementCounter, // ...
+ // EOpMethodAppend is defined for geo shaders below
+ EOpMethodConsume,
+
+ // SM5 texture methods
+ EOpMethodGatherRed, // These are covered under the above EOpMethodSample comment about
+ EOpMethodGatherGreen, // translation to existing AST opcodes. They exist temporarily
+ EOpMethodGatherBlue, // because HLSL arguments are slightly different.
+ EOpMethodGatherAlpha, // ...
+ EOpMethodGatherCmp, // ...
+ EOpMethodGatherCmpRed, // ...
+ EOpMethodGatherCmpGreen, // ...
+ EOpMethodGatherCmpBlue, // ...
+ EOpMethodGatherCmpAlpha, // ...
+
+ // geometry methods
+ EOpMethodAppend, // Geometry shader methods
+ EOpMethodRestartStrip, // ...
+
+ // matrix
+ EOpMatrixSwizzle, // select multiple matrix components (non-column)
+
+ // SM6 wave ops
+ EOpWaveGetLaneCount, // Will decompose to gl_SubgroupSize.
+ EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID.
+ EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()).
+ EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
+};
+
+class TIntermTraverser;
+class TIntermOperator;
+class TIntermAggregate;
+class TIntermUnary;
+class TIntermBinary;
+class TIntermConstantUnion;
+class TIntermSelection;
+class TIntermSwitch;
+class TIntermBranch;
+class TIntermTyped;
+class TIntermMethod;
+class TIntermSymbol;
+class TIntermLoop;
+
+} // end namespace glslang
+
+//
+// Base class for the tree nodes
+//
+// (Put outside the glslang namespace, as it's used as part of the external interface.)
+//
+class TIntermNode {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+
+ TIntermNode() { loc.init(); }
+ virtual const glslang::TSourceLoc& getLoc() const { return loc; }
+ virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
+ virtual void traverse(glslang::TIntermTraverser*) = 0;
+ virtual glslang::TIntermTyped* getAsTyped() { return 0; }
+ virtual glslang::TIntermOperator* getAsOperator() { return 0; }
+ virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; }
+ virtual glslang::TIntermAggregate* getAsAggregate() { return 0; }
+ virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; }
+ virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; }
+ virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; }
+ virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; }
+ virtual glslang::TIntermMethod* getAsMethodNode() { return 0; }
+ virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; }
+ virtual glslang::TIntermBranch* getAsBranchNode() { return 0; }
+ virtual glslang::TIntermLoop* getAsLoopNode() { return 0; }
+
+ virtual const glslang::TIntermTyped* getAsTyped() const { return 0; }
+ virtual const glslang::TIntermOperator* getAsOperator() const { return 0; }
+ virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
+ virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; }
+ virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; }
+ virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; }
+ virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; }
+ virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; }
+ virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; }
+ virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; }
+ virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; }
+ virtual const glslang::TIntermLoop* getAsLoopNode() const { return 0; }
+ virtual ~TIntermNode() { }
+
+protected:
+ TIntermNode(const TIntermNode&);
+ TIntermNode& operator=(const TIntermNode&);
+ glslang::TSourceLoc loc;
+};
+
+namespace glslang {
+
+//
+// This is just to help yacc.
+//
+struct TIntermNodePair {
+ TIntermNode* node1;
+ TIntermNode* node2;
+};
+
+//
+// Intermediate class for nodes that have a type.
+//
+class TIntermTyped : public TIntermNode {
+public:
+ TIntermTyped(const TType& t) { type.shallowCopy(t); }
+ TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
+ virtual TIntermTyped* getAsTyped() { return this; }
+ virtual const TIntermTyped* getAsTyped() const { return this; }
+ virtual void setType(const TType& t) { type.shallowCopy(t); }
+ virtual const TType& getType() const { return type; }
+ virtual TType& getWritableType() { return type; }
+
+ virtual TBasicType getBasicType() const { return type.getBasicType(); }
+ virtual TQualifier& getQualifier() { return type.getQualifier(); }
+ virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
+ virtual void propagatePrecision(TPrecisionQualifier);
+ virtual int getVectorSize() const { return type.getVectorSize(); }
+ virtual int getMatrixCols() const { return type.getMatrixCols(); }
+ virtual int getMatrixRows() const { return type.getMatrixRows(); }
+ virtual bool isMatrix() const { return type.isMatrix(); }
+ virtual bool isArray() const { return type.isArray(); }
+ virtual bool isVector() const { return type.isVector(); }
+ virtual bool isScalar() const { return type.isScalar(); }
+ virtual bool isStruct() const { return type.isStruct(); }
+ virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
+ virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
+ TString getCompleteString() const { return type.getCompleteString(); }
+
+protected:
+ TIntermTyped& operator=(const TIntermTyped&);
+ TType type;
+};
+
+//
+// Handle for, do-while, and while loops.
+//
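+// (Illustrative note: 'for' and 'while' construct this node with testFirst == true, so
+// the test runs before the body; 'do-while' passes testFirst == false.)
+//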
+class TIntermLoop : public TIntermNode {
+public:
+ TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
+ body(aBody),
+ test(aTest),
+ terminal(aTerminal),
+ first(testFirst),
+ unroll(false),
+ dontUnroll(false),
+ dependency(0)
+ { }
+
+ virtual TIntermLoop* getAsLoopNode() { return this; }
+ virtual const TIntermLoop* getAsLoopNode() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ TIntermNode* getBody() const { return body; }
+ TIntermTyped* getTest() const { return test; }
+ TIntermTyped* getTerminal() const { return terminal; }
+ bool testFirst() const { return first; }
+
+ void setUnroll() { unroll = true; }
+ void setDontUnroll() { dontUnroll = true; }
+ bool getUnroll() const { return unroll; }
+ bool getDontUnroll() const { return dontUnroll; }
+
+ static const unsigned int dependencyInfinite = 0xFFFFFFFF;
+ void setLoopDependency(int d) { dependency = d; }
+ int getLoopDependency() const { return dependency; }
+
+protected:
+ TIntermNode* body; // code to loop over
+ TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops
+ TIntermTyped* terminal; // exists for for-loops
+ bool first; // true for while and for, not for do-while
+ bool unroll; // true if unroll requested
+ bool dontUnroll; // true if request to not unroll
+ unsigned int dependency; // loop dependency hint; 0 means not set or unknown
+};
+
+//
+// Handle case, break, continue, return, and kill.
+//
+class TIntermBranch : public TIntermNode {
+public:
+ TIntermBranch(TOperator op, TIntermTyped* e) :
+ flowOp(op),
+ expression(e) { }
+ virtual TIntermBranch* getAsBranchNode() { return this; }
+ virtual const TIntermBranch* getAsBranchNode() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ TOperator getFlowOp() const { return flowOp; }
+ TIntermTyped* getExpression() const { return expression; }
+protected:
+ TOperator flowOp;
+ TIntermTyped* expression;
+};
+
+//
+// Represent method names before seeing their calling signature
+// or resolving them to operations. Just an expression as the base object
+// and a textual name.
+//
+class TIntermMethod : public TIntermTyped {
+public:
+ TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
+ virtual TIntermMethod* getAsMethodNode() { return this; }
+ virtual const TIntermMethod* getAsMethodNode() const { return this; }
+ virtual const TString& getMethodName() const { return method; }
+ virtual TIntermTyped* getObject() const { return object; }
+ virtual void traverse(TIntermTraverser*);
+protected:
+ TIntermTyped* object;
+ TString method;
+};
+
+//
+// Nodes that correspond to symbols or constants in the source code.
+//
+class TIntermSymbol : public TIntermTyped {
+public:
+ // If a symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym
+ // comes from the per-process threadPoolAllocator, then this causes increased memory usage per compile.
+ // To avoid that, it is essential to use "symbol = sym" to assign to symbol.
+ TIntermSymbol(int i, const TString& n, const TType& t)
+ : TIntermTyped(t), id(i),
+#ifdef ENABLE_HLSL
+ flattenSubset(-1),
+#endif
+ constSubtree(nullptr)
+ { name = n; }
+ virtual int getId() const { return id; }
+ virtual void changeId(int i) { id = i; }
+ virtual const TString& getName() const { return name; }
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermSymbol* getAsSymbolNode() { return this; }
+ virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
+ void setConstArray(const TConstUnionArray& c) { constArray = c; }
+ const TConstUnionArray& getConstArray() const { return constArray; }
+ void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
+ TIntermTyped* getConstSubtree() const { return constSubtree; }
+#ifdef ENABLE_HLSL
+ void setFlattenSubset(int subset) { flattenSubset = subset; }
+ int getFlattenSubset() const { return flattenSubset; } // -1 means full object
+#endif
+
+ // This is meant for cases where a node has already been constructed, and
+ // later on, it becomes necessary to switch to a different symbol.
+ virtual void switchId(int newId) { id = newId; }
+
+protected:
+ int id; // the unique id of the symbol this node represents
+#ifdef ENABLE_HLSL
+ int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
+#endif
+ TString name; // the name of the symbol this node represents
+ TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
+ TIntermTyped* constSubtree;
+};
+
+class TIntermConstantUnion : public TIntermTyped {
+public:
+ TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
+ const TConstUnionArray& getConstArray() const { return constArray; }
+ virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
+ virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
+ virtual TIntermTyped* fold(TOperator, const TType&) const;
+ void setLiteral() { literal = true; }
+ void setExpression() { literal = false; }
+ bool isLiteral() const { return literal; }
+
+protected:
+ TIntermConstantUnion& operator=(const TIntermConstantUnion&);
+
+ const TConstUnionArray constArray;
+ bool literal; // true if node represents a literal in the source code
+};
+
+// Represent the independent aspects of a texturing TOperator
+struct TCrackedTextureOp {
+ bool query;
+ bool proj;
+ bool lod;
+ bool fetch;
+ bool offset;
+ bool offsets;
+ bool gather;
+ bool grad;
+ bool subpass;
+ bool lodClamp;
+#ifdef AMD_EXTENSIONS
+ bool fragMask;
+#endif
+};
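+
+// A minimal usage sketch (illustrative only; 'node' and 'sampler' are assumed to be
+// already in hand):
+//
+//     TCrackedTextureOp cracked;
+//     node->crackTexture(sampler, cracked);  // 'node' is a TIntermOperator*
+//     if (cracked.proj && cracked.lod) { /* e.g. the textureProjLod family */ }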
+
+//
+// Intermediate class for node types that hold operators.
+//
+class TIntermOperator : public TIntermTyped {
+public:
+ virtual TIntermOperator* getAsOperator() { return this; }
+ virtual const TIntermOperator* getAsOperator() const { return this; }
+ TOperator getOp() const { return op; }
+ void setOp(TOperator newOp) { op = newOp; }
+ bool modifiesState() const;
+ bool isConstructor() const;
+ bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
+ bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
+ bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
+ bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
+#ifdef NV_EXTENSIONS
+ bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
+#endif
+ bool isSparseImage() const { return op == EOpSparseImageLoad; }
+
+ void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
+ TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
+ operationPrecision :
+ type.getQualifier().precision; }
+ TString getCompleteString() const
+ {
+ TString cs = type.getCompleteString();
+ if (getOperationPrecision() != type.getQualifier().precision) {
+ cs += ", operation at ";
+ cs += GetPrecisionQualifierString(getOperationPrecision());
+ }
+
+ return cs;
+ }
+
+ // Crack the op into the individual dimensions of texturing operation.
+ void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
+ {
+ cracked.query = false;
+ cracked.proj = false;
+ cracked.lod = false;
+ cracked.fetch = false;
+ cracked.offset = false;
+ cracked.offsets = false;
+ cracked.gather = false;
+ cracked.grad = false;
+ cracked.subpass = false;
+ cracked.lodClamp = false;
+#ifdef AMD_EXTENSIONS
+ cracked.fragMask = false;
+#endif
+
+ switch (op) {
+ case EOpImageQuerySize:
+ case EOpImageQuerySamples:
+ case EOpTextureQuerySize:
+ case EOpTextureQueryLod:
+ case EOpTextureQueryLevels:
+ case EOpTextureQuerySamples:
+ case EOpSparseTexelsResident:
+ cracked.query = true;
+ break;
+ case EOpTexture:
+ case EOpSparseTexture:
+ break;
+ case EOpTextureClamp:
+ case EOpSparseTextureClamp:
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureProj:
+ cracked.proj = true;
+ break;
+ case EOpTextureLod:
+ case EOpSparseTextureLod:
+ cracked.lod = true;
+ break;
+ case EOpTextureOffset:
+ case EOpSparseTextureOffset:
+ cracked.offset = true;
+ break;
+ case EOpTextureOffsetClamp:
+ case EOpSparseTextureOffsetClamp:
+ cracked.offset = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureFetch:
+ case EOpSparseTextureFetch:
+ cracked.fetch = true;
+ if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+ cracked.lod = true;
+ break;
+ case EOpTextureFetchOffset:
+ case EOpSparseTextureFetchOffset:
+ cracked.fetch = true;
+ cracked.offset = true;
+ if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+ cracked.lod = true;
+ break;
+ case EOpTextureProjOffset:
+ cracked.offset = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureLodOffset:
+ case EOpSparseTextureLodOffset:
+ cracked.offset = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureProjLod:
+ cracked.lod = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureProjLodOffset:
+ cracked.offset = true;
+ cracked.lod = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureGrad:
+ case EOpSparseTextureGrad:
+ cracked.grad = true;
+ break;
+ case EOpTextureGradClamp:
+ case EOpSparseTextureGradClamp:
+ cracked.grad = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureGradOffset:
+ case EOpSparseTextureGradOffset:
+ cracked.grad = true;
+ cracked.offset = true;
+ break;
+ case EOpTextureProjGrad:
+ cracked.grad = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureProjGradOffset:
+ cracked.grad = true;
+ cracked.offset = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureGradOffsetClamp:
+ case EOpSparseTextureGradOffsetClamp:
+ cracked.grad = true;
+ cracked.offset = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureGather:
+ case EOpSparseTextureGather:
+ cracked.gather = true;
+ break;
+ case EOpTextureGatherOffset:
+ case EOpSparseTextureGatherOffset:
+ cracked.gather = true;
+ cracked.offset = true;
+ break;
+ case EOpTextureGatherOffsets:
+ case EOpSparseTextureGatherOffsets:
+ cracked.gather = true;
+ cracked.offsets = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case EOpTextureGatherLod:
+ case EOpSparseTextureGatherLod:
+ cracked.gather = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureGatherLodOffset:
+ case EOpSparseTextureGatherLodOffset:
+ cracked.gather = true;
+ cracked.offset = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureGatherLodOffsets:
+ case EOpSparseTextureGatherLodOffsets:
+ cracked.gather = true;
+ cracked.offsets = true;
+ cracked.lod = true;
+ break;
+ case EOpImageLoadLod:
+ case EOpImageStoreLod:
+ case EOpSparseImageLoadLod:
+ cracked.lod = true;
+ break;
+ case EOpFragmentMaskFetch:
+ cracked.subpass = sampler.dim == EsdSubpass;
+ cracked.fragMask = true;
+ break;
+ case EOpFragmentFetch:
+ cracked.subpass = sampler.dim == EsdSubpass;
+ cracked.fragMask = true;
+ break;
+#endif
+#ifdef NV_EXTENSIONS
+ case EOpImageSampleFootprintNV:
+ break;
+ case EOpImageSampleFootprintClampNV:
+ cracked.lodClamp = true;
+ break;
+ case EOpImageSampleFootprintLodNV:
+ cracked.lod = true;
+ break;
+ case EOpImageSampleFootprintGradNV:
+ cracked.grad = true;
+ break;
+ case EOpImageSampleFootprintGradClampNV:
+ cracked.lodClamp = true;
+ cracked.grad = true;
+ break;
+#endif
+ case EOpSubpassLoad:
+ case EOpSubpassLoadMS:
+ cracked.subpass = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+protected:
+ TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
+ TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
+ TOperator op;
+ // The result precision is in the inherited TType, and is usually meant to be both
+ // the operation precision and the result precision. However, some more complex things,
+ // like built-in function calls, distinguish between the two, in which case a non-EpqNone
+ // 'operationPrecision' overrides the result precision as far as operation precision
+ // is concerned.
+ TPrecisionQualifier operationPrecision;
+};
+
+//
+// Nodes for all the basic binary math operators.
+//
+class TIntermBinary : public TIntermOperator {
+public:
+ TIntermBinary(TOperator o) : TIntermOperator(o) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual void setLeft(TIntermTyped* n) { left = n; }
+ virtual void setRight(TIntermTyped* n) { right = n; }
+ virtual TIntermTyped* getLeft() const { return left; }
+ virtual TIntermTyped* getRight() const { return right; }
+ virtual TIntermBinary* getAsBinaryNode() { return this; }
+ virtual const TIntermBinary* getAsBinaryNode() const { return this; }
+ virtual void updatePrecision();
+protected:
+ TIntermTyped* left;
+ TIntermTyped* right;
+};
+
+//
+// Nodes for unary math operators.
+//
+class TIntermUnary : public TIntermOperator {
+public:
+ TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {}
+ TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual void setOperand(TIntermTyped* o) { operand = o; }
+ virtual TIntermTyped* getOperand() { return operand; }
+ virtual const TIntermTyped* getOperand() const { return operand; }
+ virtual TIntermUnary* getAsUnaryNode() { return this; }
+ virtual const TIntermUnary* getAsUnaryNode() const { return this; }
+ virtual void updatePrecision();
+protected:
+ TIntermTyped* operand;
+};
+
+typedef TVector<TIntermNode*> TIntermSequence;
+typedef TVector<TStorageQualifier> TQualifierList;
+//
+// Nodes that operate on an arbitrary sized set of children.
+//
+class TIntermAggregate : public TIntermOperator {
+public:
+ TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
+ TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
+ ~TIntermAggregate() { delete pragmaTable; }
+ virtual TIntermAggregate* getAsAggregate() { return this; }
+ virtual const TIntermAggregate* getAsAggregate() const { return this; }
+ virtual void setOperator(TOperator o) { op = o; }
+ virtual TIntermSequence& getSequence() { return sequence; }
+ virtual const TIntermSequence& getSequence() const { return sequence; }
+ virtual void setName(const TString& n) { name = n; }
+ virtual const TString& getName() const { return name; }
+ virtual void traverse(TIntermTraverser*);
+ virtual void setUserDefined() { userDefined = true; }
+ virtual bool isUserDefined() { return userDefined; }
+ virtual TQualifierList& getQualifierList() { return qualifier; }
+ virtual const TQualifierList& getQualifierList() const { return qualifier; }
+ void setOptimize(bool o) { optimize = o; }
+ void setDebug(bool d) { debug = d; }
+ bool getOptimize() const { return optimize; }
+ bool getDebug() const { return debug; }
+ void setPragmaTable(const TPragmaTable& pTable);
+ const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
+protected:
+ TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
+ TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
+ TIntermSequence sequence;
+ TQualifierList qualifier;
+ TString name;
+ bool userDefined; // used for user defined function names
+ bool optimize;
+ bool debug;
+ TPragmaTable* pragmaTable;
+};
+
+//
+// For if tests.
+//
+class TIntermSelection : public TIntermTyped {
+public:
+ TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
+ TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
+ shortCircuit(true),
+ flatten(false), dontFlatten(false) {}
+ TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
+ TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
+ shortCircuit(true),
+ flatten(false), dontFlatten(false) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermTyped* getCondition() const { return condition; }
+ virtual TIntermNode* getTrueBlock() const { return trueBlock; }
+ virtual TIntermNode* getFalseBlock() const { return falseBlock; }
+ virtual TIntermSelection* getAsSelectionNode() { return this; }
+ virtual const TIntermSelection* getAsSelectionNode() const { return this; }
+
+ void setNoShortCircuit() { shortCircuit = false; }
+ bool getShortCircuit() const { return shortCircuit; }
+
+ void setFlatten() { flatten = true; }
+ void setDontFlatten() { dontFlatten = true; }
+ bool getFlatten() const { return flatten; }
+ bool getDontFlatten() const { return dontFlatten; }
+
+protected:
+ TIntermTyped* condition;
+ TIntermNode* trueBlock;
+ TIntermNode* falseBlock;
+ bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
+ bool flatten; // true if flatten requested
+ bool dontFlatten; // true if requested to not flatten
+};
+
+//
+// For switch statements. Designed use is that a switch will have a sequence of nodes
+// that are either case/default nodes or a *single* node that represents all the code
+// (if any) between consecutive case/defaults. So, a traversal need only deal with
+// 0 or 1 nodes per case/default statement.
+//
+class TIntermSwitch : public TIntermNode {
+public:
+ TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
+ flatten(false), dontFlatten(false) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermNode* getCondition() const { return condition; }
+ virtual TIntermAggregate* getBody() const { return body; }
+ virtual TIntermSwitch* getAsSwitchNode() { return this; }
+ virtual const TIntermSwitch* getAsSwitchNode() const { return this; }
+
+ void setFlatten() { flatten = true; }
+ void setDontFlatten() { dontFlatten = true; }
+ bool getFlatten() const { return flatten; }
+ bool getDontFlatten() const { return dontFlatten; }
+
+protected:
+ TIntermTyped* condition;
+ TIntermAggregate* body;
+ bool flatten; // true if flatten requested
+ bool dontFlatten; // true if requested to not flatten
+};
+
+enum TVisit
+{
+ EvPreVisit,
+ EvInVisit,
+ EvPostVisit
+};
+
+//
+// For traversing the tree. User should derive from this,
+// put their traversal specific data in it, and then pass
+// it to a Traverse method.
+//
+// When using this, just fill in the methods for nodes you want visited.
+// Return false from a pre-visit to skip visiting that node's subtree.
+//
+// Explicitly set postVisit to true if you want post visiting, otherwise,
+// filled in methods will only be called at pre-visit time (before processing
+// the subtree). Similarly for inVisit for in-order visiting of nodes with
+// multiple children.
+//
+// If you only want post-visits, explicitly turn off preVisit (and inVisit)
+// and turn on postVisit.
+//
+// In general, for the visit*() methods, return true from interior nodes
+// to have the traversal continue on to children.
+//
+// If you process children yourself, or don't want them processed, return false.
+//
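+// A minimal sketch (illustrative; the derived class is hypothetical): count the
+// symbol nodes in a tree.
+//
+//     class TSymbolCounter : public TIntermTraverser {
+//     public:
+//         TSymbolCounter() : count(0) { }
+//         virtual void visitSymbol(TIntermSymbol*) { ++count; }
+//         int count;
+//     };
+//
+//     // usage: TSymbolCounter c; root->traverse(&c); then read c.count.
+//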
+class TIntermTraverser {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+ TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
+ preVisit(preVisit),
+ inVisit(inVisit),
+ postVisit(postVisit),
+ rightToLeft(rightToLeft),
+ depth(0),
+ maxDepth(0) { }
+ virtual ~TIntermTraverser() { }
+
+ virtual void visitSymbol(TIntermSymbol*) { }
+ virtual void visitConstantUnion(TIntermConstantUnion*) { }
+ virtual bool visitBinary(TVisit, TIntermBinary*) { return true; }
+ virtual bool visitUnary(TVisit, TIntermUnary*) { return true; }
+ virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
+ virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
+ virtual bool visitLoop(TVisit, TIntermLoop*) { return true; }
+ virtual bool visitBranch(TVisit, TIntermBranch*) { return true; }
+ virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; }
+
+ int getMaxDepth() const { return maxDepth; }
+
+ void incrementDepth(TIntermNode *current)
+ {
+ depth++;
+ maxDepth = (std::max)(maxDepth, depth);
+ path.push_back(current);
+ }
+
+ void decrementDepth()
+ {
+ depth--;
+ path.pop_back();
+ }
+
+ TIntermNode *getParentNode()
+ {
+ return path.size() == 0 ? NULL : path.back();
+ }
+
+ const bool preVisit;
+ const bool inVisit;
+ const bool postVisit;
+ const bool rightToLeft;
+
+protected:
+ TIntermTraverser& operator=(TIntermTraverser&);
+
+ int depth;
+ int maxDepth;
+
+ // All the nodes from root to the current node's parent during traversing.
+ TVector<TIntermNode *> path;
+};
+
+// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
+// sized with the same symbol, involving no operations"
+inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
+{
+ return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
+ node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
+}
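+
+// (Illustrative GLSL example: given "layout(constant_id = 0) const int N = 3;",
+// "float a[N]" and "float b[N]" have the same type, while "float c[N + 1]" does not
+// match either, since its size involves an operation rather than the bare symbol.)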
+
+} // end namespace glslang
+
+#endif // __INTERMEDIATE_H
diff --git a/src/3rdparty/glslang/glslang/Include/revision.h b/src/3rdparty/glslang/glslang/Include/revision.h
new file mode 100644
index 0000000..f810a33
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/revision.h
@@ -0,0 +1,3 @@
+// This header is generated by the make-revision script.
+
+#define GLSLANG_PATCH_LEVEL 3170
diff --git a/src/3rdparty/glslang/glslang/Include/revision.template b/src/3rdparty/glslang/glslang/Include/revision.template
new file mode 100644
index 0000000..4a16bee
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/revision.template
@@ -0,0 +1,13 @@
+// The file revision.h should be updated to the latest version, somehow, on
+// check-in, if glslang has changed.
+//
+// revision.template is the source for revision.h when using SubWCRev as the
+// method of updating revision.h. You don't have to do it this way; the
+// requirement is only that revision.h gets updated.
+//
+// revision.h is under source control so that not all consumers of glslang
+// source have to figure out how to create revision.h just to get a build
+// going. However, if it is not updated, it can be a version behind.
+
+#define GLSLANG_REVISION "$WCREV$"
+#define GLSLANG_DATE "$WCDATE$"
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Constant.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/Constant.cpp
new file mode 100644
index 0000000..b75e3ef
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Constant.cpp
@@ -0,0 +1,1405 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "localintermediate.h"
+#include <cmath>
+#include <cfloat>
+#include <cstdlib>
+#include <climits>
+
+namespace {
+
+using namespace glslang;
+
+typedef union {
+ double d;
+ int i[2];
+} DoubleIntUnion;
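+
+// Note (an assumption made explicit): the i[0]/i[1] indexing below treats i[1] as the
+// high 32 bits of the double, which holds on little-endian platforms.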
+
+// Some helper functions
+
+bool isNan(double x)
+{
+ DoubleIntUnion u;
+ // tough to find a platform-independent library function; do it directly
+ u.d = x;
+ int bitPatternL = u.i[0];
+ int bitPatternH = u.i[1];
+ return (bitPatternH & 0x7ff80000) == 0x7ff80000 &&
+ ((bitPatternH & 0xFFFFF) != 0 || bitPatternL != 0);
+}
+
+bool isInf(double x)
+{
+ DoubleIntUnion u;
+ // tough to find a platform-independent library function; do it directly
+ u.d = x;
+ int bitPatternL = u.i[0];
+ int bitPatternH = u.i[1];
+ return (bitPatternH & 0x7ff00000) == 0x7ff00000 &&
+ (bitPatternH & 0xFFFFF) == 0 && bitPatternL == 0;
+}
+
+const double pi = 3.1415926535897932384626433832795;
+
+} // end anonymous namespace
+
+
+namespace glslang {
+
+//
+// The fold functions see if an operation on a constant can be done in place,
+// without generating run-time code.
+//
+// Returns the node to keep using, which may or may not be the node passed in.
+//
+// Note: As of version 1.2, all constant operations must be folded. It is
+// not opportunistic, but rather a semantic requirement.
+//
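+// For example (illustrative, not a case from this file):
+//
+//     const vec2 a = vec2(1.0, 2.0) + vec2(3.0, 4.0);
+//
+// must yield the constant vec2(4.0, 6.0) with no run-time addition.
+//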
+
+//
+// Do folding between a pair of nodes.
+// 'this' is the left-hand operand and 'rightConstantNode' is the right-hand operand.
+//
+// Returns a new node representing the result.
+//
+TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* rightConstantNode) const
+{
+    // For most cases, the return type matches the argument type, so set that
+    // up here and only override it in the exceptional cases below.
+ TType returnType;
+ returnType.shallowCopy(getType());
+
+ //
+ // A pair of nodes is to be folded together
+ //
+
+ const TIntermConstantUnion *rightNode = rightConstantNode->getAsConstantUnion();
+ TConstUnionArray leftUnionArray = getConstArray();
+ TConstUnionArray rightUnionArray = rightNode->getConstArray();
+
+ // Figure out the size of the result
+ int newComps;
+ int constComps;
+ switch(op) {
+ case EOpMatrixTimesMatrix:
+ newComps = rightNode->getMatrixCols() * getMatrixRows();
+ break;
+ case EOpMatrixTimesVector:
+ newComps = getMatrixRows();
+ break;
+ case EOpVectorTimesMatrix:
+ newComps = rightNode->getMatrixCols();
+ break;
+ default:
+ newComps = getType().computeNumComponents();
+ constComps = rightConstantNode->getType().computeNumComponents();
+ if (constComps == 1 && newComps > 1) {
+ // for a case like vec4 f = vec4(2,3,4,5) + 1.2;
+ TConstUnionArray smearedArray(newComps, rightNode->getConstArray()[0]);
+ rightUnionArray = smearedArray;
+ } else if (constComps > 1 && newComps == 1) {
+ // for a case like vec4 f = 1.2 + vec4(2,3,4,5);
+ newComps = constComps;
+ rightUnionArray = rightNode->getConstArray();
+ TConstUnionArray smearedArray(newComps, getConstArray()[0]);
+ leftUnionArray = smearedArray;
+ returnType.shallowCopy(rightNode->getType());
+ }
+ break;
+ }
+
+ TConstUnionArray newConstArray(newComps);
+ TType constBool(EbtBool, EvqConst);
+
+ switch(op) {
+ case EOpAdd:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] + rightUnionArray[i];
+ break;
+ case EOpSub:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] - rightUnionArray[i];
+ break;
+
+ case EOpMul:
+ case EOpVectorTimesScalar:
+ case EOpMatrixTimesScalar:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] * rightUnionArray[i];
+ break;
+ case EOpMatrixTimesMatrix:
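+        // Matrices are stored column-major, so element (row, column) of an
+        // R-row matrix sits at flat index [column * R + row].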
+ for (int row = 0; row < getMatrixRows(); row++) {
+ for (int column = 0; column < rightNode->getMatrixCols(); column++) {
+                double sum = 0.0;
+ for (int i = 0; i < rightNode->getMatrixRows(); i++)
+ sum += leftUnionArray[i * getMatrixRows() + row].getDConst() * rightUnionArray[column * rightNode->getMatrixRows() + i].getDConst();
+ newConstArray[column * getMatrixRows() + row].setDConst(sum);
+ }
+ }
+ returnType.shallowCopy(TType(getType().getBasicType(), EvqConst, 0, rightNode->getMatrixCols(), getMatrixRows()));
+ break;
+ case EOpDiv:
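+        // Division by zero folds to +/-infinity or NaN for floating-point
+        // types and to the type's extreme value for integer types, rather
+        // than being rejected at compile time.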
+ for (int i = 0; i < newComps; i++) {
+ switch (getType().getBasicType()) {
+ case EbtDouble:
+ case EbtFloat:
+ case EbtFloat16:
+ if (rightUnionArray[i].getDConst() != 0.0)
+ newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst());
+ else if (leftUnionArray[i].getDConst() > 0.0)
+ newConstArray[i].setDConst((double)INFINITY);
+ else if (leftUnionArray[i].getDConst() < 0.0)
+ newConstArray[i].setDConst(-(double)INFINITY);
+ else
+ newConstArray[i].setDConst((double)NAN);
+ break;
+ case EbtInt8:
+ if (rightUnionArray[i] == (signed char)0)
+ newConstArray[i].setI8Const((signed char)0x7F);
+ else if (rightUnionArray[i].getI8Const() == (signed char)-1 && leftUnionArray[i].getI8Const() == (signed char)-0x80)
+ newConstArray[i].setI8Const((signed char)-0x80);
+ else
+ newConstArray[i].setI8Const(leftUnionArray[i].getI8Const() / rightUnionArray[i].getI8Const());
+ break;
+
+ case EbtUint8:
+ if (rightUnionArray[i] == (unsigned char)0u)
+ newConstArray[i].setU8Const((unsigned char)0xFFu);
+ else
+ newConstArray[i].setU8Const(leftUnionArray[i].getU8Const() / rightUnionArray[i].getU8Const());
+ break;
+
+ case EbtInt16:
+ if (rightUnionArray[i] == (signed short)0)
+ newConstArray[i].setI16Const((signed short)0x7FFF);
+ else if (rightUnionArray[i].getI16Const() == (signed short)-1 && leftUnionArray[i].getI16Const() == (signed short)-0x8000)
+ newConstArray[i].setI16Const((signed short)-0x8000);
+ else
+ newConstArray[i].setI16Const(leftUnionArray[i].getI16Const() / rightUnionArray[i].getI16Const());
+ break;
+
+ case EbtUint16:
+ if (rightUnionArray[i] == (unsigned short)0u)
+ newConstArray[i].setU16Const((unsigned short)0xFFFFu);
+ else
+ newConstArray[i].setU16Const(leftUnionArray[i].getU16Const() / rightUnionArray[i].getU16Const());
+ break;
+
+ case EbtInt:
+ if (rightUnionArray[i] == 0)
+ newConstArray[i].setIConst(0x7FFFFFFF);
+ else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)-0x80000000ll)
+ newConstArray[i].setIConst((int)-0x80000000ll);
+ else
+ newConstArray[i].setIConst(leftUnionArray[i].getIConst() / rightUnionArray[i].getIConst());
+ break;
+
+ case EbtUint:
+ if (rightUnionArray[i] == 0u)
+ newConstArray[i].setUConst(0xFFFFFFFFu);
+ else
+ newConstArray[i].setUConst(leftUnionArray[i].getUConst() / rightUnionArray[i].getUConst());
+ break;
+
+ case EbtInt64:
+ if (rightUnionArray[i] == 0ll)
+ newConstArray[i].setI64Const(0x7FFFFFFFFFFFFFFFll);
+ else if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == (long long)-0x8000000000000000ll)
+ newConstArray[i].setI64Const((long long)-0x8000000000000000ll);
+ else
+ newConstArray[i].setI64Const(leftUnionArray[i].getI64Const() / rightUnionArray[i].getI64Const());
+ break;
+
+ case EbtUint64:
+ if (rightUnionArray[i] == 0ull)
+ newConstArray[i].setU64Const(0xFFFFFFFFFFFFFFFFull);
+ else
+ newConstArray[i].setU64Const(leftUnionArray[i].getU64Const() / rightUnionArray[i].getU64Const());
+ break;
+ default:
+ return 0;
+ }
+ }
+ break;
+
+ case EOpMatrixTimesVector:
+ for (int i = 0; i < getMatrixRows(); i++) {
+            double sum = 0.0;
+ for (int j = 0; j < rightNode->getVectorSize(); j++) {
+ sum += leftUnionArray[j*getMatrixRows() + i].getDConst() * rightUnionArray[j].getDConst();
+ }
+ newConstArray[i].setDConst(sum);
+ }
+
+ returnType.shallowCopy(TType(getBasicType(), EvqConst, getMatrixRows()));
+ break;
+
+ case EOpVectorTimesMatrix:
+ for (int i = 0; i < rightNode->getMatrixCols(); i++) {
+            double sum = 0.0;
+ for (int j = 0; j < getVectorSize(); j++)
+ sum += leftUnionArray[j].getDConst() * rightUnionArray[i*rightNode->getMatrixRows() + j].getDConst();
+ newConstArray[i].setDConst(sum);
+ }
+
+ returnType.shallowCopy(TType(getBasicType(), EvqConst, rightNode->getMatrixCols()));
+ break;
+
+ case EOpMod:
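+        // Fold x % 0 to x, and fold <type-min> % -1 (which would overflow in
+        // C++) to 0; everything else uses the host's % operator directly.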
+ for (int i = 0; i < newComps; i++) {
+ if (rightUnionArray[i] == 0)
+ newConstArray[i] = leftUnionArray[i];
+ else {
+ switch (getType().getBasicType()) {
+ case EbtInt:
+ if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == INT_MIN) {
+ newConstArray[i].setIConst(0);
+ break;
+ } else goto modulo_default;
+
+ case EbtInt64:
+ if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == LLONG_MIN) {
+ newConstArray[i].setI64Const(0);
+ break;
+ } else goto modulo_default;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == SHRT_MIN) {
+ newConstArray[i].setIConst(0);
+ break;
+ } else goto modulo_default;
+#endif
+ default:
+ modulo_default:
+ newConstArray[i] = leftUnionArray[i] % rightUnionArray[i];
+ }
+ }
+ }
+ break;
+
+ case EOpRightShift:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] >> rightUnionArray[i];
+ break;
+
+ case EOpLeftShift:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] << rightUnionArray[i];
+ break;
+
+ case EOpAnd:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] & rightUnionArray[i];
+ break;
+ case EOpInclusiveOr:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] | rightUnionArray[i];
+ break;
+ case EOpExclusiveOr:
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] ^ rightUnionArray[i];
+ break;
+
+ case EOpLogicalAnd: // this code is written for possible future use, will not get executed currently
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] && rightUnionArray[i];
+ break;
+
+ case EOpLogicalOr: // this code is written for possible future use, will not get executed currently
+ for (int i = 0; i < newComps; i++)
+ newConstArray[i] = leftUnionArray[i] || rightUnionArray[i];
+ break;
+
+ case EOpLogicalXor:
+ for (int i = 0; i < newComps; i++) {
+ switch (getType().getBasicType()) {
+            case EbtBool: newConstArray[i].setBConst(leftUnionArray[i] != rightUnionArray[i]); break;
+ default: assert(false && "Default missing");
+ }
+ }
+ break;
+
+ case EOpLessThan:
+ newConstArray[0].setBConst(leftUnionArray[0] < rightUnionArray[0]);
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpGreaterThan:
+ newConstArray[0].setBConst(leftUnionArray[0] > rightUnionArray[0]);
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpLessThanEqual:
+ newConstArray[0].setBConst(! (leftUnionArray[0] > rightUnionArray[0]));
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpGreaterThanEqual:
+ newConstArray[0].setBConst(! (leftUnionArray[0] < rightUnionArray[0]));
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpEqual:
+ newConstArray[0].setBConst(rightNode->getConstArray() == leftUnionArray);
+ returnType.shallowCopy(constBool);
+ break;
+ case EOpNotEqual:
+ newConstArray[0].setBConst(rightNode->getConstArray() != leftUnionArray);
+ returnType.shallowCopy(constBool);
+ break;
+
+ default:
+ return 0;
+ }
+
+ TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType);
+ newNode->setLoc(getLoc());
+
+ return newNode;
+}
+
+//
+// Do single unary node folding
+//
+// Returns a new node representing the result.
+//
+TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType) const
+{
+ // First, size the result, which is mostly the same as the argument's size,
+ // but not always, and classify what is componentwise.
+ // Also, eliminate cases that can't be compile-time constant.
+ int resultSize;
+ bool componentWise = true;
+
+ int objectSize = getType().computeNumComponents();
+ switch (op) {
+ case EOpDeterminant:
+ case EOpAny:
+ case EOpAll:
+ case EOpLength:
+ componentWise = false;
+ resultSize = 1;
+ break;
+
+ case EOpEmitStreamVertex:
+ case EOpEndStreamPrimitive:
+ // These don't actually fold
+ return 0;
+
+ case EOpPackSnorm2x16:
+ case EOpPackUnorm2x16:
+ case EOpPackHalf2x16:
+ componentWise = false;
+ resultSize = 1;
+ break;
+
+ case EOpUnpackSnorm2x16:
+ case EOpUnpackUnorm2x16:
+ case EOpUnpackHalf2x16:
+ componentWise = false;
+ resultSize = 2;
+ break;
+
+ case EOpPack16:
+ case EOpPack32:
+ case EOpPack64:
+ case EOpUnpack32:
+ case EOpUnpack16:
+ case EOpUnpack8:
+ case EOpNormalize:
+ componentWise = false;
+ resultSize = objectSize;
+ break;
+
+ default:
+ resultSize = objectSize;
+ break;
+ }
+
+ // Set up for processing
+ TConstUnionArray newConstArray(resultSize);
+ const TConstUnionArray& unionArray = getConstArray();
+
+ // Process non-component-wise operations
+ switch (op) {
+ case EOpLength:
+ case EOpNormalize:
+ {
+ double sum = 0;
+ for (int i = 0; i < objectSize; i++)
+ sum += unionArray[i].getDConst() * unionArray[i].getDConst();
+ double length = sqrt(sum);
+ if (op == EOpLength)
+ newConstArray[0].setDConst(length);
+ else {
+ for (int i = 0; i < objectSize; i++)
+ newConstArray[i].setDConst(unionArray[i].getDConst() / length);
+ }
+ break;
+ }
+
+ case EOpAny:
+ {
+ bool result = false;
+ for (int i = 0; i < objectSize; i++) {
+ if (unionArray[i].getBConst())
+ result = true;
+ }
+ newConstArray[0].setBConst(result);
+ break;
+ }
+ case EOpAll:
+ {
+ bool result = true;
+ for (int i = 0; i < objectSize; i++) {
+ if (! unionArray[i].getBConst())
+ result = false;
+ }
+ newConstArray[0].setBConst(result);
+ break;
+ }
+
+ // TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
+
+ case EOpPackSnorm2x16:
+ case EOpPackUnorm2x16:
+ case EOpPackHalf2x16:
+ case EOpPack16:
+ case EOpPack32:
+ case EOpPack64:
+ case EOpUnpack32:
+ case EOpUnpack16:
+ case EOpUnpack8:
+
+ case EOpUnpackSnorm2x16:
+ case EOpUnpackUnorm2x16:
+ case EOpUnpackHalf2x16:
+
+ case EOpDeterminant:
+ case EOpMatrixInverse:
+ case EOpTranspose:
+ return 0;
+
+ default:
+ assert(componentWise);
+ break;
+ }
+
+ // Turn off the componentwise loop
+ if (! componentWise)
+ objectSize = 0;
+
+ // Process component-wise operations
+ for (int i = 0; i < objectSize; i++) {
+ switch (op) {
+ case EOpNegative:
+ switch (getType().getBasicType()) {
+ case EbtDouble:
+ case EbtFloat16:
+ case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break;
+ case EbtInt8: newConstArray[i].setI8Const(-unionArray[i].getI8Const()); break;
+ case EbtUint8: newConstArray[i].setU8Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU8Const()))); break;
+ case EbtInt16: newConstArray[i].setI16Const(-unionArray[i].getI16Const()); break;
+ case EbtUint16:newConstArray[i].setU16Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU16Const()))); break;
+ case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
+ case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
+ case EbtInt64: newConstArray[i].setI64Const(-unionArray[i].getI64Const()); break;
+ case EbtUint64: newConstArray[i].setU64Const(static_cast<unsigned long long>(-static_cast<long long>(unionArray[i].getU64Const()))); break;
+ default:
+ return 0;
+ }
+ break;
+ case EOpLogicalNot:
+ case EOpVectorLogicalNot:
+ switch (getType().getBasicType()) {
+ case EbtBool: newConstArray[i].setBConst(!unionArray[i].getBConst()); break;
+ default:
+ return 0;
+ }
+ break;
+ case EOpBitwiseNot:
+ newConstArray[i] = ~unionArray[i];
+ break;
+ case EOpRadians:
+ newConstArray[i].setDConst(unionArray[i].getDConst() * pi / 180.0);
+ break;
+ case EOpDegrees:
+ newConstArray[i].setDConst(unionArray[i].getDConst() * 180.0 / pi);
+ break;
+ case EOpSin:
+ newConstArray[i].setDConst(sin(unionArray[i].getDConst()));
+ break;
+ case EOpCos:
+ newConstArray[i].setDConst(cos(unionArray[i].getDConst()));
+ break;
+ case EOpTan:
+ newConstArray[i].setDConst(tan(unionArray[i].getDConst()));
+ break;
+ case EOpAsin:
+ newConstArray[i].setDConst(asin(unionArray[i].getDConst()));
+ break;
+ case EOpAcos:
+ newConstArray[i].setDConst(acos(unionArray[i].getDConst()));
+ break;
+ case EOpAtan:
+ newConstArray[i].setDConst(atan(unionArray[i].getDConst()));
+ break;
+
+ case EOpDPdx:
+ case EOpDPdy:
+ case EOpFwidth:
+ case EOpDPdxFine:
+ case EOpDPdyFine:
+ case EOpFwidthFine:
+ case EOpDPdxCoarse:
+ case EOpDPdyCoarse:
+ case EOpFwidthCoarse:
+ // The derivatives are all mandated to create a constant 0.
+ newConstArray[i].setDConst(0.0);
+ break;
+
+ case EOpExp:
+ newConstArray[i].setDConst(exp(unionArray[i].getDConst()));
+ break;
+ case EOpLog:
+ newConstArray[i].setDConst(log(unionArray[i].getDConst()));
+ break;
+ case EOpExp2:
+ {
+ const double inv_log2_e = 0.69314718055994530941723212145818;
+ newConstArray[i].setDConst(exp(unionArray[i].getDConst() * inv_log2_e));
+ break;
+ }
+ case EOpLog2:
+ {
+ const double log2_e = 1.4426950408889634073599246810019;
+ newConstArray[i].setDConst(log2_e * log(unionArray[i].getDConst()));
+ break;
+ }
+ case EOpSqrt:
+ newConstArray[i].setDConst(sqrt(unionArray[i].getDConst()));
+ break;
+ case EOpInverseSqrt:
+ newConstArray[i].setDConst(1.0 / sqrt(unionArray[i].getDConst()));
+ break;
+
+ case EOpAbs:
+ if (unionArray[i].getType() == EbtDouble)
+ newConstArray[i].setDConst(fabs(unionArray[i].getDConst()));
+ else if (unionArray[i].getType() == EbtInt)
+ newConstArray[i].setIConst(abs(unionArray[i].getIConst()));
+ else
+ newConstArray[i] = unionArray[i];
+ break;
+ case EOpSign:
+    #define SIGN(X) ((X) == 0 ? 0 : ((X) < 0 ? -1 : 1))
+ if (unionArray[i].getType() == EbtDouble)
+ newConstArray[i].setDConst(SIGN(unionArray[i].getDConst()));
+ else
+ newConstArray[i].setIConst(SIGN(unionArray[i].getIConst()));
+ break;
+ case EOpFloor:
+ newConstArray[i].setDConst(floor(unionArray[i].getDConst()));
+ break;
+ case EOpTrunc:
+ if (unionArray[i].getDConst() > 0)
+ newConstArray[i].setDConst(floor(unionArray[i].getDConst()));
+ else
+ newConstArray[i].setDConst(ceil(unionArray[i].getDConst()));
+ break;
+ case EOpRound:
+ newConstArray[i].setDConst(floor(0.5 + unionArray[i].getDConst()));
+ break;
+ case EOpRoundEven:
+ {
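+            // Round half-way cases to the nearest even integer: when floor(x)
+            // is even, x.5 rounds down; when it is odd, x.5 rounds up.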
+ double flr = floor(unionArray[i].getDConst());
+ bool even = flr / 2.0 == floor(flr / 2.0);
+ double rounded = even ? ceil(unionArray[i].getDConst() - 0.5) : floor(unionArray[i].getDConst() + 0.5);
+ newConstArray[i].setDConst(rounded);
+ break;
+ }
+ case EOpCeil:
+ newConstArray[i].setDConst(ceil(unionArray[i].getDConst()));
+ break;
+ case EOpFract:
+ {
+ double x = unionArray[i].getDConst();
+ newConstArray[i].setDConst(x - floor(x));
+ break;
+ }
+
+ case EOpIsNan:
+ {
+ newConstArray[i].setBConst(isNan(unionArray[i].getDConst()));
+ break;
+ }
+ case EOpIsInf:
+ {
+ newConstArray[i].setBConst(isInf(unionArray[i].getDConst()));
+ break;
+ }
+
+ case EOpConvInt8ToBool:
+ newConstArray[i].setBConst(unionArray[i].getI8Const() != 0); break;
+ case EOpConvUint8ToBool:
+ newConstArray[i].setBConst(unionArray[i].getU8Const() != 0); break;
+ case EOpConvInt16ToBool:
+ newConstArray[i].setBConst(unionArray[i].getI16Const() != 0); break;
+ case EOpConvUint16ToBool:
+ newConstArray[i].setBConst(unionArray[i].getU16Const() != 0); break;
+ case EOpConvIntToBool:
+ newConstArray[i].setBConst(unionArray[i].getIConst() != 0); break;
+ case EOpConvUintToBool:
+ newConstArray[i].setBConst(unionArray[i].getUConst() != 0); break;
+ case EOpConvInt64ToBool:
+ newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
+ case EOpConvUint64ToBool:
+            newConstArray[i].setBConst(unionArray[i].getU64Const() != 0); break;
+ case EOpConvFloat16ToBool:
+ newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+ case EOpConvFloatToBool:
+ newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+ case EOpConvDoubleToBool:
+ newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
+
+ case EOpConvBoolToInt8:
+ newConstArray[i].setI8Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint8:
+ newConstArray[i].setU8Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToInt:
+ newConstArray[i].setIConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint:
+ newConstArray[i].setUConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getBConst()); break;
+ case EOpConvBoolToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToFloat:
+ newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+ case EOpConvBoolToDouble:
+ newConstArray[i].setDConst(unionArray[i].getBConst()); break;
+
+ case EOpConvInt8ToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToInt:
+ newConstArray[i].setIConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint8:
+ newConstArray[i].setU8Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint:
+ newConstArray[i].setUConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getI8Const()); break;
+ case EOpConvUint8ToInt8:
+ newConstArray[i].setI8Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToInt:
+ newConstArray[i].setIConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToUint:
+ newConstArray[i].setUConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getU8Const()); break;
+ case EOpConvInt8ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+ case EOpConvInt8ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getI8Const()); break;
+ case EOpConvUint8ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+ case EOpConvUint8ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getU8Const()); break;
+
+ case EOpConvInt16ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI16Const())); break;
+ case EOpConvInt16ToInt:
+ newConstArray[i].setIConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI16Const())); break;
+ case EOpConvInt16ToUint16:
+ newConstArray[i].setU16Const(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToUint:
+ newConstArray[i].setUConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getI16Const()); break;
+ case EOpConvUint16ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU16Const())); break;
+ case EOpConvUint16ToInt16:
+ newConstArray[i].setI16Const(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToInt:
+ newConstArray[i].setIConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU16Const())); break;
+
+ case EOpConvUint16ToUint:
+ newConstArray[i].setUConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getU16Const()); break;
+ case EOpConvInt16ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+ case EOpConvInt16ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getI16Const()); break;
+ case EOpConvUint16ToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+ case EOpConvUint16ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getU16Const()); break;
+
+ case EOpConvIntToInt8:
+ newConstArray[i].setI8Const((signed char)unionArray[i].getIConst()); break;
+ case EOpConvIntToInt16:
+ newConstArray[i].setI16Const((signed short)unionArray[i].getIConst()); break;
+ case EOpConvIntToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getIConst()); break;
+ case EOpConvIntToUint8:
+ newConstArray[i].setU8Const((unsigned char)unionArray[i].getIConst()); break;
+ case EOpConvIntToUint16:
+            newConstArray[i].setU16Const((unsigned short)unionArray[i].getIConst()); break;
+ case EOpConvIntToUint:
+ newConstArray[i].setUConst(unionArray[i].getIConst()); break;
+ case EOpConvIntToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getIConst()); break;
+
+ case EOpConvUintToInt8:
+ newConstArray[i].setI8Const((signed char)unionArray[i].getUConst()); break;
+ case EOpConvUintToInt16:
+ newConstArray[i].setI16Const((signed short)unionArray[i].getUConst()); break;
+ case EOpConvUintToInt:
+ newConstArray[i].setIConst(unionArray[i].getUConst()); break;
+ case EOpConvUintToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getUConst()); break;
+ case EOpConvUintToUint8:
+ newConstArray[i].setU8Const((unsigned char)unionArray[i].getUConst()); break;
+ case EOpConvUintToUint16:
+ newConstArray[i].setU16Const((unsigned short)unionArray[i].getUConst()); break;
+ case EOpConvUintToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getUConst()); break;
+ case EOpConvIntToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+ case EOpConvIntToFloat:
+ newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+ case EOpConvIntToDouble:
+ newConstArray[i].setDConst(unionArray[i].getIConst()); break;
+ case EOpConvUintToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+ case EOpConvUintToFloat:
+ newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+ case EOpConvUintToDouble:
+ newConstArray[i].setDConst(unionArray[i].getUConst()); break;
+ case EOpConvInt64ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToUint64:
+ newConstArray[i].setU64Const(unionArray[i].getI64Const()); break;
+ case EOpConvUint64ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToInt64:
+ newConstArray[i].setI64Const(unionArray[i].getU64Const()); break;
+ case EOpConvUint64ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getU64Const())); break;
+ case EOpConvInt64ToFloat16:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToFloat:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+ case EOpConvInt64ToDouble:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getI64Const())); break;
+ case EOpConvUint64ToFloat16:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToFloat:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+ case EOpConvUint64ToDouble:
+ newConstArray[i].setDConst(static_cast<double>(unionArray[i].getU64Const())); break;
+ case EOpConvFloat16ToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToInt64:
+ newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToUint64:
+ newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloat16ToFloat:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvFloat16ToDouble:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvFloatToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToInt64:
+ newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToUint64:
+ newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+ case EOpConvFloatToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvFloatToDouble:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvDoubleToInt8:
+ newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToInt16:
+ newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToInt:
+ newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToInt64:
+ newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint8:
+ newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint16:
+ newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint:
+ newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToUint64:
+ newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
+ case EOpConvDoubleToFloat16:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvDoubleToFloat:
+ newConstArray[i].setDConst(unionArray[i].getDConst()); break;
+ case EOpConvPtrToUint64:
+ case EOpConvUint64ToPtr:
+ case EOpConstructReference:
+ newConstArray[i].setU64Const(unionArray[i].getU64Const()); break;
+
+
+
+ // TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
+
+ case EOpSinh:
+ case EOpCosh:
+ case EOpTanh:
+ case EOpAsinh:
+ case EOpAcosh:
+ case EOpAtanh:
+
+ case EOpFloatBitsToInt:
+ case EOpFloatBitsToUint:
+ case EOpIntBitsToFloat:
+ case EOpUintBitsToFloat:
+ case EOpDoubleBitsToInt64:
+ case EOpDoubleBitsToUint64:
+ case EOpInt64BitsToDouble:
+ case EOpUint64BitsToDouble:
+ case EOpFloat16BitsToInt16:
+ case EOpFloat16BitsToUint16:
+ case EOpInt16BitsToFloat16:
+ case EOpUint16BitsToFloat16:
+ default:
+ return 0;
+ }
+ }
+
+ TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType);
+ newNode->getWritableType().getQualifier().storage = EvqConst;
+ newNode->setLoc(getLoc());
+
+ return newNode;
+}
+
+//
+// Do constant folding for an aggregate node that has all its children
+// as constants and an operator that requires constant folding.
+//
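+// For example (illustrative): a constant expression containing
+// pow(vec2(2.0), vec2(3.0)) folds here to vec2(8.0).
+//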
+TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
+{
+ if (aggrNode == nullptr)
+ return aggrNode;
+
+ if (! areAllChildConst(aggrNode))
+ return aggrNode;
+
+ if (aggrNode->isConstructor())
+ return foldConstructor(aggrNode);
+
+ TIntermSequence& children = aggrNode->getSequence();
+
+    // First, see if this is an operation to constant fold; kick out if not.
+    // If it is, determine the size of the result.
+
+ bool componentwise = false; // will also say componentwise if a scalar argument gets repeated to make per-component results
+ int objectSize;
+ switch (aggrNode->getOp()) {
+ case EOpAtan:
+ case EOpPow:
+ case EOpMin:
+ case EOpMax:
+ case EOpMix:
+ case EOpClamp:
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ case EOpVectorEqual:
+ case EOpVectorNotEqual:
+ componentwise = true;
+ objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+ break;
+ case EOpCross:
+ case EOpReflect:
+ case EOpRefract:
+ case EOpFaceForward:
+ objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+ break;
+ case EOpDistance:
+ case EOpDot:
+ objectSize = 1;
+ break;
+ case EOpOuterProduct:
+ objectSize = children[0]->getAsTyped()->getType().getVectorSize() *
+ children[1]->getAsTyped()->getType().getVectorSize();
+ break;
+ case EOpStep:
+ componentwise = true;
+ objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(),
+ children[1]->getAsTyped()->getType().getVectorSize());
+ break;
+ case EOpSmoothStep:
+ componentwise = true;
+ objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(),
+ children[2]->getAsTyped()->getType().getVectorSize());
+ break;
+ default:
+ return aggrNode;
+ }
+ TConstUnionArray newConstArray(objectSize);
+
+ TVector<TConstUnionArray> childConstUnions;
+ for (unsigned int arg = 0; arg < children.size(); ++arg)
+ childConstUnions.push_back(children[arg]->getAsConstantUnion()->getConstArray());
+
+ if (componentwise) {
+ for (int comp = 0; comp < objectSize; comp++) {
+
+ // some arguments are scalars instead of matching vectors; simulate a smear
+ int arg0comp = std::min(comp, children[0]->getAsTyped()->getType().getVectorSize() - 1);
+ int arg1comp = 0;
+ if (children.size() > 1)
+ arg1comp = std::min(comp, children[1]->getAsTyped()->getType().getVectorSize() - 1);
+ int arg2comp = 0;
+ if (children.size() > 2)
+ arg2comp = std::min(comp, children[2]->getAsTyped()->getType().getVectorSize() - 1);
+
+ switch (aggrNode->getOp()) {
+ case EOpAtan:
+ newConstArray[comp].setDConst(atan2(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EOpPow:
+ newConstArray[comp].setDConst(pow(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EOpMin:
+ switch(children[0]->getAsTyped()->getBasicType()) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ newConstArray[comp].setDConst(std::min(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EbtInt8:
+ newConstArray[comp].setI8Const(std::min(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
+ break;
+ case EbtUint8:
+ newConstArray[comp].setU8Const(std::min(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()));
+ break;
+ case EbtInt16:
+ newConstArray[comp].setI16Const(std::min(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()));
+ break;
+ case EbtUint16:
+ newConstArray[comp].setU16Const(std::min(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
+ break;
+ case EbtInt:
+ newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
+ break;
+ case EbtUint:
+ newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
+ break;
+ case EbtInt64:
+ newConstArray[comp].setI64Const(std::min(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
+ break;
+ case EbtUint64:
+ newConstArray[comp].setU64Const(std::min(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
+ break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EOpMax:
+ switch(children[0]->getAsTyped()->getBasicType()) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ newConstArray[comp].setDConst(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
+ break;
+ case EbtInt8:
+ newConstArray[comp].setI8Const(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
+ break;
+ case EbtUint8:
+ newConstArray[comp].setU8Const(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()));
+ break;
+ case EbtInt16:
+ newConstArray[comp].setI16Const(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()));
+ break;
+ case EbtUint16:
+ newConstArray[comp].setU16Const(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
+ break;
+ case EbtInt:
+ newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
+ break;
+ case EbtUint:
+ newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
+ break;
+ case EbtInt64:
+ newConstArray[comp].setI64Const(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
+ break;
+ case EbtUint64:
+ newConstArray[comp].setU64Const(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
+ break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EOpClamp:
+ switch(children[0]->getAsTyped()->getBasicType()) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ newConstArray[comp].setDConst(std::min(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()),
+ childConstUnions[2][arg2comp].getDConst()));
+ break;
+ case EbtInt8:
+ newConstArray[comp].setI8Const(std::min(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()),
+ childConstUnions[2][arg2comp].getI8Const()));
+ break;
+ case EbtUint8:
+ newConstArray[comp].setU8Const(std::min(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()),
+ childConstUnions[2][arg2comp].getU8Const()));
+ break;
+ case EbtInt16:
+ newConstArray[comp].setI16Const(std::min(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()),
+ childConstUnions[2][arg2comp].getI16Const()));
+ break;
+ case EbtUint16:
+ newConstArray[comp].setU16Const(std::min(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()),
+ childConstUnions[2][arg2comp].getU16Const()));
+ break;
+ case EbtInt:
+ newConstArray[comp].setIConst(std::min(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()),
+ childConstUnions[2][arg2comp].getIConst()));
+ break;
+ case EbtUint:
+ newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()),
+ childConstUnions[2][arg2comp].getUConst()));
+ break;
+ case EbtInt64:
+ newConstArray[comp].setI64Const(std::min(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()),
+ childConstUnions[2][arg2comp].getI64Const()));
+ break;
+ case EbtUint64:
+ newConstArray[comp].setU64Const(std::min(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()),
+ childConstUnions[2][arg2comp].getU64Const()));
+ break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EOpLessThan:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp]);
+ break;
+ case EOpGreaterThan:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp]);
+ break;
+ case EOpLessThanEqual:
+ newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp]));
+ break;
+ case EOpGreaterThanEqual:
+ newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp]));
+ break;
+ case EOpVectorEqual:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] == childConstUnions[1][arg1comp]);
+ break;
+ case EOpVectorNotEqual:
+ newConstArray[comp].setBConst(childConstUnions[0][arg0comp] != childConstUnions[1][arg1comp]);
+ break;
+ case EOpMix:
+ if (children[2]->getAsTyped()->getBasicType() == EbtBool)
+ newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst() ? childConstUnions[1][arg1comp].getDConst() :
+ childConstUnions[0][arg0comp].getDConst());
+ else
+ newConstArray[comp].setDConst(childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) +
+ childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst());
+ break;
+ case EOpStep:
+ newConstArray[comp].setDConst(childConstUnions[1][arg1comp].getDConst() < childConstUnions[0][arg0comp].getDConst() ? 0.0 : 1.0);
+ break;
+ case EOpSmoothStep:
+ {
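+                // smoothstep(edge0, edge1, x): clamp the normalized position
+                // t = (x - edge0) / (edge1 - edge0) to [0, 1], then apply the
+                // Hermite polynomial t*t*(3 - 2*t).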
+ double t = (childConstUnions[2][arg2comp].getDConst() - childConstUnions[0][arg0comp].getDConst()) /
+ (childConstUnions[1][arg1comp].getDConst() - childConstUnions[0][arg0comp].getDConst());
+ if (t < 0.0)
+ t = 0.0;
+ if (t > 1.0)
+ t = 1.0;
+ newConstArray[comp].setDConst(t * t * (3.0 - 2.0 * t));
+ break;
+ }
+ default:
+ return aggrNode;
+ }
+ }
+ } else {
+ // Non-componentwise...
+
+ int numComps = children[0]->getAsConstantUnion()->getType().computeNumComponents();
+ double dot;
+
+ switch (aggrNode->getOp()) {
+ case EOpDistance:
+ {
+ double sum = 0.0;
+ for (int comp = 0; comp < numComps; ++comp) {
+ double diff = childConstUnions[1][comp].getDConst() - childConstUnions[0][comp].getDConst();
+ sum += diff * diff;
+ }
+ newConstArray[0].setDConst(sqrt(sum));
+ break;
+ }
+ case EOpDot:
+ newConstArray[0].setDConst(childConstUnions[0].dot(childConstUnions[1]));
+ break;
+ case EOpCross:
+ newConstArray[0] = childConstUnions[0][1] * childConstUnions[1][2] - childConstUnions[0][2] * childConstUnions[1][1];
+ newConstArray[1] = childConstUnions[0][2] * childConstUnions[1][0] - childConstUnions[0][0] * childConstUnions[1][2];
+ newConstArray[2] = childConstUnions[0][0] * childConstUnions[1][1] - childConstUnions[0][1] * childConstUnions[1][0];
+ break;
+ case EOpFaceForward:
+ // If dot(Nref, I) < 0 return N, otherwise return -N: Arguments are (N, I, Nref).
+ dot = childConstUnions[1].dot(childConstUnions[2]);
+ for (int comp = 0; comp < numComps; ++comp) {
+ if (dot < 0.0)
+ newConstArray[comp] = childConstUnions[0][comp];
+ else
+ newConstArray[comp].setDConst(-childConstUnions[0][comp].getDConst());
+ }
+ break;
+ case EOpReflect:
+ // I - 2 * dot(N, I) * N: Arguments are (I, N).
+ dot = childConstUnions[0].dot(childConstUnions[1]);
+ dot *= 2.0;
+ for (int comp = 0; comp < numComps; ++comp)
+ newConstArray[comp].setDConst(childConstUnions[0][comp].getDConst() - dot * childConstUnions[1][comp].getDConst());
+ break;
+ case EOpRefract:
+ {
+ // Arguments are (I, N, eta).
+ // k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))
+ // if (k < 0.0)
+ // return dvec(0.0)
+ // else
+ // return eta * I - (eta * dot(N, I) + sqrt(k)) * N
+ dot = childConstUnions[0].dot(childConstUnions[1]);
+ double eta = childConstUnions[2][0].getDConst();
+ double k = 1.0 - eta * eta * (1.0 - dot * dot);
+ if (k < 0.0) {
+ for (int comp = 0; comp < numComps; ++comp)
+ newConstArray[comp].setDConst(0.0);
+ } else {
+ for (int comp = 0; comp < numComps; ++comp)
+ newConstArray[comp].setDConst(eta * childConstUnions[0][comp].getDConst() - (eta * dot + sqrt(k)) * childConstUnions[1][comp].getDConst());
+ }
+ break;
+ }
+ case EOpOuterProduct:
+ {
+ int numRows = numComps;
+ int numCols = children[1]->getAsConstantUnion()->getType().computeNumComponents();
+ for (int row = 0; row < numRows; ++row)
+ for (int col = 0; col < numCols; ++col)
+ newConstArray[col * numRows + row] = childConstUnions[0][row] * childConstUnions[1][col];
+ break;
+ }
+ default:
+ return aggrNode;
+ }
+ }
+
+ TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, aggrNode->getType());
+ newNode->getWritableType().getQualifier().storage = EvqConst;
+ newNode->setLoc(aggrNode->getLoc());
+
+ return newNode;
+}
+
+bool TIntermediate::areAllChildConst(TIntermAggregate* aggrNode)
+{
+    // check if all the child nodes are constants so that they can be inserted into
+    // the parent node
+    if (aggrNode) {
+        TIntermSequence& childSequenceVector = aggrNode->getSequence();
+        for (TIntermSequence::iterator p = childSequenceVector.begin();
+                                       p != childSequenceVector.end(); p++) {
+            if (!(*p)->getAsTyped()->getAsConstantUnion())
+                return false;
+        }
+    }
+
+    return true;
+}
+
+TIntermTyped* TIntermediate::foldConstructor(TIntermAggregate* aggrNode)
+{
+ bool error = false;
+
+ TConstUnionArray unionArray(aggrNode->getType().computeNumComponents());
+ if (aggrNode->getSequence().size() == 1)
+ error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType(), true);
+ else
+ error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType());
+
+ if (error)
+ return aggrNode;
+
+ return addConstantUnion(unionArray, aggrNode->getType(), aggrNode->getLoc());
+}
+
+//
+// Constant folding of a bracket (array-style) dereference or struct-like dot
+// dereference. Can handle anything except a multi-character swizzle, though
+// all swizzles may go to foldSwizzle().
+//
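+// For example (illustrative): with const vec4 v = vec4(1.0, 2.0, 3.0, 4.0),
+// the dereference v[2] folds to the constant 3.0.
+//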
+TIntermTyped* TIntermediate::foldDereference(TIntermTyped* node, int index, const TSourceLoc& loc)
+{
+ TType dereferencedType(node->getType(), index);
+ dereferencedType.getQualifier().storage = EvqConst;
+ TIntermTyped* result = 0;
+ int size = dereferencedType.computeNumComponents();
+
+    // arrays, vectors, and matrices all use simple multiplicative math,
+    // while structures need to add up heterogeneous members
+ int start;
+ if (node->getType().isCoopMat())
+ start = 0;
+ else if (node->isArray() || ! node->isStruct())
+ start = size * index;
+ else {
+ // it is a structure
+ assert(node->isStruct());
+ start = 0;
+ for (int i = 0; i < index; ++i)
+ start += (*node->getType().getStruct())[i].type->computeNumComponents();
+ }
+
+ result = addConstantUnion(TConstUnionArray(node->getAsConstantUnion()->getConstArray(), start, size), node->getType(), loc);
+
+ if (result == 0)
+ result = node;
+ else
+ result->setType(dereferencedType);
+
+ return result;
+}
+
+//
+// Make a constant vector node or constant scalar node, representing a given
+// constant vector and constant swizzle into it.
+//
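+// For example (illustrative): with const vec4 v = vec4(1.0, 2.0, 3.0, 4.0),
+// the swizzle v.zx folds to the constant vec2(3.0, 1.0).
+//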
+TIntermTyped* TIntermediate::foldSwizzle(TIntermTyped* node, TSwizzleSelectors<TVectorSelector>& selectors, const TSourceLoc& loc)
+{
+ const TConstUnionArray& unionArray = node->getAsConstantUnion()->getConstArray();
+ TConstUnionArray constArray(selectors.size());
+
+ for (int i = 0; i < selectors.size(); i++)
+ constArray[i] = unionArray[selectors[i]];
+
+ TIntermTyped* result = addConstantUnion(constArray, node->getType(), loc);
+
+ if (result == 0)
+ result = node;
+ else
+ result->setType(TType(node->getBasicType(), EvqConst, selectors.size()));
+
+ return result;
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/InfoSink.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/InfoSink.cpp
new file mode 100644
index 0000000..d00c422
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/InfoSink.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/InfoSink.h"
+
+#include <cstring>
+
+namespace glslang {
+
+void TInfoSinkBase::append(const char* s)
+{
+ if (outputStream & EString) {
+ if (s == nullptr)
+ sink.append("(null)");
+ else {
+ checkMem(strlen(s));
+ sink.append(s);
+ }
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger)
+// OutputDebugString(s);
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%s", s);
+}
+
+void TInfoSinkBase::append(int count, char c)
+{
+ if (outputStream & EString) {
+ checkMem(count);
+ sink.append(count, c);
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger) {
+// char str[2];
+// str[0] = c;
+// str[1] = '\0';
+// OutputDebugString(str);
+// }
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%c", c);
+}
+
+void TInfoSinkBase::append(const TPersistString& t)
+{
+ if (outputStream & EString) {
+ checkMem(t.size());
+ sink.append(t);
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger)
+// OutputDebugString(t.c_str());
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%s", t.c_str());
+}
+
+void TInfoSinkBase::append(const TString& t)
+{
+ if (outputStream & EString) {
+ checkMem(t.size());
+ sink.append(t.c_str());
+ }
+
+//#ifdef _WIN32
+// if (outputStream & EDebugger)
+// OutputDebugString(t.c_str());
+//#endif
+
+ if (outputStream & EStdOut)
+ fprintf(stdout, "%s", t.c_str());
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Initialize.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/Initialize.cpp
new file mode 100644
index 0000000..0498b48
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Initialize.cpp
@@ -0,0 +1,9634 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Create strings that declare built-in definitions, add built-ins programmatically
+// that cannot be expressed in the strings, and establish mappings between
+// built-in functions and operators.
+//
+// Where to put a built-in:
+// TBuiltIns::initialize(version,profile) context-independent textual built-ins; add them to the right string
+// TBuiltIns::initialize(resources,...) context-dependent textual built-ins; add them to the right string
+// TBuiltIns::identifyBuiltIns(...,symbolTable) context-independent programmatic additions/mappings to the symbol table,
+// including identifying what extensions are needed if a version does not allow a symbol
+// TBuiltIns::identifyBuiltIns(...,symbolTable, resources) context-dependent programmatic additions/mappings to the symbol table,
+// including identifying what extensions are needed if a version does not allow a symbol
+//
+
+#include "../Include/intermediate.h"
+#include "Initialize.h"
+
+namespace glslang {
+
+// TODO: ARB_compatibility: do full extension support
+const bool ARBCompatibility = true;
+
+const bool ForwardCompatibility = false;
+
+// change this back to false if you depend on the textual spellings of texturing calls when consuming the AST
+// Using PureOperatorBuiltins=false is deprecated.
+bool PureOperatorBuiltins = true;
+
+inline bool IncludeLegacy(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+ return profile != EEsProfile && (version <= 130 || (spvVersion.spv == 0 && ARBCompatibility) || profile == ECompatibilityProfile);
+}
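+// For example (illustrative): a desktop GLSL 130 compile, or any desktop
+// compile not targeting SPIR-V, includes the legacy built-ins; an ES compile
+// never does.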
+
+// Construct TBuiltInParseables base class. This can be used for language-common constructs.
+TBuiltInParseables::TBuiltInParseables()
+{
+}
+
+// Destroy TBuiltInParseables.
+TBuiltInParseables::~TBuiltInParseables()
+{
+}
+
+TBuiltIns::TBuiltIns()
+{
+ // Set up textual representations for making all the permutations
+ // of texturing/imaging functions.
+ prefixes[EbtFloat] = "";
+#ifdef AMD_EXTENSIONS
+ prefixes[EbtFloat16] = "f16";
+#endif
+ prefixes[EbtInt8] = "i8";
+ prefixes[EbtUint8] = "u8";
+ prefixes[EbtInt16] = "i16";
+ prefixes[EbtUint16] = "u16";
+ prefixes[EbtInt] = "i";
+ prefixes[EbtUint] = "u";
+ postfixes[2] = "2";
+ postfixes[3] = "3";
+ postfixes[4] = "4";
+
+ // Map from symbolic class of texturing dimension to numeric dimensions.
+ dimMap[Esd1D] = 1;
+ dimMap[Esd2D] = 2;
+ dimMap[EsdRect] = 2;
+ dimMap[Esd3D] = 3;
+ dimMap[EsdCube] = 3;
+ dimMap[EsdBuffer] = 1;
+    dimMap[EsdSubpass] = 2;  // potentially unused for now
+}
+
+TBuiltIns::~TBuiltIns()
+{
+}
+
+
+//
+// Add all context-independent built-in functions and variables that are present
+// for the given version and profile. Share common ones across stages, otherwise
+// make stage-specific entries.
+//
+// Most built-in variables can be added as simple text strings. Some need to
+// be added programmatically, which is done later in IdentifyBuiltIns() below.
+//
+void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+ //============================================================================
+ //
+    // Prototypes for built-in functions used repeatedly by different shaders
+ //
+ //============================================================================
+
+ //
+    // Derivative Functions.
+ //
+ TString derivatives (
+ "float dFdx(float p);"
+ "vec2 dFdx(vec2 p);"
+ "vec3 dFdx(vec3 p);"
+ "vec4 dFdx(vec4 p);"
+
+ "float dFdy(float p);"
+ "vec2 dFdy(vec2 p);"
+ "vec3 dFdy(vec3 p);"
+ "vec4 dFdy(vec4 p);"
+
+ "float fwidth(float p);"
+ "vec2 fwidth(vec2 p);"
+ "vec3 fwidth(vec3 p);"
+ "vec4 fwidth(vec4 p);"
+ );
+
+ TString derivativeControls (
+ "float dFdxFine(float p);"
+ "vec2 dFdxFine(vec2 p);"
+ "vec3 dFdxFine(vec3 p);"
+ "vec4 dFdxFine(vec4 p);"
+
+ "float dFdyFine(float p);"
+ "vec2 dFdyFine(vec2 p);"
+ "vec3 dFdyFine(vec3 p);"
+ "vec4 dFdyFine(vec4 p);"
+
+ "float fwidthFine(float p);"
+ "vec2 fwidthFine(vec2 p);"
+ "vec3 fwidthFine(vec3 p);"
+ "vec4 fwidthFine(vec4 p);"
+
+ "float dFdxCoarse(float p);"
+ "vec2 dFdxCoarse(vec2 p);"
+ "vec3 dFdxCoarse(vec3 p);"
+ "vec4 dFdxCoarse(vec4 p);"
+
+ "float dFdyCoarse(float p);"
+ "vec2 dFdyCoarse(vec2 p);"
+ "vec3 dFdyCoarse(vec3 p);"
+ "vec4 dFdyCoarse(vec4 p);"
+
+ "float fwidthCoarse(float p);"
+ "vec2 fwidthCoarse(vec2 p);"
+ "vec3 fwidthCoarse(vec3 p);"
+ "vec4 fwidthCoarse(vec4 p);"
+ );
+
+ TString derivativesAndControl16bits (
+ "float16_t dFdx(float16_t);"
+ "f16vec2 dFdx(f16vec2);"
+ "f16vec3 dFdx(f16vec3);"
+ "f16vec4 dFdx(f16vec4);"
+
+ "float16_t dFdy(float16_t);"
+ "f16vec2 dFdy(f16vec2);"
+ "f16vec3 dFdy(f16vec3);"
+ "f16vec4 dFdy(f16vec4);"
+
+ "float16_t dFdxFine(float16_t);"
+ "f16vec2 dFdxFine(f16vec2);"
+ "f16vec3 dFdxFine(f16vec3);"
+ "f16vec4 dFdxFine(f16vec4);"
+
+ "float16_t dFdyFine(float16_t);"
+ "f16vec2 dFdyFine(f16vec2);"
+ "f16vec3 dFdyFine(f16vec3);"
+ "f16vec4 dFdyFine(f16vec4);"
+
+ "float16_t dFdxCoarse(float16_t);"
+ "f16vec2 dFdxCoarse(f16vec2);"
+ "f16vec3 dFdxCoarse(f16vec3);"
+ "f16vec4 dFdxCoarse(f16vec4);"
+
+ "float16_t dFdyCoarse(float16_t);"
+ "f16vec2 dFdyCoarse(f16vec2);"
+ "f16vec3 dFdyCoarse(f16vec3);"
+ "f16vec4 dFdyCoarse(f16vec4);"
+
+ "float16_t fwidth(float16_t);"
+ "f16vec2 fwidth(f16vec2);"
+ "f16vec3 fwidth(f16vec3);"
+ "f16vec4 fwidth(f16vec4);"
+
+ "float16_t fwidthFine(float16_t);"
+ "f16vec2 fwidthFine(f16vec2);"
+ "f16vec3 fwidthFine(f16vec3);"
+ "f16vec4 fwidthFine(f16vec4);"
+
+ "float16_t fwidthCoarse(float16_t);"
+ "f16vec2 fwidthCoarse(f16vec2);"
+ "f16vec3 fwidthCoarse(f16vec3);"
+ "f16vec4 fwidthCoarse(f16vec4);"
+ );
+
+ TString derivativesAndControl64bits (
+ "float64_t dFdx(float64_t);"
+ "f64vec2 dFdx(f64vec2);"
+ "f64vec3 dFdx(f64vec3);"
+ "f64vec4 dFdx(f64vec4);"
+
+ "float64_t dFdy(float64_t);"
+ "f64vec2 dFdy(f64vec2);"
+ "f64vec3 dFdy(f64vec3);"
+ "f64vec4 dFdy(f64vec4);"
+
+ "float64_t dFdxFine(float64_t);"
+ "f64vec2 dFdxFine(f64vec2);"
+ "f64vec3 dFdxFine(f64vec3);"
+ "f64vec4 dFdxFine(f64vec4);"
+
+ "float64_t dFdyFine(float64_t);"
+ "f64vec2 dFdyFine(f64vec2);"
+ "f64vec3 dFdyFine(f64vec3);"
+ "f64vec4 dFdyFine(f64vec4);"
+
+ "float64_t dFdxCoarse(float64_t);"
+ "f64vec2 dFdxCoarse(f64vec2);"
+ "f64vec3 dFdxCoarse(f64vec3);"
+ "f64vec4 dFdxCoarse(f64vec4);"
+
+ "float64_t dFdyCoarse(float64_t);"
+ "f64vec2 dFdyCoarse(f64vec2);"
+ "f64vec3 dFdyCoarse(f64vec3);"
+ "f64vec4 dFdyCoarse(f64vec4);"
+
+ "float64_t fwidth(float64_t);"
+ "f64vec2 fwidth(f64vec2);"
+ "f64vec3 fwidth(f64vec3);"
+ "f64vec4 fwidth(f64vec4);"
+
+ "float64_t fwidthFine(float64_t);"
+ "f64vec2 fwidthFine(f64vec2);"
+ "f64vec3 fwidthFine(f64vec3);"
+ "f64vec4 fwidthFine(f64vec4);"
+
+ "float64_t fwidthCoarse(float64_t);"
+ "f64vec2 fwidthCoarse(f64vec2);"
+ "f64vec3 fwidthCoarse(f64vec3);"
+ "f64vec4 fwidthCoarse(f64vec4);"
+ );
+
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by both vertex and fragment shaders.
+ //
+ //============================================================================
+
+ //
+ // Angle and Trigonometric Functions.
+ //
+ commonBuiltins.append(
+ "float radians(float degrees);"
+ "vec2 radians(vec2 degrees);"
+ "vec3 radians(vec3 degrees);"
+ "vec4 radians(vec4 degrees);"
+
+ "float degrees(float radians);"
+ "vec2 degrees(vec2 radians);"
+ "vec3 degrees(vec3 radians);"
+ "vec4 degrees(vec4 radians);"
+
+ "float sin(float angle);"
+ "vec2 sin(vec2 angle);"
+ "vec3 sin(vec3 angle);"
+ "vec4 sin(vec4 angle);"
+
+ "float cos(float angle);"
+ "vec2 cos(vec2 angle);"
+ "vec3 cos(vec3 angle);"
+ "vec4 cos(vec4 angle);"
+
+ "float tan(float angle);"
+ "vec2 tan(vec2 angle);"
+ "vec3 tan(vec3 angle);"
+ "vec4 tan(vec4 angle);"
+
+ "float asin(float x);"
+ "vec2 asin(vec2 x);"
+ "vec3 asin(vec3 x);"
+ "vec4 asin(vec4 x);"
+
+ "float acos(float x);"
+ "vec2 acos(vec2 x);"
+ "vec3 acos(vec3 x);"
+ "vec4 acos(vec4 x);"
+
+ "float atan(float y, float x);"
+ "vec2 atan(vec2 y, vec2 x);"
+ "vec3 atan(vec3 y, vec3 x);"
+ "vec4 atan(vec4 y, vec4 x);"
+
+ "float atan(float y_over_x);"
+ "vec2 atan(vec2 y_over_x);"
+ "vec3 atan(vec3 y_over_x);"
+ "vec4 atan(vec4 y_over_x);"
+
+ "\n");
+
+ if (version >= 130) {
+ commonBuiltins.append(
+ "float sinh(float angle);"
+ "vec2 sinh(vec2 angle);"
+ "vec3 sinh(vec3 angle);"
+ "vec4 sinh(vec4 angle);"
+
+ "float cosh(float angle);"
+ "vec2 cosh(vec2 angle);"
+ "vec3 cosh(vec3 angle);"
+ "vec4 cosh(vec4 angle);"
+
+ "float tanh(float angle);"
+ "vec2 tanh(vec2 angle);"
+ "vec3 tanh(vec3 angle);"
+ "vec4 tanh(vec4 angle);"
+
+ "float asinh(float x);"
+ "vec2 asinh(vec2 x);"
+ "vec3 asinh(vec3 x);"
+ "vec4 asinh(vec4 x);"
+
+ "float acosh(float x);"
+ "vec2 acosh(vec2 x);"
+ "vec3 acosh(vec3 x);"
+ "vec4 acosh(vec4 x);"
+
+ "float atanh(float y_over_x);"
+ "vec2 atanh(vec2 y_over_x);"
+ "vec3 atanh(vec3 y_over_x);"
+ "vec4 atanh(vec4 y_over_x);"
+
+ "\n");
+ }
+
+ //
+ // Exponential Functions.
+ //
+ commonBuiltins.append(
+ "float pow(float x, float y);"
+ "vec2 pow(vec2 x, vec2 y);"
+ "vec3 pow(vec3 x, vec3 y);"
+ "vec4 pow(vec4 x, vec4 y);"
+
+ "float exp(float x);"
+ "vec2 exp(vec2 x);"
+ "vec3 exp(vec3 x);"
+ "vec4 exp(vec4 x);"
+
+ "float log(float x);"
+ "vec2 log(vec2 x);"
+ "vec3 log(vec3 x);"
+ "vec4 log(vec4 x);"
+
+ "float exp2(float x);"
+ "vec2 exp2(vec2 x);"
+ "vec3 exp2(vec3 x);"
+ "vec4 exp2(vec4 x);"
+
+ "float log2(float x);"
+ "vec2 log2(vec2 x);"
+ "vec3 log2(vec3 x);"
+ "vec4 log2(vec4 x);"
+
+ "float sqrt(float x);"
+ "vec2 sqrt(vec2 x);"
+ "vec3 sqrt(vec3 x);"
+ "vec4 sqrt(vec4 x);"
+
+ "float inversesqrt(float x);"
+ "vec2 inversesqrt(vec2 x);"
+ "vec3 inversesqrt(vec3 x);"
+ "vec4 inversesqrt(vec4 x);"
+
+ "\n");
+
+ //
+ // Common Functions.
+ //
+ commonBuiltins.append(
+ "float abs(float x);"
+ "vec2 abs(vec2 x);"
+ "vec3 abs(vec3 x);"
+ "vec4 abs(vec4 x);"
+
+ "float sign(float x);"
+ "vec2 sign(vec2 x);"
+ "vec3 sign(vec3 x);"
+ "vec4 sign(vec4 x);"
+
+ "float floor(float x);"
+ "vec2 floor(vec2 x);"
+ "vec3 floor(vec3 x);"
+ "vec4 floor(vec4 x);"
+
+ "float ceil(float x);"
+ "vec2 ceil(vec2 x);"
+ "vec3 ceil(vec3 x);"
+ "vec4 ceil(vec4 x);"
+
+ "float fract(float x);"
+ "vec2 fract(vec2 x);"
+ "vec3 fract(vec3 x);"
+ "vec4 fract(vec4 x);"
+
+ "float mod(float x, float y);"
+ "vec2 mod(vec2 x, float y);"
+ "vec3 mod(vec3 x, float y);"
+ "vec4 mod(vec4 x, float y);"
+ "vec2 mod(vec2 x, vec2 y);"
+ "vec3 mod(vec3 x, vec3 y);"
+ "vec4 mod(vec4 x, vec4 y);"
+
+ "float min(float x, float y);"
+ "vec2 min(vec2 x, float y);"
+ "vec3 min(vec3 x, float y);"
+ "vec4 min(vec4 x, float y);"
+ "vec2 min(vec2 x, vec2 y);"
+ "vec3 min(vec3 x, vec3 y);"
+ "vec4 min(vec4 x, vec4 y);"
+
+ "float max(float x, float y);"
+ "vec2 max(vec2 x, float y);"
+ "vec3 max(vec3 x, float y);"
+ "vec4 max(vec4 x, float y);"
+ "vec2 max(vec2 x, vec2 y);"
+ "vec3 max(vec3 x, vec3 y);"
+ "vec4 max(vec4 x, vec4 y);"
+
+ "float clamp(float x, float minVal, float maxVal);"
+ "vec2 clamp(vec2 x, float minVal, float maxVal);"
+ "vec3 clamp(vec3 x, float minVal, float maxVal);"
+ "vec4 clamp(vec4 x, float minVal, float maxVal);"
+ "vec2 clamp(vec2 x, vec2 minVal, vec2 maxVal);"
+ "vec3 clamp(vec3 x, vec3 minVal, vec3 maxVal);"
+ "vec4 clamp(vec4 x, vec4 minVal, vec4 maxVal);"
+
+ "float mix(float x, float y, float a);"
+ "vec2 mix(vec2 x, vec2 y, float a);"
+ "vec3 mix(vec3 x, vec3 y, float a);"
+ "vec4 mix(vec4 x, vec4 y, float a);"
+ "vec2 mix(vec2 x, vec2 y, vec2 a);"
+ "vec3 mix(vec3 x, vec3 y, vec3 a);"
+ "vec4 mix(vec4 x, vec4 y, vec4 a);"
+
+ "float step(float edge, float x);"
+ "vec2 step(vec2 edge, vec2 x);"
+ "vec3 step(vec3 edge, vec3 x);"
+ "vec4 step(vec4 edge, vec4 x);"
+ "vec2 step(float edge, vec2 x);"
+ "vec3 step(float edge, vec3 x);"
+ "vec4 step(float edge, vec4 x);"
+
+ "float smoothstep(float edge0, float edge1, float x);"
+ "vec2 smoothstep(vec2 edge0, vec2 edge1, vec2 x);"
+ "vec3 smoothstep(vec3 edge0, vec3 edge1, vec3 x);"
+ "vec4 smoothstep(vec4 edge0, vec4 edge1, vec4 x);"
+ "vec2 smoothstep(float edge0, float edge1, vec2 x);"
+ "vec3 smoothstep(float edge0, float edge1, vec3 x);"
+ "vec4 smoothstep(float edge0, float edge1, vec4 x);"
+
+ "\n");
+
+ if (version >= 130) {
+ commonBuiltins.append(
+ " int abs( int x);"
+ "ivec2 abs(ivec2 x);"
+ "ivec3 abs(ivec3 x);"
+ "ivec4 abs(ivec4 x);"
+
+ " int sign( int x);"
+ "ivec2 sign(ivec2 x);"
+ "ivec3 sign(ivec3 x);"
+ "ivec4 sign(ivec4 x);"
+
+ "float trunc(float x);"
+ "vec2 trunc(vec2 x);"
+ "vec3 trunc(vec3 x);"
+ "vec4 trunc(vec4 x);"
+
+ "float round(float x);"
+ "vec2 round(vec2 x);"
+ "vec3 round(vec3 x);"
+ "vec4 round(vec4 x);"
+
+ "float roundEven(float x);"
+ "vec2 roundEven(vec2 x);"
+ "vec3 roundEven(vec3 x);"
+ "vec4 roundEven(vec4 x);"
+
+ "float modf(float, out float);"
+ "vec2 modf(vec2, out vec2 );"
+ "vec3 modf(vec3, out vec3 );"
+ "vec4 modf(vec4, out vec4 );"
+
+ " int min(int x, int y);"
+ "ivec2 min(ivec2 x, int y);"
+ "ivec3 min(ivec3 x, int y);"
+ "ivec4 min(ivec4 x, int y);"
+ "ivec2 min(ivec2 x, ivec2 y);"
+ "ivec3 min(ivec3 x, ivec3 y);"
+ "ivec4 min(ivec4 x, ivec4 y);"
+
+ " uint min(uint x, uint y);"
+ "uvec2 min(uvec2 x, uint y);"
+ "uvec3 min(uvec3 x, uint y);"
+ "uvec4 min(uvec4 x, uint y);"
+ "uvec2 min(uvec2 x, uvec2 y);"
+ "uvec3 min(uvec3 x, uvec3 y);"
+ "uvec4 min(uvec4 x, uvec4 y);"
+
+ " int max(int x, int y);"
+ "ivec2 max(ivec2 x, int y);"
+ "ivec3 max(ivec3 x, int y);"
+ "ivec4 max(ivec4 x, int y);"
+ "ivec2 max(ivec2 x, ivec2 y);"
+ "ivec3 max(ivec3 x, ivec3 y);"
+ "ivec4 max(ivec4 x, ivec4 y);"
+
+ " uint max(uint x, uint y);"
+ "uvec2 max(uvec2 x, uint y);"
+ "uvec3 max(uvec3 x, uint y);"
+ "uvec4 max(uvec4 x, uint y);"
+ "uvec2 max(uvec2 x, uvec2 y);"
+ "uvec3 max(uvec3 x, uvec3 y);"
+ "uvec4 max(uvec4 x, uvec4 y);"
+
+ "int clamp(int x, int minVal, int maxVal);"
+ "ivec2 clamp(ivec2 x, int minVal, int maxVal);"
+ "ivec3 clamp(ivec3 x, int minVal, int maxVal);"
+ "ivec4 clamp(ivec4 x, int minVal, int maxVal);"
+ "ivec2 clamp(ivec2 x, ivec2 minVal, ivec2 maxVal);"
+ "ivec3 clamp(ivec3 x, ivec3 minVal, ivec3 maxVal);"
+ "ivec4 clamp(ivec4 x, ivec4 minVal, ivec4 maxVal);"
+
+ "uint clamp(uint x, uint minVal, uint maxVal);"
+ "uvec2 clamp(uvec2 x, uint minVal, uint maxVal);"
+ "uvec3 clamp(uvec3 x, uint minVal, uint maxVal);"
+ "uvec4 clamp(uvec4 x, uint minVal, uint maxVal);"
+ "uvec2 clamp(uvec2 x, uvec2 minVal, uvec2 maxVal);"
+ "uvec3 clamp(uvec3 x, uvec3 minVal, uvec3 maxVal);"
+ "uvec4 clamp(uvec4 x, uvec4 minVal, uvec4 maxVal);"
+
+ "float mix(float x, float y, bool a);"
+ "vec2 mix(vec2 x, vec2 y, bvec2 a);"
+ "vec3 mix(vec3 x, vec3 y, bvec3 a);"
+ "vec4 mix(vec4 x, vec4 y, bvec4 a);"
+
+ "bool isnan(float x);"
+ "bvec2 isnan(vec2 x);"
+ "bvec3 isnan(vec3 x);"
+ "bvec4 isnan(vec4 x);"
+
+ "bool isinf(float x);"
+ "bvec2 isinf(vec2 x);"
+ "bvec3 isinf(vec3 x);"
+ "bvec4 isinf(vec4 x);"
+
+ "\n");
+ }
+
+ //
+    // Double-precision functions added in desktop 4.00 (fma, frexp, ldexp, and pack/unpack are handled separately below)
+ //
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+
+ "double sqrt(double);"
+ "dvec2 sqrt(dvec2);"
+ "dvec3 sqrt(dvec3);"
+ "dvec4 sqrt(dvec4);"
+
+ "double inversesqrt(double);"
+ "dvec2 inversesqrt(dvec2);"
+ "dvec3 inversesqrt(dvec3);"
+ "dvec4 inversesqrt(dvec4);"
+
+ "double abs(double);"
+ "dvec2 abs(dvec2);"
+ "dvec3 abs(dvec3);"
+ "dvec4 abs(dvec4);"
+
+ "double sign(double);"
+ "dvec2 sign(dvec2);"
+ "dvec3 sign(dvec3);"
+ "dvec4 sign(dvec4);"
+
+ "double floor(double);"
+ "dvec2 floor(dvec2);"
+ "dvec3 floor(dvec3);"
+ "dvec4 floor(dvec4);"
+
+ "double trunc(double);"
+ "dvec2 trunc(dvec2);"
+ "dvec3 trunc(dvec3);"
+ "dvec4 trunc(dvec4);"
+
+ "double round(double);"
+ "dvec2 round(dvec2);"
+ "dvec3 round(dvec3);"
+ "dvec4 round(dvec4);"
+
+ "double roundEven(double);"
+ "dvec2 roundEven(dvec2);"
+ "dvec3 roundEven(dvec3);"
+ "dvec4 roundEven(dvec4);"
+
+ "double ceil(double);"
+ "dvec2 ceil(dvec2);"
+ "dvec3 ceil(dvec3);"
+ "dvec4 ceil(dvec4);"
+
+ "double fract(double);"
+ "dvec2 fract(dvec2);"
+ "dvec3 fract(dvec3);"
+ "dvec4 fract(dvec4);"
+
+ "double mod(double, double);"
+ "dvec2 mod(dvec2 , double);"
+ "dvec3 mod(dvec3 , double);"
+ "dvec4 mod(dvec4 , double);"
+ "dvec2 mod(dvec2 , dvec2);"
+ "dvec3 mod(dvec3 , dvec3);"
+ "dvec4 mod(dvec4 , dvec4);"
+
+ "double modf(double, out double);"
+ "dvec2 modf(dvec2, out dvec2);"
+ "dvec3 modf(dvec3, out dvec3);"
+ "dvec4 modf(dvec4, out dvec4);"
+
+ "double min(double, double);"
+ "dvec2 min(dvec2, double);"
+ "dvec3 min(dvec3, double);"
+ "dvec4 min(dvec4, double);"
+ "dvec2 min(dvec2, dvec2);"
+ "dvec3 min(dvec3, dvec3);"
+ "dvec4 min(dvec4, dvec4);"
+
+ "double max(double, double);"
+ "dvec2 max(dvec2 , double);"
+ "dvec3 max(dvec3 , double);"
+ "dvec4 max(dvec4 , double);"
+ "dvec2 max(dvec2 , dvec2);"
+ "dvec3 max(dvec3 , dvec3);"
+ "dvec4 max(dvec4 , dvec4);"
+
+ "double clamp(double, double, double);"
+ "dvec2 clamp(dvec2 , double, double);"
+ "dvec3 clamp(dvec3 , double, double);"
+ "dvec4 clamp(dvec4 , double, double);"
+ "dvec2 clamp(dvec2 , dvec2 , dvec2);"
+ "dvec3 clamp(dvec3 , dvec3 , dvec3);"
+ "dvec4 clamp(dvec4 , dvec4 , dvec4);"
+
+ "double mix(double, double, double);"
+ "dvec2 mix(dvec2, dvec2, double);"
+ "dvec3 mix(dvec3, dvec3, double);"
+ "dvec4 mix(dvec4, dvec4, double);"
+ "dvec2 mix(dvec2, dvec2, dvec2);"
+ "dvec3 mix(dvec3, dvec3, dvec3);"
+ "dvec4 mix(dvec4, dvec4, dvec4);"
+ "double mix(double, double, bool);"
+ "dvec2 mix(dvec2, dvec2, bvec2);"
+ "dvec3 mix(dvec3, dvec3, bvec3);"
+ "dvec4 mix(dvec4, dvec4, bvec4);"
+
+ "double step(double, double);"
+ "dvec2 step(dvec2 , dvec2);"
+ "dvec3 step(dvec3 , dvec3);"
+ "dvec4 step(dvec4 , dvec4);"
+ "dvec2 step(double, dvec2);"
+ "dvec3 step(double, dvec3);"
+ "dvec4 step(double, dvec4);"
+
+ "double smoothstep(double, double, double);"
+ "dvec2 smoothstep(dvec2 , dvec2 , dvec2);"
+ "dvec3 smoothstep(dvec3 , dvec3 , dvec3);"
+ "dvec4 smoothstep(dvec4 , dvec4 , dvec4);"
+ "dvec2 smoothstep(double, double, dvec2);"
+ "dvec3 smoothstep(double, double, dvec3);"
+ "dvec4 smoothstep(double, double, dvec4);"
+
+ "bool isnan(double);"
+ "bvec2 isnan(dvec2);"
+ "bvec3 isnan(dvec3);"
+ "bvec4 isnan(dvec4);"
+
+ "bool isinf(double);"
+ "bvec2 isinf(dvec2);"
+ "bvec3 isinf(dvec3);"
+ "bvec4 isinf(dvec4);"
+
+ "double length(double);"
+ "double length(dvec2);"
+ "double length(dvec3);"
+ "double length(dvec4);"
+
+ "double distance(double, double);"
+ "double distance(dvec2 , dvec2);"
+ "double distance(dvec3 , dvec3);"
+ "double distance(dvec4 , dvec4);"
+
+ "double dot(double, double);"
+ "double dot(dvec2 , dvec2);"
+ "double dot(dvec3 , dvec3);"
+ "double dot(dvec4 , dvec4);"
+
+ "dvec3 cross(dvec3, dvec3);"
+
+ "double normalize(double);"
+ "dvec2 normalize(dvec2);"
+ "dvec3 normalize(dvec3);"
+ "dvec4 normalize(dvec4);"
+
+ "double faceforward(double, double, double);"
+ "dvec2 faceforward(dvec2, dvec2, dvec2);"
+ "dvec3 faceforward(dvec3, dvec3, dvec3);"
+ "dvec4 faceforward(dvec4, dvec4, dvec4);"
+
+ "double reflect(double, double);"
+ "dvec2 reflect(dvec2 , dvec2 );"
+ "dvec3 reflect(dvec3 , dvec3 );"
+ "dvec4 reflect(dvec4 , dvec4 );"
+
+ "double refract(double, double, double);"
+ "dvec2 refract(dvec2 , dvec2 , double);"
+ "dvec3 refract(dvec3 , dvec3 , double);"
+ "dvec4 refract(dvec4 , dvec4 , double);"
+
+ "dmat2 matrixCompMult(dmat2, dmat2);"
+ "dmat3 matrixCompMult(dmat3, dmat3);"
+ "dmat4 matrixCompMult(dmat4, dmat4);"
+ "dmat2x3 matrixCompMult(dmat2x3, dmat2x3);"
+ "dmat2x4 matrixCompMult(dmat2x4, dmat2x4);"
+ "dmat3x2 matrixCompMult(dmat3x2, dmat3x2);"
+ "dmat3x4 matrixCompMult(dmat3x4, dmat3x4);"
+ "dmat4x2 matrixCompMult(dmat4x2, dmat4x2);"
+ "dmat4x3 matrixCompMult(dmat4x3, dmat4x3);"
+
+ "dmat2 outerProduct(dvec2, dvec2);"
+ "dmat3 outerProduct(dvec3, dvec3);"
+ "dmat4 outerProduct(dvec4, dvec4);"
+ "dmat2x3 outerProduct(dvec3, dvec2);"
+ "dmat3x2 outerProduct(dvec2, dvec3);"
+ "dmat2x4 outerProduct(dvec4, dvec2);"
+ "dmat4x2 outerProduct(dvec2, dvec4);"
+ "dmat3x4 outerProduct(dvec4, dvec3);"
+ "dmat4x3 outerProduct(dvec3, dvec4);"
+
+ "dmat2 transpose(dmat2);"
+ "dmat3 transpose(dmat3);"
+ "dmat4 transpose(dmat4);"
+ "dmat2x3 transpose(dmat3x2);"
+ "dmat3x2 transpose(dmat2x3);"
+ "dmat2x4 transpose(dmat4x2);"
+ "dmat4x2 transpose(dmat2x4);"
+ "dmat3x4 transpose(dmat4x3);"
+ "dmat4x3 transpose(dmat3x4);"
+
+ "double determinant(dmat2);"
+ "double determinant(dmat3);"
+ "double determinant(dmat4);"
+
+ "dmat2 inverse(dmat2);"
+ "dmat3 inverse(dmat3);"
+ "dmat4 inverse(dmat4);"
+
+ "bvec2 lessThan(dvec2, dvec2);"
+ "bvec3 lessThan(dvec3, dvec3);"
+ "bvec4 lessThan(dvec4, dvec4);"
+
+ "bvec2 lessThanEqual(dvec2, dvec2);"
+ "bvec3 lessThanEqual(dvec3, dvec3);"
+ "bvec4 lessThanEqual(dvec4, dvec4);"
+
+ "bvec2 greaterThan(dvec2, dvec2);"
+ "bvec3 greaterThan(dvec3, dvec3);"
+ "bvec4 greaterThan(dvec4, dvec4);"
+
+ "bvec2 greaterThanEqual(dvec2, dvec2);"
+ "bvec3 greaterThanEqual(dvec3, dvec3);"
+ "bvec4 greaterThanEqual(dvec4, dvec4);"
+
+ "bvec2 equal(dvec2, dvec2);"
+ "bvec3 equal(dvec3, dvec3);"
+ "bvec4 equal(dvec4, dvec4);"
+
+ "bvec2 notEqual(dvec2, dvec2);"
+ "bvec3 notEqual(dvec3, dvec3);"
+ "bvec4 notEqual(dvec4, dvec4);"
+
+ "\n");
+ }
+
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+
+ "int64_t abs(int64_t);"
+ "i64vec2 abs(i64vec2);"
+ "i64vec3 abs(i64vec3);"
+ "i64vec4 abs(i64vec4);"
+
+ "int64_t sign(int64_t);"
+ "i64vec2 sign(i64vec2);"
+ "i64vec3 sign(i64vec3);"
+ "i64vec4 sign(i64vec4);"
+
+ "int64_t min(int64_t, int64_t);"
+ "i64vec2 min(i64vec2, int64_t);"
+ "i64vec3 min(i64vec3, int64_t);"
+ "i64vec4 min(i64vec4, int64_t);"
+ "i64vec2 min(i64vec2, i64vec2);"
+ "i64vec3 min(i64vec3, i64vec3);"
+ "i64vec4 min(i64vec4, i64vec4);"
+ "uint64_t min(uint64_t, uint64_t);"
+ "u64vec2 min(u64vec2, uint64_t);"
+ "u64vec3 min(u64vec3, uint64_t);"
+ "u64vec4 min(u64vec4, uint64_t);"
+ "u64vec2 min(u64vec2, u64vec2);"
+ "u64vec3 min(u64vec3, u64vec3);"
+ "u64vec4 min(u64vec4, u64vec4);"
+
+ "int64_t max(int64_t, int64_t);"
+ "i64vec2 max(i64vec2, int64_t);"
+ "i64vec3 max(i64vec3, int64_t);"
+ "i64vec4 max(i64vec4, int64_t);"
+ "i64vec2 max(i64vec2, i64vec2);"
+ "i64vec3 max(i64vec3, i64vec3);"
+ "i64vec4 max(i64vec4, i64vec4);"
+ "uint64_t max(uint64_t, uint64_t);"
+ "u64vec2 max(u64vec2, uint64_t);"
+ "u64vec3 max(u64vec3, uint64_t);"
+ "u64vec4 max(u64vec4, uint64_t);"
+ "u64vec2 max(u64vec2, u64vec2);"
+ "u64vec3 max(u64vec3, u64vec3);"
+ "u64vec4 max(u64vec4, u64vec4);"
+
+ "int64_t clamp(int64_t, int64_t, int64_t);"
+ "i64vec2 clamp(i64vec2, int64_t, int64_t);"
+ "i64vec3 clamp(i64vec3, int64_t, int64_t);"
+ "i64vec4 clamp(i64vec4, int64_t, int64_t);"
+ "i64vec2 clamp(i64vec2, i64vec2, i64vec2);"
+ "i64vec3 clamp(i64vec3, i64vec3, i64vec3);"
+ "i64vec4 clamp(i64vec4, i64vec4, i64vec4);"
+ "uint64_t clamp(uint64_t, uint64_t, uint64_t);"
+ "u64vec2 clamp(u64vec2, uint64_t, uint64_t);"
+ "u64vec3 clamp(u64vec3, uint64_t, uint64_t);"
+ "u64vec4 clamp(u64vec4, uint64_t, uint64_t);"
+ "u64vec2 clamp(u64vec2, u64vec2, u64vec2);"
+ "u64vec3 clamp(u64vec3, u64vec3, u64vec3);"
+ "u64vec4 clamp(u64vec4, u64vec4, u64vec4);"
+
+ "int64_t mix(int64_t, int64_t, bool);"
+ "i64vec2 mix(i64vec2, i64vec2, bvec2);"
+ "i64vec3 mix(i64vec3, i64vec3, bvec3);"
+ "i64vec4 mix(i64vec4, i64vec4, bvec4);"
+ "uint64_t mix(uint64_t, uint64_t, bool);"
+ "u64vec2 mix(u64vec2, u64vec2, bvec2);"
+ "u64vec3 mix(u64vec3, u64vec3, bvec3);"
+ "u64vec4 mix(u64vec4, u64vec4, bvec4);"
+
+ "int64_t doubleBitsToInt64(double);"
+ "i64vec2 doubleBitsToInt64(dvec2);"
+ "i64vec3 doubleBitsToInt64(dvec3);"
+ "i64vec4 doubleBitsToInt64(dvec4);"
+
+ "uint64_t doubleBitsToUint64(double);"
+ "u64vec2 doubleBitsToUint64(dvec2);"
+ "u64vec3 doubleBitsToUint64(dvec3);"
+ "u64vec4 doubleBitsToUint64(dvec4);"
+
+ "double int64BitsToDouble(int64_t);"
+ "dvec2 int64BitsToDouble(i64vec2);"
+ "dvec3 int64BitsToDouble(i64vec3);"
+ "dvec4 int64BitsToDouble(i64vec4);"
+
+ "double uint64BitsToDouble(uint64_t);"
+ "dvec2 uint64BitsToDouble(u64vec2);"
+ "dvec3 uint64BitsToDouble(u64vec3);"
+ "dvec4 uint64BitsToDouble(u64vec4);"
+
+ "int64_t packInt2x32(ivec2);"
+ "uint64_t packUint2x32(uvec2);"
+ "ivec2 unpackInt2x32(int64_t);"
+ "uvec2 unpackUint2x32(uint64_t);"
+
+ "bvec2 lessThan(i64vec2, i64vec2);"
+ "bvec3 lessThan(i64vec3, i64vec3);"
+ "bvec4 lessThan(i64vec4, i64vec4);"
+ "bvec2 lessThan(u64vec2, u64vec2);"
+ "bvec3 lessThan(u64vec3, u64vec3);"
+ "bvec4 lessThan(u64vec4, u64vec4);"
+
+ "bvec2 lessThanEqual(i64vec2, i64vec2);"
+ "bvec3 lessThanEqual(i64vec3, i64vec3);"
+ "bvec4 lessThanEqual(i64vec4, i64vec4);"
+ "bvec2 lessThanEqual(u64vec2, u64vec2);"
+ "bvec3 lessThanEqual(u64vec3, u64vec3);"
+ "bvec4 lessThanEqual(u64vec4, u64vec4);"
+
+ "bvec2 greaterThan(i64vec2, i64vec2);"
+ "bvec3 greaterThan(i64vec3, i64vec3);"
+ "bvec4 greaterThan(i64vec4, i64vec4);"
+ "bvec2 greaterThan(u64vec2, u64vec2);"
+ "bvec3 greaterThan(u64vec3, u64vec3);"
+ "bvec4 greaterThan(u64vec4, u64vec4);"
+
+ "bvec2 greaterThanEqual(i64vec2, i64vec2);"
+ "bvec3 greaterThanEqual(i64vec3, i64vec3);"
+ "bvec4 greaterThanEqual(i64vec4, i64vec4);"
+ "bvec2 greaterThanEqual(u64vec2, u64vec2);"
+ "bvec3 greaterThanEqual(u64vec3, u64vec3);"
+ "bvec4 greaterThanEqual(u64vec4, u64vec4);"
+
+ "bvec2 equal(i64vec2, i64vec2);"
+ "bvec3 equal(i64vec3, i64vec3);"
+ "bvec4 equal(i64vec4, i64vec4);"
+ "bvec2 equal(u64vec2, u64vec2);"
+ "bvec3 equal(u64vec3, u64vec3);"
+ "bvec4 equal(u64vec4, u64vec4);"
+
+ "bvec2 notEqual(i64vec2, i64vec2);"
+ "bvec3 notEqual(i64vec3, i64vec3);"
+ "bvec4 notEqual(i64vec4, i64vec4);"
+ "bvec2 notEqual(u64vec2, u64vec2);"
+ "bvec3 notEqual(u64vec3, u64vec3);"
+ "bvec4 notEqual(u64vec4, u64vec4);"
+
+ "int findLSB(int64_t);"
+ "ivec2 findLSB(i64vec2);"
+ "ivec3 findLSB(i64vec3);"
+ "ivec4 findLSB(i64vec4);"
+
+ "int findLSB(uint64_t);"
+ "ivec2 findLSB(u64vec2);"
+ "ivec3 findLSB(u64vec3);"
+ "ivec4 findLSB(u64vec4);"
+
+ "int findMSB(int64_t);"
+ "ivec2 findMSB(i64vec2);"
+ "ivec3 findMSB(i64vec3);"
+ "ivec4 findMSB(i64vec4);"
+
+ "int findMSB(uint64_t);"
+ "ivec2 findMSB(u64vec2);"
+ "ivec3 findMSB(u64vec3);"
+ "ivec4 findMSB(u64vec4);"
+
+ "\n"
+ );
+ }
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_shader_trinary_minmax
+ if (profile != EEsProfile && version >= 430) {
+ commonBuiltins.append(
+ "float min3(float, float, float);"
+ "vec2 min3(vec2, vec2, vec2);"
+ "vec3 min3(vec3, vec3, vec3);"
+ "vec4 min3(vec4, vec4, vec4);"
+
+ "int min3(int, int, int);"
+ "ivec2 min3(ivec2, ivec2, ivec2);"
+ "ivec3 min3(ivec3, ivec3, ivec3);"
+ "ivec4 min3(ivec4, ivec4, ivec4);"
+
+ "uint min3(uint, uint, uint);"
+ "uvec2 min3(uvec2, uvec2, uvec2);"
+ "uvec3 min3(uvec3, uvec3, uvec3);"
+ "uvec4 min3(uvec4, uvec4, uvec4);"
+
+ "float max3(float, float, float);"
+ "vec2 max3(vec2, vec2, vec2);"
+ "vec3 max3(vec3, vec3, vec3);"
+ "vec4 max3(vec4, vec4, vec4);"
+
+ "int max3(int, int, int);"
+ "ivec2 max3(ivec2, ivec2, ivec2);"
+ "ivec3 max3(ivec3, ivec3, ivec3);"
+ "ivec4 max3(ivec4, ivec4, ivec4);"
+
+ "uint max3(uint, uint, uint);"
+ "uvec2 max3(uvec2, uvec2, uvec2);"
+ "uvec3 max3(uvec3, uvec3, uvec3);"
+ "uvec4 max3(uvec4, uvec4, uvec4);"
+
+ "float mid3(float, float, float);"
+ "vec2 mid3(vec2, vec2, vec2);"
+ "vec3 mid3(vec3, vec3, vec3);"
+ "vec4 mid3(vec4, vec4, vec4);"
+
+ "int mid3(int, int, int);"
+ "ivec2 mid3(ivec2, ivec2, ivec2);"
+ "ivec3 mid3(ivec3, ivec3, ivec3);"
+ "ivec4 mid3(ivec4, ivec4, ivec4);"
+
+ "uint mid3(uint, uint, uint);"
+ "uvec2 mid3(uvec2, uvec2, uvec2);"
+ "uvec3 mid3(uvec3, uvec3, uvec3);"
+ "uvec4 mid3(uvec4, uvec4, uvec4);"
+
+ "float16_t min3(float16_t, float16_t, float16_t);"
+ "f16vec2 min3(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 min3(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 min3(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t max3(float16_t, float16_t, float16_t);"
+ "f16vec2 max3(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 max3(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 max3(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t mid3(float16_t, float16_t, float16_t);"
+ "f16vec2 mid3(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 mid3(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 mid3(f16vec4, f16vec4, f16vec4);"
+
+ "int16_t min3(int16_t, int16_t, int16_t);"
+ "i16vec2 min3(i16vec2, i16vec2, i16vec2);"
+ "i16vec3 min3(i16vec3, i16vec3, i16vec3);"
+ "i16vec4 min3(i16vec4, i16vec4, i16vec4);"
+
+ "int16_t max3(int16_t, int16_t, int16_t);"
+ "i16vec2 max3(i16vec2, i16vec2, i16vec2);"
+ "i16vec3 max3(i16vec3, i16vec3, i16vec3);"
+ "i16vec4 max3(i16vec4, i16vec4, i16vec4);"
+
+ "int16_t mid3(int16_t, int16_t, int16_t);"
+ "i16vec2 mid3(i16vec2, i16vec2, i16vec2);"
+ "i16vec3 mid3(i16vec3, i16vec3, i16vec3);"
+ "i16vec4 mid3(i16vec4, i16vec4, i16vec4);"
+
+ "uint16_t min3(uint16_t, uint16_t, uint16_t);"
+ "u16vec2 min3(u16vec2, u16vec2, u16vec2);"
+ "u16vec3 min3(u16vec3, u16vec3, u16vec3);"
+ "u16vec4 min3(u16vec4, u16vec4, u16vec4);"
+
+ "uint16_t max3(uint16_t, uint16_t, uint16_t);"
+ "u16vec2 max3(u16vec2, u16vec2, u16vec2);"
+ "u16vec3 max3(u16vec3, u16vec3, u16vec3);"
+ "u16vec4 max3(u16vec4, u16vec4, u16vec4);"
+
+ "uint16_t mid3(uint16_t, uint16_t, uint16_t);"
+ "u16vec2 mid3(u16vec2, u16vec2, u16vec2);"
+ "u16vec3 mid3(u16vec3, u16vec3, u16vec3);"
+ "u16vec4 mid3(u16vec4, u16vec4, u16vec4);"
+
+ "\n"
+ );
+ }
+#endif
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 430)) {
+ commonBuiltins.append(
+ "uint atomicAdd(coherent volatile inout uint, uint);"
+ " int atomicAdd(coherent volatile inout int, int);"
+ "uint atomicAdd(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicAdd(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicMin(coherent volatile inout uint, uint);"
+ " int atomicMin(coherent volatile inout int, int);"
+ "uint atomicMin(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicMin(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicMax(coherent volatile inout uint, uint);"
+ " int atomicMax(coherent volatile inout int, int);"
+ "uint atomicMax(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicMax(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicAnd(coherent volatile inout uint, uint);"
+ " int atomicAnd(coherent volatile inout int, int);"
+ "uint atomicAnd(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicAnd(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicOr (coherent volatile inout uint, uint);"
+ " int atomicOr (coherent volatile inout int, int);"
+ "uint atomicOr (coherent volatile inout uint, uint, int, int, int);"
+ " int atomicOr (coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicXor(coherent volatile inout uint, uint);"
+ " int atomicXor(coherent volatile inout int, int);"
+ "uint atomicXor(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicXor(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicExchange(coherent volatile inout uint, uint);"
+ " int atomicExchange(coherent volatile inout int, int);"
+ "uint atomicExchange(coherent volatile inout uint, uint, int, int, int);"
+ " int atomicExchange(coherent volatile inout int, int, int, int, int);"
+
+ "uint atomicCompSwap(coherent volatile inout uint, uint, uint);"
+ " int atomicCompSwap(coherent volatile inout int, int, int);"
+ "uint atomicCompSwap(coherent volatile inout uint, uint, uint, int, int, int, int, int);"
+ " int atomicCompSwap(coherent volatile inout int, int, int, int, int, int, int, int);"
+
+ "uint atomicLoad(coherent volatile in uint, int, int, int);"
+ " int atomicLoad(coherent volatile in int, int, int, int);"
+
+ "void atomicStore(coherent volatile out uint, uint, int, int, int);"
+ "void atomicStore(coherent volatile out int, int, int, int, int);"
+
+ "\n");
+ }
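+
+    // Note (annotation): the trailing "int, int, int" parameters on the longer
+    // overloads above are, as best understood, the memory scope and semantics
+    // operands from GL_KHR_memory_scope_semantics; the plain two-argument forms
+    // are the original GLSL atomic built-ins.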
+
+ if (profile != EEsProfile && version >= 440) {
+ commonBuiltins.append(
+ "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicMin(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicMin(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicMax(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicMax(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicAnd(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicAnd(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicOr (coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicOr (coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicXor(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicXor(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicAdd(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicAdd(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t);"
+ " int64_t atomicExchange(coherent volatile inout int64_t, int64_t);"
+ "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t, int, int, int);"
+ " int64_t atomicExchange(coherent volatile inout int64_t, int64_t, int, int, int);"
+
+ "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t);"
+ " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t);"
+ "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t, int, int, int, int, int);"
+ " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t, int, int, int, int, int);"
+
+ "uint64_t atomicLoad(coherent volatile in uint64_t, int, int, int);"
+ " int64_t atomicLoad(coherent volatile in int64_t, int, int, int);"
+
+ "void atomicStore(coherent volatile out uint64_t, uint64_t, int, int, int);"
+ "void atomicStore(coherent volatile out int64_t, int64_t, int, int, int);"
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 450)) {
+ commonBuiltins.append(
+ "int mix(int x, int y, bool a);"
+ "ivec2 mix(ivec2 x, ivec2 y, bvec2 a);"
+ "ivec3 mix(ivec3 x, ivec3 y, bvec3 a);"
+ "ivec4 mix(ivec4 x, ivec4 y, bvec4 a);"
+
+ "uint mix(uint x, uint y, bool a);"
+ "uvec2 mix(uvec2 x, uvec2 y, bvec2 a);"
+ "uvec3 mix(uvec3 x, uvec3 y, bvec3 a);"
+ "uvec4 mix(uvec4 x, uvec4 y, bvec4 a);"
+
+ "bool mix(bool x, bool y, bool a);"
+ "bvec2 mix(bvec2 x, bvec2 y, bvec2 a);"
+ "bvec3 mix(bvec3 x, bvec3 y, bvec3 a);"
+ "bvec4 mix(bvec4 x, bvec4 y, bvec4 a);"
+
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && version >= 330)) {
+ commonBuiltins.append(
+ "int floatBitsToInt(highp float value);"
+ "ivec2 floatBitsToInt(highp vec2 value);"
+ "ivec3 floatBitsToInt(highp vec3 value);"
+ "ivec4 floatBitsToInt(highp vec4 value);"
+
+ "uint floatBitsToUint(highp float value);"
+ "uvec2 floatBitsToUint(highp vec2 value);"
+ "uvec3 floatBitsToUint(highp vec3 value);"
+ "uvec4 floatBitsToUint(highp vec4 value);"
+
+ "float intBitsToFloat(highp int value);"
+ "vec2 intBitsToFloat(highp ivec2 value);"
+ "vec3 intBitsToFloat(highp ivec3 value);"
+ "vec4 intBitsToFloat(highp ivec4 value);"
+
+ "float uintBitsToFloat(highp uint value);"
+ "vec2 uintBitsToFloat(highp uvec2 value);"
+ "vec3 uintBitsToFloat(highp uvec3 value);"
+ "vec4 uintBitsToFloat(highp uvec4 value);"
+
+ "\n");
+ }
+
+ if ((profile != EEsProfile && version >= 400) ||
+ (profile == EEsProfile && version >= 310)) { // GL_OES_gpu_shader5
+
+ commonBuiltins.append(
+ "float fma(float, float, float );"
+ "vec2 fma(vec2, vec2, vec2 );"
+ "vec3 fma(vec3, vec3, vec3 );"
+ "vec4 fma(vec4, vec4, vec4 );"
+ "\n");
+
+ if (profile != EEsProfile) {
+ commonBuiltins.append(
+ "double fma(double, double, double);"
+ "dvec2 fma(dvec2, dvec2, dvec2 );"
+ "dvec3 fma(dvec3, dvec3, dvec3 );"
+ "dvec4 fma(dvec4, dvec4, dvec4 );"
+ "\n");
+ }
+ }
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ "float frexp(highp float, out highp int);"
+ "vec2 frexp(highp vec2, out highp ivec2);"
+ "vec3 frexp(highp vec3, out highp ivec3);"
+ "vec4 frexp(highp vec4, out highp ivec4);"
+
+ "float ldexp(highp float, highp int);"
+ "vec2 ldexp(highp vec2, highp ivec2);"
+ "vec3 ldexp(highp vec3, highp ivec3);"
+ "vec4 ldexp(highp vec4, highp ivec4);"
+
+ "\n");
+ }
+
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ "double frexp(double, out int);"
+ "dvec2 frexp( dvec2, out ivec2);"
+ "dvec3 frexp( dvec3, out ivec3);"
+ "dvec4 frexp( dvec4, out ivec4);"
+
+ "double ldexp(double, int);"
+ "dvec2 ldexp( dvec2, ivec2);"
+ "dvec3 ldexp( dvec3, ivec3);"
+ "dvec4 ldexp( dvec4, ivec4);"
+
+ "double packDouble2x32(uvec2);"
+ "uvec2 unpackDouble2x32(double);"
+
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ "highp uint packUnorm2x16(vec2);"
+ "vec2 unpackUnorm2x16(highp uint);"
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && version >= 420)) {
+ commonBuiltins.append(
+ "highp uint packSnorm2x16(vec2);"
+ " vec2 unpackSnorm2x16(highp uint);"
+ "highp uint packHalf2x16(vec2);"
+ "\n");
+ }
+
+ if (profile == EEsProfile && version >= 300) {
+ commonBuiltins.append(
+ "mediump vec2 unpackHalf2x16(highp uint);"
+ "\n");
+ } else if (profile != EEsProfile && version >= 420) {
+ commonBuiltins.append(
+ " vec2 unpackHalf2x16(highp uint);"
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ "highp uint packSnorm4x8(vec4);"
+ "highp uint packUnorm4x8(vec4);"
+ "\n");
+ }
+
+ if (profile == EEsProfile && version >= 310) {
+ commonBuiltins.append(
+ "mediump vec4 unpackSnorm4x8(highp uint);"
+ "mediump vec4 unpackUnorm4x8(highp uint);"
+ "\n");
+ } else if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ "vec4 unpackSnorm4x8(highp uint);"
+ "vec4 unpackUnorm4x8(highp uint);"
+ "\n");
+ }
+
+ //
+ // Geometric Functions.
+ //
+ commonBuiltins.append(
+ "float length(float x);"
+ "float length(vec2 x);"
+ "float length(vec3 x);"
+ "float length(vec4 x);"
+
+ "float distance(float p0, float p1);"
+ "float distance(vec2 p0, vec2 p1);"
+ "float distance(vec3 p0, vec3 p1);"
+ "float distance(vec4 p0, vec4 p1);"
+
+ "float dot(float x, float y);"
+ "float dot(vec2 x, vec2 y);"
+ "float dot(vec3 x, vec3 y);"
+ "float dot(vec4 x, vec4 y);"
+
+ "vec3 cross(vec3 x, vec3 y);"
+ "float normalize(float x);"
+ "vec2 normalize(vec2 x);"
+ "vec3 normalize(vec3 x);"
+ "vec4 normalize(vec4 x);"
+
+ "float faceforward(float N, float I, float Nref);"
+ "vec2 faceforward(vec2 N, vec2 I, vec2 Nref);"
+ "vec3 faceforward(vec3 N, vec3 I, vec3 Nref);"
+ "vec4 faceforward(vec4 N, vec4 I, vec4 Nref);"
+
+ "float reflect(float I, float N);"
+ "vec2 reflect(vec2 I, vec2 N);"
+ "vec3 reflect(vec3 I, vec3 N);"
+ "vec4 reflect(vec4 I, vec4 N);"
+
+ "float refract(float I, float N, float eta);"
+ "vec2 refract(vec2 I, vec2 N, float eta);"
+ "vec3 refract(vec3 I, vec3 N, float eta);"
+ "vec4 refract(vec4 I, vec4 N, float eta);"
+
+ "\n");
+
+ //
+ // Matrix Functions.
+ //
+ commonBuiltins.append(
+ "mat2 matrixCompMult(mat2 x, mat2 y);"
+ "mat3 matrixCompMult(mat3 x, mat3 y);"
+ "mat4 matrixCompMult(mat4 x, mat4 y);"
+
+ "\n");
+
+ // 120 is correct for both ES and desktop
+ if (version >= 120) {
+ commonBuiltins.append(
+ "mat2 outerProduct(vec2 c, vec2 r);"
+ "mat3 outerProduct(vec3 c, vec3 r);"
+ "mat4 outerProduct(vec4 c, vec4 r);"
+ "mat2x3 outerProduct(vec3 c, vec2 r);"
+ "mat3x2 outerProduct(vec2 c, vec3 r);"
+ "mat2x4 outerProduct(vec4 c, vec2 r);"
+ "mat4x2 outerProduct(vec2 c, vec4 r);"
+ "mat3x4 outerProduct(vec4 c, vec3 r);"
+ "mat4x3 outerProduct(vec3 c, vec4 r);"
+
+ "mat2 transpose(mat2 m);"
+ "mat3 transpose(mat3 m);"
+ "mat4 transpose(mat4 m);"
+ "mat2x3 transpose(mat3x2 m);"
+ "mat3x2 transpose(mat2x3 m);"
+ "mat2x4 transpose(mat4x2 m);"
+ "mat4x2 transpose(mat2x4 m);"
+ "mat3x4 transpose(mat4x3 m);"
+ "mat4x3 transpose(mat3x4 m);"
+
+ "mat2x3 matrixCompMult(mat2x3, mat2x3);"
+ "mat2x4 matrixCompMult(mat2x4, mat2x4);"
+ "mat3x2 matrixCompMult(mat3x2, mat3x2);"
+ "mat3x4 matrixCompMult(mat3x4, mat3x4);"
+ "mat4x2 matrixCompMult(mat4x2, mat4x2);"
+ "mat4x3 matrixCompMult(mat4x3, mat4x3);"
+
+ "\n");
+
+ // 150 is correct for both ES and desktop
+ if (version >= 150) {
+ commonBuiltins.append(
+ "float determinant(mat2 m);"
+ "float determinant(mat3 m);"
+ "float determinant(mat4 m);"
+
+ "mat2 inverse(mat2 m);"
+ "mat3 inverse(mat3 m);"
+ "mat4 inverse(mat4 m);"
+
+ "\n");
+ }
+ }
+
+ //
+ // Vector relational functions.
+ //
+ commonBuiltins.append(
+ "bvec2 lessThan(vec2 x, vec2 y);"
+ "bvec3 lessThan(vec3 x, vec3 y);"
+ "bvec4 lessThan(vec4 x, vec4 y);"
+
+ "bvec2 lessThan(ivec2 x, ivec2 y);"
+ "bvec3 lessThan(ivec3 x, ivec3 y);"
+ "bvec4 lessThan(ivec4 x, ivec4 y);"
+
+ "bvec2 lessThanEqual(vec2 x, vec2 y);"
+ "bvec3 lessThanEqual(vec3 x, vec3 y);"
+ "bvec4 lessThanEqual(vec4 x, vec4 y);"
+
+ "bvec2 lessThanEqual(ivec2 x, ivec2 y);"
+ "bvec3 lessThanEqual(ivec3 x, ivec3 y);"
+ "bvec4 lessThanEqual(ivec4 x, ivec4 y);"
+
+ "bvec2 greaterThan(vec2 x, vec2 y);"
+ "bvec3 greaterThan(vec3 x, vec3 y);"
+ "bvec4 greaterThan(vec4 x, vec4 y);"
+
+ "bvec2 greaterThan(ivec2 x, ivec2 y);"
+ "bvec3 greaterThan(ivec3 x, ivec3 y);"
+ "bvec4 greaterThan(ivec4 x, ivec4 y);"
+
+ "bvec2 greaterThanEqual(vec2 x, vec2 y);"
+ "bvec3 greaterThanEqual(vec3 x, vec3 y);"
+ "bvec4 greaterThanEqual(vec4 x, vec4 y);"
+
+ "bvec2 greaterThanEqual(ivec2 x, ivec2 y);"
+ "bvec3 greaterThanEqual(ivec3 x, ivec3 y);"
+ "bvec4 greaterThanEqual(ivec4 x, ivec4 y);"
+
+ "bvec2 equal(vec2 x, vec2 y);"
+ "bvec3 equal(vec3 x, vec3 y);"
+ "bvec4 equal(vec4 x, vec4 y);"
+
+ "bvec2 equal(ivec2 x, ivec2 y);"
+ "bvec3 equal(ivec3 x, ivec3 y);"
+ "bvec4 equal(ivec4 x, ivec4 y);"
+
+ "bvec2 equal(bvec2 x, bvec2 y);"
+ "bvec3 equal(bvec3 x, bvec3 y);"
+ "bvec4 equal(bvec4 x, bvec4 y);"
+
+ "bvec2 notEqual(vec2 x, vec2 y);"
+ "bvec3 notEqual(vec3 x, vec3 y);"
+ "bvec4 notEqual(vec4 x, vec4 y);"
+
+ "bvec2 notEqual(ivec2 x, ivec2 y);"
+ "bvec3 notEqual(ivec3 x, ivec3 y);"
+ "bvec4 notEqual(ivec4 x, ivec4 y);"
+
+ "bvec2 notEqual(bvec2 x, bvec2 y);"
+ "bvec3 notEqual(bvec3 x, bvec3 y);"
+ "bvec4 notEqual(bvec4 x, bvec4 y);"
+
+ "bool any(bvec2 x);"
+ "bool any(bvec3 x);"
+ "bool any(bvec4 x);"
+
+ "bool all(bvec2 x);"
+ "bool all(bvec3 x);"
+ "bool all(bvec4 x);"
+
+ "bvec2 not(bvec2 x);"
+ "bvec3 not(bvec3 x);"
+ "bvec4 not(bvec4 x);"
+
+ "\n");
+
+ if (version >= 130) {
+ commonBuiltins.append(
+ "bvec2 lessThan(uvec2 x, uvec2 y);"
+ "bvec3 lessThan(uvec3 x, uvec3 y);"
+ "bvec4 lessThan(uvec4 x, uvec4 y);"
+
+ "bvec2 lessThanEqual(uvec2 x, uvec2 y);"
+ "bvec3 lessThanEqual(uvec3 x, uvec3 y);"
+ "bvec4 lessThanEqual(uvec4 x, uvec4 y);"
+
+ "bvec2 greaterThan(uvec2 x, uvec2 y);"
+ "bvec3 greaterThan(uvec3 x, uvec3 y);"
+ "bvec4 greaterThan(uvec4 x, uvec4 y);"
+
+ "bvec2 greaterThanEqual(uvec2 x, uvec2 y);"
+ "bvec3 greaterThanEqual(uvec3 x, uvec3 y);"
+ "bvec4 greaterThanEqual(uvec4 x, uvec4 y);"
+
+ "bvec2 equal(uvec2 x, uvec2 y);"
+ "bvec3 equal(uvec3 x, uvec3 y);"
+ "bvec4 equal(uvec4 x, uvec4 y);"
+
+ "bvec2 notEqual(uvec2 x, uvec2 y);"
+ "bvec3 notEqual(uvec3 x, uvec3 y);"
+ "bvec4 notEqual(uvec4 x, uvec4 y);"
+
+ "\n");
+ }
+
+ //
+    // Original-style texture functions that exist in all stages.
+    // (Per-stage functions appear below.)
+ //
+ if ((profile == EEsProfile && version == 100) ||
+ profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ commonBuiltins.append(
+ "vec4 texture2D(sampler2D, vec2);"
+
+ "vec4 texture2DProj(sampler2D, vec3);"
+ "vec4 texture2DProj(sampler2D, vec4);"
+
+ "vec4 texture3D(sampler3D, vec3);" // OES_texture_3D, but caught by keyword check
+ "vec4 texture3DProj(sampler3D, vec4);" // OES_texture_3D, but caught by keyword check
+
+ "vec4 textureCube(samplerCube, vec3);"
+
+ "\n");
+ }
+ }
+
+ if ( profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ commonBuiltins.append(
+ "vec4 texture1D(sampler1D, float);"
+
+ "vec4 texture1DProj(sampler1D, vec2);"
+ "vec4 texture1DProj(sampler1D, vec4);"
+
+ "vec4 shadow1D(sampler1DShadow, vec3);"
+ "vec4 shadow2D(sampler2DShadow, vec3);"
+ "vec4 shadow1DProj(sampler1DShadow, vec4);"
+ "vec4 shadow2DProj(sampler2DShadow, vec4);"
+
+ "vec4 texture2DRect(sampler2DRect, vec2);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 texture2DRectProj(sampler2DRect, vec3);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 texture2DRectProj(sampler2DRect, vec4);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 shadow2DRect(sampler2DRectShadow, vec3);" // GL_ARB_texture_rectangle, caught by keyword check
+ "vec4 shadow2DRectProj(sampler2DRectShadow, vec4);" // GL_ARB_texture_rectangle, caught by keyword check
+
+ "\n");
+ }
+ }
+
+ if (profile == EEsProfile) {
+ if (spvVersion.spv == 0) {
+ if (version < 300) {
+ commonBuiltins.append(
+ "vec4 texture2D(samplerExternalOES, vec2 coord);" // GL_OES_EGL_image_external
+ "vec4 texture2DProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external
+ "vec4 texture2DProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external
+ "\n");
+ } else {
+ commonBuiltins.append(
+ "highp ivec2 textureSize(samplerExternalOES, int lod);" // GL_OES_EGL_image_external_essl3
+ "vec4 texture(samplerExternalOES, vec2);" // GL_OES_EGL_image_external_essl3
+ "vec4 texture(samplerExternalOES, vec2, float bias);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec3, float bias);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external_essl3
+ "vec4 textureProj(samplerExternalOES, vec4, float bias);" // GL_OES_EGL_image_external_essl3
+ "vec4 texelFetch(samplerExternalOES, ivec2, int lod);" // GL_OES_EGL_image_external_essl3
+ "\n");
+ }
+ commonBuiltins.append(
+ "highp ivec2 textureSize(__samplerExternal2DY2YEXT, int lod);" // GL_EXT_YUV_target
+ "vec4 texture(__samplerExternal2DY2YEXT, vec2);" // GL_EXT_YUV_target
+ "vec4 texture(__samplerExternal2DY2YEXT, vec2, float bias);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec3);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec3, float bias);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec4);" // GL_EXT_YUV_target
+ "vec4 textureProj(__samplerExternal2DY2YEXT, vec4, float bias);" // GL_EXT_YUV_target
+ "vec4 texelFetch(__samplerExternal2DY2YEXT sampler, ivec2, int lod);" // GL_EXT_YUV_target
+ "\n");
+ commonBuiltins.append(
+ "vec4 texture2DGradEXT(sampler2D, vec2, vec2, vec2);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjGradEXT(sampler2D, vec3, vec2, vec2);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjGradEXT(sampler2D, vec4, vec2, vec2);" // GL_EXT_shader_texture_lod
+ "vec4 textureCubeGradEXT(samplerCube, vec3, vec3, vec3);" // GL_EXT_shader_texture_lod
+
+ "float shadow2DEXT(sampler2DShadow, vec3);" // GL_EXT_shadow_samplers
+ "float shadow2DProjEXT(sampler2DShadow, vec4);" // GL_EXT_shadow_samplers
+
+ "\n");
+ }
+ }
+
+ //
+ // Noise functions.
+ //
+ if (spvVersion.spv == 0 && profile != EEsProfile) {
+ commonBuiltins.append(
+ "float noise1(float x);"
+ "float noise1(vec2 x);"
+ "float noise1(vec3 x);"
+ "float noise1(vec4 x);"
+
+ "vec2 noise2(float x);"
+ "vec2 noise2(vec2 x);"
+ "vec2 noise2(vec3 x);"
+ "vec2 noise2(vec4 x);"
+
+ "vec3 noise3(float x);"
+ "vec3 noise3(vec2 x);"
+ "vec3 noise3(vec3 x);"
+ "vec3 noise3(vec4 x);"
+
+ "vec4 noise4(float x);"
+ "vec4 noise4(vec2 x);"
+ "vec4 noise4(vec3 x);"
+ "vec4 noise4(vec4 x);"
+
+ "\n");
+ }
+
+ if (spvVersion.vulkan == 0) {
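+        // (Annotation: atomic counters are an OpenGL-only feature and are not
+        // declared when targeting Vulkan, hence the spvVersion.vulkan == 0 guard.)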
+ //
+ // Atomic counter functions.
+ //
+ if ((profile != EEsProfile && version >= 300) ||
+ (profile == EEsProfile && version >= 310)) {
+ commonBuiltins.append(
+ "uint atomicCounterIncrement(atomic_uint);"
+ "uint atomicCounterDecrement(atomic_uint);"
+ "uint atomicCounter(atomic_uint);"
+
+ "\n");
+ }
+ if (profile != EEsProfile && version >= 460) {
+ commonBuiltins.append(
+ "uint atomicCounterAdd(atomic_uint, uint);"
+ "uint atomicCounterSubtract(atomic_uint, uint);"
+ "uint atomicCounterMin(atomic_uint, uint);"
+ "uint atomicCounterMax(atomic_uint, uint);"
+ "uint atomicCounterAnd(atomic_uint, uint);"
+ "uint atomicCounterOr(atomic_uint, uint);"
+ "uint atomicCounterXor(atomic_uint, uint);"
+ "uint atomicCounterExchange(atomic_uint, uint);"
+ "uint atomicCounterCompSwap(atomic_uint, uint, uint);"
+
+ "\n");
+ }
+ }
+
+ // Bitfield
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ " int bitfieldExtract( int, int, int);"
+ "ivec2 bitfieldExtract(ivec2, int, int);"
+ "ivec3 bitfieldExtract(ivec3, int, int);"
+ "ivec4 bitfieldExtract(ivec4, int, int);"
+
+ " uint bitfieldExtract( uint, int, int);"
+ "uvec2 bitfieldExtract(uvec2, int, int);"
+ "uvec3 bitfieldExtract(uvec3, int, int);"
+ "uvec4 bitfieldExtract(uvec4, int, int);"
+
+ " int bitfieldInsert( int base, int, int, int);"
+ "ivec2 bitfieldInsert(ivec2 base, ivec2, int, int);"
+ "ivec3 bitfieldInsert(ivec3 base, ivec3, int, int);"
+ "ivec4 bitfieldInsert(ivec4 base, ivec4, int, int);"
+
+ " uint bitfieldInsert( uint base, uint, int, int);"
+ "uvec2 bitfieldInsert(uvec2 base, uvec2, int, int);"
+ "uvec3 bitfieldInsert(uvec3 base, uvec3, int, int);"
+ "uvec4 bitfieldInsert(uvec4 base, uvec4, int, int);"
+
+ "\n");
+ }
+
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ " int findLSB( int);"
+ "ivec2 findLSB(ivec2);"
+ "ivec3 findLSB(ivec3);"
+ "ivec4 findLSB(ivec4);"
+
+ " int findLSB( uint);"
+ "ivec2 findLSB(uvec2);"
+ "ivec3 findLSB(uvec3);"
+ "ivec4 findLSB(uvec4);"
+
+ "\n");
+ } else if (profile == EEsProfile && version >= 310) {
+ commonBuiltins.append(
+ "lowp int findLSB( int);"
+ "lowp ivec2 findLSB(ivec2);"
+ "lowp ivec3 findLSB(ivec3);"
+ "lowp ivec4 findLSB(ivec4);"
+
+ "lowp int findLSB( uint);"
+ "lowp ivec2 findLSB(uvec2);"
+ "lowp ivec3 findLSB(uvec3);"
+ "lowp ivec4 findLSB(uvec4);"
+
+ "\n");
+ }
+
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ " int bitCount( int);"
+ "ivec2 bitCount(ivec2);"
+ "ivec3 bitCount(ivec3);"
+ "ivec4 bitCount(ivec4);"
+
+ " int bitCount( uint);"
+ "ivec2 bitCount(uvec2);"
+ "ivec3 bitCount(uvec3);"
+ "ivec4 bitCount(uvec4);"
+
+ " int findMSB(highp int);"
+ "ivec2 findMSB(highp ivec2);"
+ "ivec3 findMSB(highp ivec3);"
+ "ivec4 findMSB(highp ivec4);"
+
+ " int findMSB(highp uint);"
+ "ivec2 findMSB(highp uvec2);"
+ "ivec3 findMSB(highp uvec3);"
+ "ivec4 findMSB(highp uvec4);"
+
+ "\n");
+ }
+
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ commonBuiltins.append(
+ " uint uaddCarry(highp uint, highp uint, out lowp uint carry);"
+ "uvec2 uaddCarry(highp uvec2, highp uvec2, out lowp uvec2 carry);"
+ "uvec3 uaddCarry(highp uvec3, highp uvec3, out lowp uvec3 carry);"
+ "uvec4 uaddCarry(highp uvec4, highp uvec4, out lowp uvec4 carry);"
+
+ " uint usubBorrow(highp uint, highp uint, out lowp uint borrow);"
+ "uvec2 usubBorrow(highp uvec2, highp uvec2, out lowp uvec2 borrow);"
+ "uvec3 usubBorrow(highp uvec3, highp uvec3, out lowp uvec3 borrow);"
+ "uvec4 usubBorrow(highp uvec4, highp uvec4, out lowp uvec4 borrow);"
+
+ "void umulExtended(highp uint, highp uint, out highp uint, out highp uint lsb);"
+ "void umulExtended(highp uvec2, highp uvec2, out highp uvec2, out highp uvec2 lsb);"
+ "void umulExtended(highp uvec3, highp uvec3, out highp uvec3, out highp uvec3 lsb);"
+ "void umulExtended(highp uvec4, highp uvec4, out highp uvec4, out highp uvec4 lsb);"
+
+ "void imulExtended(highp int, highp int, out highp int, out highp int lsb);"
+ "void imulExtended(highp ivec2, highp ivec2, out highp ivec2, out highp ivec2 lsb);"
+ "void imulExtended(highp ivec3, highp ivec3, out highp ivec3, out highp ivec3 lsb);"
+ "void imulExtended(highp ivec4, highp ivec4, out highp ivec4, out highp ivec4 lsb);"
+
+ " int bitfieldReverse(highp int);"
+ "ivec2 bitfieldReverse(highp ivec2);"
+ "ivec3 bitfieldReverse(highp ivec3);"
+ "ivec4 bitfieldReverse(highp ivec4);"
+
+ " uint bitfieldReverse(highp uint);"
+ "uvec2 bitfieldReverse(highp uvec2);"
+ "uvec3 bitfieldReverse(highp uvec3);"
+ "uvec4 bitfieldReverse(highp uvec4);"
+
+ "\n");
+ }
+
+ if (profile == EEsProfile && version >= 310) {
+ commonBuiltins.append(
+ "lowp int bitCount( int);"
+ "lowp ivec2 bitCount(ivec2);"
+ "lowp ivec3 bitCount(ivec3);"
+ "lowp ivec4 bitCount(ivec4);"
+
+ "lowp int bitCount( uint);"
+ "lowp ivec2 bitCount(uvec2);"
+ "lowp ivec3 bitCount(uvec3);"
+ "lowp ivec4 bitCount(uvec4);"
+
+ "lowp int findMSB(highp int);"
+ "lowp ivec2 findMSB(highp ivec2);"
+ "lowp ivec3 findMSB(highp ivec3);"
+ "lowp ivec4 findMSB(highp ivec4);"
+
+ "lowp int findMSB(highp uint);"
+ "lowp ivec2 findMSB(highp uvec2);"
+ "lowp ivec3 findMSB(highp uvec3);"
+ "lowp ivec4 findMSB(highp uvec4);"
+
+ "\n");
+ }
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "uint64_t ballotARB(bool);"
+
+ "float readInvocationARB(float, uint);"
+ "vec2 readInvocationARB(vec2, uint);"
+ "vec3 readInvocationARB(vec3, uint);"
+ "vec4 readInvocationARB(vec4, uint);"
+
+ "int readInvocationARB(int, uint);"
+ "ivec2 readInvocationARB(ivec2, uint);"
+ "ivec3 readInvocationARB(ivec3, uint);"
+ "ivec4 readInvocationARB(ivec4, uint);"
+
+ "uint readInvocationARB(uint, uint);"
+ "uvec2 readInvocationARB(uvec2, uint);"
+ "uvec3 readInvocationARB(uvec3, uint);"
+ "uvec4 readInvocationARB(uvec4, uint);"
+
+ "float readFirstInvocationARB(float);"
+ "vec2 readFirstInvocationARB(vec2);"
+ "vec3 readFirstInvocationARB(vec3);"
+ "vec4 readFirstInvocationARB(vec4);"
+
+ "int readFirstInvocationARB(int);"
+ "ivec2 readFirstInvocationARB(ivec2);"
+ "ivec3 readFirstInvocationARB(ivec3);"
+ "ivec4 readFirstInvocationARB(ivec4);"
+
+ "uint readFirstInvocationARB(uint);"
+ "uvec2 readFirstInvocationARB(uvec2);"
+ "uvec3 readFirstInvocationARB(uvec3);"
+ "uvec4 readFirstInvocationARB(uvec4);"
+
+ "\n");
+ }
+
+ // GL_ARB_shader_group_vote
+ if (profile != EEsProfile && version >= 430) {
+ commonBuiltins.append(
+ "bool anyInvocationARB(bool);"
+ "bool allInvocationsARB(bool);"
+ "bool allInvocationsEqualARB(bool);"
+
+ "\n");
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ commonBuiltins.append(
+ "void subgroupBarrier();"
+ "void subgroupMemoryBarrier();"
+ "void subgroupMemoryBarrierBuffer();"
+ "void subgroupMemoryBarrierImage();"
+ "bool subgroupElect();"
+
+ "bool subgroupAll(bool);\n"
+ "bool subgroupAny(bool);\n"
+
+ "bool subgroupAllEqual(float);\n"
+ "bool subgroupAllEqual(vec2);\n"
+ "bool subgroupAllEqual(vec3);\n"
+ "bool subgroupAllEqual(vec4);\n"
+ "bool subgroupAllEqual(int);\n"
+ "bool subgroupAllEqual(ivec2);\n"
+ "bool subgroupAllEqual(ivec3);\n"
+ "bool subgroupAllEqual(ivec4);\n"
+ "bool subgroupAllEqual(uint);\n"
+ "bool subgroupAllEqual(uvec2);\n"
+ "bool subgroupAllEqual(uvec3);\n"
+ "bool subgroupAllEqual(uvec4);\n"
+ "bool subgroupAllEqual(bool);\n"
+ "bool subgroupAllEqual(bvec2);\n"
+ "bool subgroupAllEqual(bvec3);\n"
+ "bool subgroupAllEqual(bvec4);\n"
+
+ "float subgroupBroadcast(float, uint);\n"
+ "vec2 subgroupBroadcast(vec2, uint);\n"
+ "vec3 subgroupBroadcast(vec3, uint);\n"
+ "vec4 subgroupBroadcast(vec4, uint);\n"
+ "int subgroupBroadcast(int, uint);\n"
+ "ivec2 subgroupBroadcast(ivec2, uint);\n"
+ "ivec3 subgroupBroadcast(ivec3, uint);\n"
+ "ivec4 subgroupBroadcast(ivec4, uint);\n"
+ "uint subgroupBroadcast(uint, uint);\n"
+ "uvec2 subgroupBroadcast(uvec2, uint);\n"
+ "uvec3 subgroupBroadcast(uvec3, uint);\n"
+ "uvec4 subgroupBroadcast(uvec4, uint);\n"
+ "bool subgroupBroadcast(bool, uint);\n"
+ "bvec2 subgroupBroadcast(bvec2, uint);\n"
+ "bvec3 subgroupBroadcast(bvec3, uint);\n"
+ "bvec4 subgroupBroadcast(bvec4, uint);\n"
+
+ "float subgroupBroadcastFirst(float);\n"
+ "vec2 subgroupBroadcastFirst(vec2);\n"
+ "vec3 subgroupBroadcastFirst(vec3);\n"
+ "vec4 subgroupBroadcastFirst(vec4);\n"
+ "int subgroupBroadcastFirst(int);\n"
+ "ivec2 subgroupBroadcastFirst(ivec2);\n"
+ "ivec3 subgroupBroadcastFirst(ivec3);\n"
+ "ivec4 subgroupBroadcastFirst(ivec4);\n"
+ "uint subgroupBroadcastFirst(uint);\n"
+ "uvec2 subgroupBroadcastFirst(uvec2);\n"
+ "uvec3 subgroupBroadcastFirst(uvec3);\n"
+ "uvec4 subgroupBroadcastFirst(uvec4);\n"
+ "bool subgroupBroadcastFirst(bool);\n"
+ "bvec2 subgroupBroadcastFirst(bvec2);\n"
+ "bvec3 subgroupBroadcastFirst(bvec3);\n"
+ "bvec4 subgroupBroadcastFirst(bvec4);\n"
+
+ "uvec4 subgroupBallot(bool);\n"
+ "bool subgroupInverseBallot(uvec4);\n"
+ "bool subgroupBallotBitExtract(uvec4, uint);\n"
+ "uint subgroupBallotBitCount(uvec4);\n"
+ "uint subgroupBallotInclusiveBitCount(uvec4);\n"
+ "uint subgroupBallotExclusiveBitCount(uvec4);\n"
+ "uint subgroupBallotFindLSB(uvec4);\n"
+ "uint subgroupBallotFindMSB(uvec4);\n"
+
+ "float subgroupShuffle(float, uint);\n"
+ "vec2 subgroupShuffle(vec2, uint);\n"
+ "vec3 subgroupShuffle(vec3, uint);\n"
+ "vec4 subgroupShuffle(vec4, uint);\n"
+ "int subgroupShuffle(int, uint);\n"
+ "ivec2 subgroupShuffle(ivec2, uint);\n"
+ "ivec3 subgroupShuffle(ivec3, uint);\n"
+ "ivec4 subgroupShuffle(ivec4, uint);\n"
+ "uint subgroupShuffle(uint, uint);\n"
+ "uvec2 subgroupShuffle(uvec2, uint);\n"
+ "uvec3 subgroupShuffle(uvec3, uint);\n"
+ "uvec4 subgroupShuffle(uvec4, uint);\n"
+ "bool subgroupShuffle(bool, uint);\n"
+ "bvec2 subgroupShuffle(bvec2, uint);\n"
+ "bvec3 subgroupShuffle(bvec3, uint);\n"
+ "bvec4 subgroupShuffle(bvec4, uint);\n"
+
+ "float subgroupShuffleXor(float, uint);\n"
+ "vec2 subgroupShuffleXor(vec2, uint);\n"
+ "vec3 subgroupShuffleXor(vec3, uint);\n"
+ "vec4 subgroupShuffleXor(vec4, uint);\n"
+ "int subgroupShuffleXor(int, uint);\n"
+ "ivec2 subgroupShuffleXor(ivec2, uint);\n"
+ "ivec3 subgroupShuffleXor(ivec3, uint);\n"
+ "ivec4 subgroupShuffleXor(ivec4, uint);\n"
+ "uint subgroupShuffleXor(uint, uint);\n"
+ "uvec2 subgroupShuffleXor(uvec2, uint);\n"
+ "uvec3 subgroupShuffleXor(uvec3, uint);\n"
+ "uvec4 subgroupShuffleXor(uvec4, uint);\n"
+ "bool subgroupShuffleXor(bool, uint);\n"
+ "bvec2 subgroupShuffleXor(bvec2, uint);\n"
+ "bvec3 subgroupShuffleXor(bvec3, uint);\n"
+ "bvec4 subgroupShuffleXor(bvec4, uint);\n"
+
+ "float subgroupShuffleUp(float, uint delta);\n"
+ "vec2 subgroupShuffleUp(vec2, uint delta);\n"
+ "vec3 subgroupShuffleUp(vec3, uint delta);\n"
+ "vec4 subgroupShuffleUp(vec4, uint delta);\n"
+ "int subgroupShuffleUp(int, uint delta);\n"
+ "ivec2 subgroupShuffleUp(ivec2, uint delta);\n"
+ "ivec3 subgroupShuffleUp(ivec3, uint delta);\n"
+ "ivec4 subgroupShuffleUp(ivec4, uint delta);\n"
+ "uint subgroupShuffleUp(uint, uint delta);\n"
+ "uvec2 subgroupShuffleUp(uvec2, uint delta);\n"
+ "uvec3 subgroupShuffleUp(uvec3, uint delta);\n"
+ "uvec4 subgroupShuffleUp(uvec4, uint delta);\n"
+ "bool subgroupShuffleUp(bool, uint delta);\n"
+ "bvec2 subgroupShuffleUp(bvec2, uint delta);\n"
+ "bvec3 subgroupShuffleUp(bvec3, uint delta);\n"
+ "bvec4 subgroupShuffleUp(bvec4, uint delta);\n"
+
+ "float subgroupShuffleDown(float, uint delta);\n"
+ "vec2 subgroupShuffleDown(vec2, uint delta);\n"
+ "vec3 subgroupShuffleDown(vec3, uint delta);\n"
+ "vec4 subgroupShuffleDown(vec4, uint delta);\n"
+ "int subgroupShuffleDown(int, uint delta);\n"
+ "ivec2 subgroupShuffleDown(ivec2, uint delta);\n"
+ "ivec3 subgroupShuffleDown(ivec3, uint delta);\n"
+ "ivec4 subgroupShuffleDown(ivec4, uint delta);\n"
+ "uint subgroupShuffleDown(uint, uint delta);\n"
+ "uvec2 subgroupShuffleDown(uvec2, uint delta);\n"
+ "uvec3 subgroupShuffleDown(uvec3, uint delta);\n"
+ "uvec4 subgroupShuffleDown(uvec4, uint delta);\n"
+ "bool subgroupShuffleDown(bool, uint delta);\n"
+ "bvec2 subgroupShuffleDown(bvec2, uint delta);\n"
+ "bvec3 subgroupShuffleDown(bvec3, uint delta);\n"
+ "bvec4 subgroupShuffleDown(bvec4, uint delta);\n"
+
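+ // GL_KHR_shader_subgroup_arithmetic reductions: each call returns the
+ // operator applied across the values of all active invocations, e.g. in
+ // GLSL `float total = subgroupAdd(x);` gives every active invocation the
+ // sum of x over the subgroup.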
+ "float subgroupAdd(float);\n"
+ "vec2 subgroupAdd(vec2);\n"
+ "vec3 subgroupAdd(vec3);\n"
+ "vec4 subgroupAdd(vec4);\n"
+ "int subgroupAdd(int);\n"
+ "ivec2 subgroupAdd(ivec2);\n"
+ "ivec3 subgroupAdd(ivec3);\n"
+ "ivec4 subgroupAdd(ivec4);\n"
+ "uint subgroupAdd(uint);\n"
+ "uvec2 subgroupAdd(uvec2);\n"
+ "uvec3 subgroupAdd(uvec3);\n"
+ "uvec4 subgroupAdd(uvec4);\n"
+
+ "float subgroupMul(float);\n"
+ "vec2 subgroupMul(vec2);\n"
+ "vec3 subgroupMul(vec3);\n"
+ "vec4 subgroupMul(vec4);\n"
+ "int subgroupMul(int);\n"
+ "ivec2 subgroupMul(ivec2);\n"
+ "ivec3 subgroupMul(ivec3);\n"
+ "ivec4 subgroupMul(ivec4);\n"
+ "uint subgroupMul(uint);\n"
+ "uvec2 subgroupMul(uvec2);\n"
+ "uvec3 subgroupMul(uvec3);\n"
+ "uvec4 subgroupMul(uvec4);\n"
+
+ "float subgroupMin(float);\n"
+ "vec2 subgroupMin(vec2);\n"
+ "vec3 subgroupMin(vec3);\n"
+ "vec4 subgroupMin(vec4);\n"
+ "int subgroupMin(int);\n"
+ "ivec2 subgroupMin(ivec2);\n"
+ "ivec3 subgroupMin(ivec3);\n"
+ "ivec4 subgroupMin(ivec4);\n"
+ "uint subgroupMin(uint);\n"
+ "uvec2 subgroupMin(uvec2);\n"
+ "uvec3 subgroupMin(uvec3);\n"
+ "uvec4 subgroupMin(uvec4);\n"
+
+ "float subgroupMax(float);\n"
+ "vec2 subgroupMax(vec2);\n"
+ "vec3 subgroupMax(vec3);\n"
+ "vec4 subgroupMax(vec4);\n"
+ "int subgroupMax(int);\n"
+ "ivec2 subgroupMax(ivec2);\n"
+ "ivec3 subgroupMax(ivec3);\n"
+ "ivec4 subgroupMax(ivec4);\n"
+ "uint subgroupMax(uint);\n"
+ "uvec2 subgroupMax(uvec2);\n"
+ "uvec3 subgroupMax(uvec3);\n"
+ "uvec4 subgroupMax(uvec4);\n"
+
+ "int subgroupAnd(int);\n"
+ "ivec2 subgroupAnd(ivec2);\n"
+ "ivec3 subgroupAnd(ivec3);\n"
+ "ivec4 subgroupAnd(ivec4);\n"
+ "uint subgroupAnd(uint);\n"
+ "uvec2 subgroupAnd(uvec2);\n"
+ "uvec3 subgroupAnd(uvec3);\n"
+ "uvec4 subgroupAnd(uvec4);\n"
+ "bool subgroupAnd(bool);\n"
+ "bvec2 subgroupAnd(bvec2);\n"
+ "bvec3 subgroupAnd(bvec3);\n"
+ "bvec4 subgroupAnd(bvec4);\n"
+
+ "int subgroupOr(int);\n"
+ "ivec2 subgroupOr(ivec2);\n"
+ "ivec3 subgroupOr(ivec3);\n"
+ "ivec4 subgroupOr(ivec4);\n"
+ "uint subgroupOr(uint);\n"
+ "uvec2 subgroupOr(uvec2);\n"
+ "uvec3 subgroupOr(uvec3);\n"
+ "uvec4 subgroupOr(uvec4);\n"
+ "bool subgroupOr(bool);\n"
+ "bvec2 subgroupOr(bvec2);\n"
+ "bvec3 subgroupOr(bvec3);\n"
+ "bvec4 subgroupOr(bvec4);\n"
+
+ "int subgroupXor(int);\n"
+ "ivec2 subgroupXor(ivec2);\n"
+ "ivec3 subgroupXor(ivec3);\n"
+ "ivec4 subgroupXor(ivec4);\n"
+ "uint subgroupXor(uint);\n"
+ "uvec2 subgroupXor(uvec2);\n"
+ "uvec3 subgroupXor(uvec3);\n"
+ "uvec4 subgroupXor(uvec4);\n"
+ "bool subgroupXor(bool);\n"
+ "bvec2 subgroupXor(bvec2);\n"
+ "bvec3 subgroupXor(bvec3);\n"
+ "bvec4 subgroupXor(bvec4);\n"
+
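+ // Inclusive scans: invocation i receives the operator folded over the values
+ // of active invocations 0..i, its own value included, e.g.
+ // `uint rank = subgroupInclusiveAdd(1u);` yields each invocation's 1-based
+ // rank among the active invocations.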
+ "float subgroupInclusiveAdd(float);\n"
+ "vec2 subgroupInclusiveAdd(vec2);\n"
+ "vec3 subgroupInclusiveAdd(vec3);\n"
+ "vec4 subgroupInclusiveAdd(vec4);\n"
+ "int subgroupInclusiveAdd(int);\n"
+ "ivec2 subgroupInclusiveAdd(ivec2);\n"
+ "ivec3 subgroupInclusiveAdd(ivec3);\n"
+ "ivec4 subgroupInclusiveAdd(ivec4);\n"
+ "uint subgroupInclusiveAdd(uint);\n"
+ "uvec2 subgroupInclusiveAdd(uvec2);\n"
+ "uvec3 subgroupInclusiveAdd(uvec3);\n"
+ "uvec4 subgroupInclusiveAdd(uvec4);\n"
+
+ "float subgroupInclusiveMul(float);\n"
+ "vec2 subgroupInclusiveMul(vec2);\n"
+ "vec3 subgroupInclusiveMul(vec3);\n"
+ "vec4 subgroupInclusiveMul(vec4);\n"
+ "int subgroupInclusiveMul(int);\n"
+ "ivec2 subgroupInclusiveMul(ivec2);\n"
+ "ivec3 subgroupInclusiveMul(ivec3);\n"
+ "ivec4 subgroupInclusiveMul(ivec4);\n"
+ "uint subgroupInclusiveMul(uint);\n"
+ "uvec2 subgroupInclusiveMul(uvec2);\n"
+ "uvec3 subgroupInclusiveMul(uvec3);\n"
+ "uvec4 subgroupInclusiveMul(uvec4);\n"
+
+ "float subgroupInclusiveMin(float);\n"
+ "vec2 subgroupInclusiveMin(vec2);\n"
+ "vec3 subgroupInclusiveMin(vec3);\n"
+ "vec4 subgroupInclusiveMin(vec4);\n"
+ "int subgroupInclusiveMin(int);\n"
+ "ivec2 subgroupInclusiveMin(ivec2);\n"
+ "ivec3 subgroupInclusiveMin(ivec3);\n"
+ "ivec4 subgroupInclusiveMin(ivec4);\n"
+ "uint subgroupInclusiveMin(uint);\n"
+ "uvec2 subgroupInclusiveMin(uvec2);\n"
+ "uvec3 subgroupInclusiveMin(uvec3);\n"
+ "uvec4 subgroupInclusiveMin(uvec4);\n"
+
+ "float subgroupInclusiveMax(float);\n"
+ "vec2 subgroupInclusiveMax(vec2);\n"
+ "vec3 subgroupInclusiveMax(vec3);\n"
+ "vec4 subgroupInclusiveMax(vec4);\n"
+ "int subgroupInclusiveMax(int);\n"
+ "ivec2 subgroupInclusiveMax(ivec2);\n"
+ "ivec3 subgroupInclusiveMax(ivec3);\n"
+ "ivec4 subgroupInclusiveMax(ivec4);\n"
+ "uint subgroupInclusiveMax(uint);\n"
+ "uvec2 subgroupInclusiveMax(uvec2);\n"
+ "uvec3 subgroupInclusiveMax(uvec3);\n"
+ "uvec4 subgroupInclusiveMax(uvec4);\n"
+
+ "int subgroupInclusiveAnd(int);\n"
+ "ivec2 subgroupInclusiveAnd(ivec2);\n"
+ "ivec3 subgroupInclusiveAnd(ivec3);\n"
+ "ivec4 subgroupInclusiveAnd(ivec4);\n"
+ "uint subgroupInclusiveAnd(uint);\n"
+ "uvec2 subgroupInclusiveAnd(uvec2);\n"
+ "uvec3 subgroupInclusiveAnd(uvec3);\n"
+ "uvec4 subgroupInclusiveAnd(uvec4);\n"
+ "bool subgroupInclusiveAnd(bool);\n"
+ "bvec2 subgroupInclusiveAnd(bvec2);\n"
+ "bvec3 subgroupInclusiveAnd(bvec3);\n"
+ "bvec4 subgroupInclusiveAnd(bvec4);\n"
+
+ "int subgroupInclusiveOr(int);\n"
+ "ivec2 subgroupInclusiveOr(ivec2);\n"
+ "ivec3 subgroupInclusiveOr(ivec3);\n"
+ "ivec4 subgroupInclusiveOr(ivec4);\n"
+ "uint subgroupInclusiveOr(uint);\n"
+ "uvec2 subgroupInclusiveOr(uvec2);\n"
+ "uvec3 subgroupInclusiveOr(uvec3);\n"
+ "uvec4 subgroupInclusiveOr(uvec4);\n"
+ "bool subgroupInclusiveOr(bool);\n"
+ "bvec2 subgroupInclusiveOr(bvec2);\n"
+ "bvec3 subgroupInclusiveOr(bvec3);\n"
+ "bvec4 subgroupInclusiveOr(bvec4);\n"
+
+ "int subgroupInclusiveXor(int);\n"
+ "ivec2 subgroupInclusiveXor(ivec2);\n"
+ "ivec3 subgroupInclusiveXor(ivec3);\n"
+ "ivec4 subgroupInclusiveXor(ivec4);\n"
+ "uint subgroupInclusiveXor(uint);\n"
+ "uvec2 subgroupInclusiveXor(uvec2);\n"
+ "uvec3 subgroupInclusiveXor(uvec3);\n"
+ "uvec4 subgroupInclusiveXor(uvec4);\n"
+ "bool subgroupInclusiveXor(bool);\n"
+ "bvec2 subgroupInclusiveXor(bvec2);\n"
+ "bvec3 subgroupInclusiveXor(bvec3);\n"
+ "bvec4 subgroupInclusiveXor(bvec4);\n"
+
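+ // Exclusive scans: like the inclusive scans above, but invocation i folds
+ // over invocations 0..i-1 only; the lowest active invocation receives the
+ // operator's identity (0 for Add, 1 for Mul, and so on).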
+ "float subgroupExclusiveAdd(float);\n"
+ "vec2 subgroupExclusiveAdd(vec2);\n"
+ "vec3 subgroupExclusiveAdd(vec3);\n"
+ "vec4 subgroupExclusiveAdd(vec4);\n"
+ "int subgroupExclusiveAdd(int);\n"
+ "ivec2 subgroupExclusiveAdd(ivec2);\n"
+ "ivec3 subgroupExclusiveAdd(ivec3);\n"
+ "ivec4 subgroupExclusiveAdd(ivec4);\n"
+ "uint subgroupExclusiveAdd(uint);\n"
+ "uvec2 subgroupExclusiveAdd(uvec2);\n"
+ "uvec3 subgroupExclusiveAdd(uvec3);\n"
+ "uvec4 subgroupExclusiveAdd(uvec4);\n"
+
+ "float subgroupExclusiveMul(float);\n"
+ "vec2 subgroupExclusiveMul(vec2);\n"
+ "vec3 subgroupExclusiveMul(vec3);\n"
+ "vec4 subgroupExclusiveMul(vec4);\n"
+ "int subgroupExclusiveMul(int);\n"
+ "ivec2 subgroupExclusiveMul(ivec2);\n"
+ "ivec3 subgroupExclusiveMul(ivec3);\n"
+ "ivec4 subgroupExclusiveMul(ivec4);\n"
+ "uint subgroupExclusiveMul(uint);\n"
+ "uvec2 subgroupExclusiveMul(uvec2);\n"
+ "uvec3 subgroupExclusiveMul(uvec3);\n"
+ "uvec4 subgroupExclusiveMul(uvec4);\n"
+
+ "float subgroupExclusiveMin(float);\n"
+ "vec2 subgroupExclusiveMin(vec2);\n"
+ "vec3 subgroupExclusiveMin(vec3);\n"
+ "vec4 subgroupExclusiveMin(vec4);\n"
+ "int subgroupExclusiveMin(int);\n"
+ "ivec2 subgroupExclusiveMin(ivec2);\n"
+ "ivec3 subgroupExclusiveMin(ivec3);\n"
+ "ivec4 subgroupExclusiveMin(ivec4);\n"
+ "uint subgroupExclusiveMin(uint);\n"
+ "uvec2 subgroupExclusiveMin(uvec2);\n"
+ "uvec3 subgroupExclusiveMin(uvec3);\n"
+ "uvec4 subgroupExclusiveMin(uvec4);\n"
+
+ "float subgroupExclusiveMax(float);\n"
+ "vec2 subgroupExclusiveMax(vec2);\n"
+ "vec3 subgroupExclusiveMax(vec3);\n"
+ "vec4 subgroupExclusiveMax(vec4);\n"
+ "int subgroupExclusiveMax(int);\n"
+ "ivec2 subgroupExclusiveMax(ivec2);\n"
+ "ivec3 subgroupExclusiveMax(ivec3);\n"
+ "ivec4 subgroupExclusiveMax(ivec4);\n"
+ "uint subgroupExclusiveMax(uint);\n"
+ "uvec2 subgroupExclusiveMax(uvec2);\n"
+ "uvec3 subgroupExclusiveMax(uvec3);\n"
+ "uvec4 subgroupExclusiveMax(uvec4);\n"
+
+ "int subgroupExclusiveAnd(int);\n"
+ "ivec2 subgroupExclusiveAnd(ivec2);\n"
+ "ivec3 subgroupExclusiveAnd(ivec3);\n"
+ "ivec4 subgroupExclusiveAnd(ivec4);\n"
+ "uint subgroupExclusiveAnd(uint);\n"
+ "uvec2 subgroupExclusiveAnd(uvec2);\n"
+ "uvec3 subgroupExclusiveAnd(uvec3);\n"
+ "uvec4 subgroupExclusiveAnd(uvec4);\n"
+ "bool subgroupExclusiveAnd(bool);\n"
+ "bvec2 subgroupExclusiveAnd(bvec2);\n"
+ "bvec3 subgroupExclusiveAnd(bvec3);\n"
+ "bvec4 subgroupExclusiveAnd(bvec4);\n"
+
+ "int subgroupExclusiveOr(int);\n"
+ "ivec2 subgroupExclusiveOr(ivec2);\n"
+ "ivec3 subgroupExclusiveOr(ivec3);\n"
+ "ivec4 subgroupExclusiveOr(ivec4);\n"
+ "uint subgroupExclusiveOr(uint);\n"
+ "uvec2 subgroupExclusiveOr(uvec2);\n"
+ "uvec3 subgroupExclusiveOr(uvec3);\n"
+ "uvec4 subgroupExclusiveOr(uvec4);\n"
+ "bool subgroupExclusiveOr(bool);\n"
+ "bvec2 subgroupExclusiveOr(bvec2);\n"
+ "bvec3 subgroupExclusiveOr(bvec3);\n"
+ "bvec4 subgroupExclusiveOr(bvec4);\n"
+
+ "int subgroupExclusiveXor(int);\n"
+ "ivec2 subgroupExclusiveXor(ivec2);\n"
+ "ivec3 subgroupExclusiveXor(ivec3);\n"
+ "ivec4 subgroupExclusiveXor(ivec4);\n"
+ "uint subgroupExclusiveXor(uint);\n"
+ "uvec2 subgroupExclusiveXor(uvec2);\n"
+ "uvec3 subgroupExclusiveXor(uvec3);\n"
+ "uvec4 subgroupExclusiveXor(uvec4);\n"
+ "bool subgroupExclusiveXor(bool);\n"
+ "bvec2 subgroupExclusiveXor(bvec2);\n"
+ "bvec3 subgroupExclusiveXor(bvec3);\n"
+ "bvec4 subgroupExclusiveXor(bvec4);\n"
+
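+ // GL_KHR_shader_subgroup_clustered: the reduction runs independently within
+ // clusters of the given size, e.g. `subgroupClusteredMax(v, 4u)` returns the
+ // maximum of v over each group of 4 consecutive invocations. The cluster size
+ // must be a constant power of two; that is validated at the call site, not by
+ // these prototypes.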
+ "float subgroupClusteredAdd(float, uint);\n"
+ "vec2 subgroupClusteredAdd(vec2, uint);\n"
+ "vec3 subgroupClusteredAdd(vec3, uint);\n"
+ "vec4 subgroupClusteredAdd(vec4, uint);\n"
+ "int subgroupClusteredAdd(int, uint);\n"
+ "ivec2 subgroupClusteredAdd(ivec2, uint);\n"
+ "ivec3 subgroupClusteredAdd(ivec3, uint);\n"
+ "ivec4 subgroupClusteredAdd(ivec4, uint);\n"
+ "uint subgroupClusteredAdd(uint, uint);\n"
+ "uvec2 subgroupClusteredAdd(uvec2, uint);\n"
+ "uvec3 subgroupClusteredAdd(uvec3, uint);\n"
+ "uvec4 subgroupClusteredAdd(uvec4, uint);\n"
+
+ "float subgroupClusteredMul(float, uint);\n"
+ "vec2 subgroupClusteredMul(vec2, uint);\n"
+ "vec3 subgroupClusteredMul(vec3, uint);\n"
+ "vec4 subgroupClusteredMul(vec4, uint);\n"
+ "int subgroupClusteredMul(int, uint);\n"
+ "ivec2 subgroupClusteredMul(ivec2, uint);\n"
+ "ivec3 subgroupClusteredMul(ivec3, uint);\n"
+ "ivec4 subgroupClusteredMul(ivec4, uint);\n"
+ "uint subgroupClusteredMul(uint, uint);\n"
+ "uvec2 subgroupClusteredMul(uvec2, uint);\n"
+ "uvec3 subgroupClusteredMul(uvec3, uint);\n"
+ "uvec4 subgroupClusteredMul(uvec4, uint);\n"
+
+ "float subgroupClusteredMin(float, uint);\n"
+ "vec2 subgroupClusteredMin(vec2, uint);\n"
+ "vec3 subgroupClusteredMin(vec3, uint);\n"
+ "vec4 subgroupClusteredMin(vec4, uint);\n"
+ "int subgroupClusteredMin(int, uint);\n"
+ "ivec2 subgroupClusteredMin(ivec2, uint);\n"
+ "ivec3 subgroupClusteredMin(ivec3, uint);\n"
+ "ivec4 subgroupClusteredMin(ivec4, uint);\n"
+ "uint subgroupClusteredMin(uint, uint);\n"
+ "uvec2 subgroupClusteredMin(uvec2, uint);\n"
+ "uvec3 subgroupClusteredMin(uvec3, uint);\n"
+ "uvec4 subgroupClusteredMin(uvec4, uint);\n"
+
+ "float subgroupClusteredMax(float, uint);\n"
+ "vec2 subgroupClusteredMax(vec2, uint);\n"
+ "vec3 subgroupClusteredMax(vec3, uint);\n"
+ "vec4 subgroupClusteredMax(vec4, uint);\n"
+ "int subgroupClusteredMax(int, uint);\n"
+ "ivec2 subgroupClusteredMax(ivec2, uint);\n"
+ "ivec3 subgroupClusteredMax(ivec3, uint);\n"
+ "ivec4 subgroupClusteredMax(ivec4, uint);\n"
+ "uint subgroupClusteredMax(uint, uint);\n"
+ "uvec2 subgroupClusteredMax(uvec2, uint);\n"
+ "uvec3 subgroupClusteredMax(uvec3, uint);\n"
+ "uvec4 subgroupClusteredMax(uvec4, uint);\n"
+
+ "int subgroupClusteredAnd(int, uint);\n"
+ "ivec2 subgroupClusteredAnd(ivec2, uint);\n"
+ "ivec3 subgroupClusteredAnd(ivec3, uint);\n"
+ "ivec4 subgroupClusteredAnd(ivec4, uint);\n"
+ "uint subgroupClusteredAnd(uint, uint);\n"
+ "uvec2 subgroupClusteredAnd(uvec2, uint);\n"
+ "uvec3 subgroupClusteredAnd(uvec3, uint);\n"
+ "uvec4 subgroupClusteredAnd(uvec4, uint);\n"
+ "bool subgroupClusteredAnd(bool, uint);\n"
+ "bvec2 subgroupClusteredAnd(bvec2, uint);\n"
+ "bvec3 subgroupClusteredAnd(bvec3, uint);\n"
+ "bvec4 subgroupClusteredAnd(bvec4, uint);\n"
+
+ "int subgroupClusteredOr(int, uint);\n"
+ "ivec2 subgroupClusteredOr(ivec2, uint);\n"
+ "ivec3 subgroupClusteredOr(ivec3, uint);\n"
+ "ivec4 subgroupClusteredOr(ivec4, uint);\n"
+ "uint subgroupClusteredOr(uint, uint);\n"
+ "uvec2 subgroupClusteredOr(uvec2, uint);\n"
+ "uvec3 subgroupClusteredOr(uvec3, uint);\n"
+ "uvec4 subgroupClusteredOr(uvec4, uint);\n"
+ "bool subgroupClusteredOr(bool, uint);\n"
+ "bvec2 subgroupClusteredOr(bvec2, uint);\n"
+ "bvec3 subgroupClusteredOr(bvec3, uint);\n"
+ "bvec4 subgroupClusteredOr(bvec4, uint);\n"
+
+ "int subgroupClusteredXor(int, uint);\n"
+ "ivec2 subgroupClusteredXor(ivec2, uint);\n"
+ "ivec3 subgroupClusteredXor(ivec3, uint);\n"
+ "ivec4 subgroupClusteredXor(ivec4, uint);\n"
+ "uint subgroupClusteredXor(uint, uint);\n"
+ "uvec2 subgroupClusteredXor(uvec2, uint);\n"
+ "uvec3 subgroupClusteredXor(uvec3, uint);\n"
+ "uvec4 subgroupClusteredXor(uvec4, uint);\n"
+ "bool subgroupClusteredXor(bool, uint);\n"
+ "bvec2 subgroupClusteredXor(bvec2, uint);\n"
+ "bvec3 subgroupClusteredXor(bvec3, uint);\n"
+ "bvec4 subgroupClusteredXor(bvec4, uint);\n"
+
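+ // GL_KHR_shader_subgroup_quad: subgroupQuadBroadcast(v, id) returns v from
+ // lane id (0..3) of the caller's 2x2 quad; the swap variants exchange values
+ // between horizontally, vertically, or diagonally adjacent quad lanes.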
+ "float subgroupQuadBroadcast(float, uint);\n"
+ "vec2 subgroupQuadBroadcast(vec2, uint);\n"
+ "vec3 subgroupQuadBroadcast(vec3, uint);\n"
+ "vec4 subgroupQuadBroadcast(vec4, uint);\n"
+ "int subgroupQuadBroadcast(int, uint);\n"
+ "ivec2 subgroupQuadBroadcast(ivec2, uint);\n"
+ "ivec3 subgroupQuadBroadcast(ivec3, uint);\n"
+ "ivec4 subgroupQuadBroadcast(ivec4, uint);\n"
+ "uint subgroupQuadBroadcast(uint, uint);\n"
+ "uvec2 subgroupQuadBroadcast(uvec2, uint);\n"
+ "uvec3 subgroupQuadBroadcast(uvec3, uint);\n"
+ "uvec4 subgroupQuadBroadcast(uvec4, uint);\n"
+ "bool subgroupQuadBroadcast(bool, uint);\n"
+ "bvec2 subgroupQuadBroadcast(bvec2, uint);\n"
+ "bvec3 subgroupQuadBroadcast(bvec3, uint);\n"
+ "bvec4 subgroupQuadBroadcast(bvec4, uint);\n"
+
+ "float subgroupQuadSwapHorizontal(float);\n"
+ "vec2 subgroupQuadSwapHorizontal(vec2);\n"
+ "vec3 subgroupQuadSwapHorizontal(vec3);\n"
+ "vec4 subgroupQuadSwapHorizontal(vec4);\n"
+ "int subgroupQuadSwapHorizontal(int);\n"
+ "ivec2 subgroupQuadSwapHorizontal(ivec2);\n"
+ "ivec3 subgroupQuadSwapHorizontal(ivec3);\n"
+ "ivec4 subgroupQuadSwapHorizontal(ivec4);\n"
+ "uint subgroupQuadSwapHorizontal(uint);\n"
+ "uvec2 subgroupQuadSwapHorizontal(uvec2);\n"
+ "uvec3 subgroupQuadSwapHorizontal(uvec3);\n"
+ "uvec4 subgroupQuadSwapHorizontal(uvec4);\n"
+ "bool subgroupQuadSwapHorizontal(bool);\n"
+ "bvec2 subgroupQuadSwapHorizontal(bvec2);\n"
+ "bvec3 subgroupQuadSwapHorizontal(bvec3);\n"
+ "bvec4 subgroupQuadSwapHorizontal(bvec4);\n"
+
+ "float subgroupQuadSwapVertical(float);\n"
+ "vec2 subgroupQuadSwapVertical(vec2);\n"
+ "vec3 subgroupQuadSwapVertical(vec3);\n"
+ "vec4 subgroupQuadSwapVertical(vec4);\n"
+ "int subgroupQuadSwapVertical(int);\n"
+ "ivec2 subgroupQuadSwapVertical(ivec2);\n"
+ "ivec3 subgroupQuadSwapVertical(ivec3);\n"
+ "ivec4 subgroupQuadSwapVertical(ivec4);\n"
+ "uint subgroupQuadSwapVertical(uint);\n"
+ "uvec2 subgroupQuadSwapVertical(uvec2);\n"
+ "uvec3 subgroupQuadSwapVertical(uvec3);\n"
+ "uvec4 subgroupQuadSwapVertical(uvec4);\n"
+ "bool subgroupQuadSwapVertical(bool);\n"
+ "bvec2 subgroupQuadSwapVertical(bvec2);\n"
+ "bvec3 subgroupQuadSwapVertical(bvec3);\n"
+ "bvec4 subgroupQuadSwapVertical(bvec4);\n"
+
+ "float subgroupQuadSwapDiagonal(float);\n"
+ "vec2 subgroupQuadSwapDiagonal(vec2);\n"
+ "vec3 subgroupQuadSwapDiagonal(vec3);\n"
+ "vec4 subgroupQuadSwapDiagonal(vec4);\n"
+ "int subgroupQuadSwapDiagonal(int);\n"
+ "ivec2 subgroupQuadSwapDiagonal(ivec2);\n"
+ "ivec3 subgroupQuadSwapDiagonal(ivec3);\n"
+ "ivec4 subgroupQuadSwapDiagonal(ivec4);\n"
+ "uint subgroupQuadSwapDiagonal(uint);\n"
+ "uvec2 subgroupQuadSwapDiagonal(uvec2);\n"
+ "uvec3 subgroupQuadSwapDiagonal(uvec3);\n"
+ "uvec4 subgroupQuadSwapDiagonal(uvec4);\n"
+ "bool subgroupQuadSwapDiagonal(bool);\n"
+ "bvec2 subgroupQuadSwapDiagonal(bvec2);\n"
+ "bvec3 subgroupQuadSwapDiagonal(bvec3);\n"
+ "bvec4 subgroupQuadSwapDiagonal(bvec4);\n"
+
+#ifdef NV_EXTENSIONS
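+ // GL_NV_shader_subgroup_partitioned: subgroupPartitionNV(v) returns a ballot
+ // identifying the invocations whose v compares equal to the caller's, and the
+ // subgroupPartitioned*NV functions reduce or scan within each such partition.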
+ "uvec4 subgroupPartitionNV(float);\n"
+ "uvec4 subgroupPartitionNV(vec2);\n"
+ "uvec4 subgroupPartitionNV(vec3);\n"
+ "uvec4 subgroupPartitionNV(vec4);\n"
+ "uvec4 subgroupPartitionNV(int);\n"
+ "uvec4 subgroupPartitionNV(ivec2);\n"
+ "uvec4 subgroupPartitionNV(ivec3);\n"
+ "uvec4 subgroupPartitionNV(ivec4);\n"
+ "uvec4 subgroupPartitionNV(uint);\n"
+ "uvec4 subgroupPartitionNV(uvec2);\n"
+ "uvec4 subgroupPartitionNV(uvec3);\n"
+ "uvec4 subgroupPartitionNV(uvec4);\n"
+ "uvec4 subgroupPartitionNV(bool);\n"
+ "uvec4 subgroupPartitionNV(bvec2);\n"
+ "uvec4 subgroupPartitionNV(bvec3);\n"
+ "uvec4 subgroupPartitionNV(bvec4);\n"
+
+ "float subgroupPartitionedAddNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedAddNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedAddNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedAddNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedAddNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedAddNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedAddNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedAddNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedAddNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedAddNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedAddNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedAddNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedMulNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedMulNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedMulNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedMulNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedMulNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedMulNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedMulNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedMulNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedMulNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedMulNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedMulNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedMulNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedMinNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedMinNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedMinNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedMinNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedMinNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedMinNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedMinNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedMinNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedMinNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedMinNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedMinNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedMinNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedMaxNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedMaxNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedMaxNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedMaxNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedMaxNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedMaxNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedMaxNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedMaxNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedMaxNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedMaxNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedMaxNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedMaxNV(uvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedAndNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedAndNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedAndNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedAndNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedAndNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedAndNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedAndNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedAndNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedAndNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedAndNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedAndNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedAndNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedOrNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedOrNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedOrNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedOrNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedOrNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedOrNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedOrNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedOrNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedOrNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedOrNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedOrNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedOrNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedXorNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedXorNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedXorNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedXorNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedXorNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedXorNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedXorNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedXorNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedXorNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedXorNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedXorNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedXorNV(bvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveAddNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveAddNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveAddNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveAddNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveAddNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveAddNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveAddNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveAddNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveAddNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveAddNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveAddNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveAddNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveMulNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveMulNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveMulNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveMulNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveMulNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveMulNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveMulNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveMulNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveMulNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveMulNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveMulNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveMulNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveMinNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveMinNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveMinNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveMinNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveMinNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveMinNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveMinNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveMinNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveMinNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveMinNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveMinNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveMinNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedInclusiveMaxNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedInclusiveMaxNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedInclusiveMaxNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedInclusiveMaxNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedInclusiveMaxNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveMaxNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveMaxNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveMaxNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveMaxNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveMaxNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveMaxNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveMaxNV(uvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedInclusiveAndNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveAndNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveAndNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveAndNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveAndNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveAndNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveAndNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveAndNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedInclusiveAndNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedInclusiveAndNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedInclusiveAndNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedInclusiveAndNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedInclusiveOrNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveOrNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveOrNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveOrNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveOrNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveOrNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveOrNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveOrNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedInclusiveOrNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedInclusiveOrNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedInclusiveOrNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedInclusiveOrNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedInclusiveXorNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedInclusiveXorNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedInclusiveXorNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedInclusiveXorNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedInclusiveXorNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedInclusiveXorNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedInclusiveXorNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedInclusiveXorNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedInclusiveXorNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedInclusiveXorNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedInclusiveXorNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedInclusiveXorNV(bvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveAddNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveAddNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveAddNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveAddNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveAddNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveAddNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveAddNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveAddNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveAddNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveAddNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveAddNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveAddNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveMulNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveMulNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveMulNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveMulNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveMulNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveMulNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveMulNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveMulNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveMulNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveMulNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveMulNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveMulNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveMinNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveMinNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveMinNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveMinNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveMinNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveMinNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveMinNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveMinNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveMinNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveMinNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveMinNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveMinNV(uvec4, uvec4 ballot);\n"
+
+ "float subgroupPartitionedExclusiveMaxNV(float, uvec4 ballot);\n"
+ "vec2 subgroupPartitionedExclusiveMaxNV(vec2, uvec4 ballot);\n"
+ "vec3 subgroupPartitionedExclusiveMaxNV(vec3, uvec4 ballot);\n"
+ "vec4 subgroupPartitionedExclusiveMaxNV(vec4, uvec4 ballot);\n"
+ "int subgroupPartitionedExclusiveMaxNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveMaxNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveMaxNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveMaxNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveMaxNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveMaxNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveMaxNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveMaxNV(uvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedExclusiveAndNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveAndNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveAndNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveAndNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveAndNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveAndNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveAndNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveAndNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedExclusiveAndNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedExclusiveAndNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedExclusiveAndNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedExclusiveAndNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedExclusiveOrNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveOrNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveOrNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveOrNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveOrNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveOrNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveOrNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveOrNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedExclusiveOrNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedExclusiveOrNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedExclusiveOrNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedExclusiveOrNV(bvec4, uvec4 ballot);\n"
+
+ "int subgroupPartitionedExclusiveXorNV(int, uvec4 ballot);\n"
+ "ivec2 subgroupPartitionedExclusiveXorNV(ivec2, uvec4 ballot);\n"
+ "ivec3 subgroupPartitionedExclusiveXorNV(ivec3, uvec4 ballot);\n"
+ "ivec4 subgroupPartitionedExclusiveXorNV(ivec4, uvec4 ballot);\n"
+ "uint subgroupPartitionedExclusiveXorNV(uint, uvec4 ballot);\n"
+ "uvec2 subgroupPartitionedExclusiveXorNV(uvec2, uvec4 ballot);\n"
+ "uvec3 subgroupPartitionedExclusiveXorNV(uvec3, uvec4 ballot);\n"
+ "uvec4 subgroupPartitionedExclusiveXorNV(uvec4, uvec4 ballot);\n"
+ "bool subgroupPartitionedExclusiveXorNV(bool, uvec4 ballot);\n"
+ "bvec2 subgroupPartitionedExclusiveXorNV(bvec2, uvec4 ballot);\n"
+ "bvec3 subgroupPartitionedExclusiveXorNV(bvec3, uvec4 ballot);\n"
+ "bvec4 subgroupPartitionedExclusiveXorNV(bvec4, uvec4 ballot);\n"
+#endif
+
+ "\n");
+
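+ // The double-precision overloads require desktop GLSL 4.00+, where the
+ // double type first became available (GL_ARB_gpu_shader_fp64).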
+ if (profile != EEsProfile && version >= 400) {
+ commonBuiltins.append(
+ "bool subgroupAllEqual(double);\n"
+ "bool subgroupAllEqual(dvec2);\n"
+ "bool subgroupAllEqual(dvec3);\n"
+ "bool subgroupAllEqual(dvec4);\n"
+
+ "double subgroupBroadcast(double, uint);\n"
+ "dvec2 subgroupBroadcast(dvec2, uint);\n"
+ "dvec3 subgroupBroadcast(dvec3, uint);\n"
+ "dvec4 subgroupBroadcast(dvec4, uint);\n"
+
+ "double subgroupBroadcastFirst(double);\n"
+ "dvec2 subgroupBroadcastFirst(dvec2);\n"
+ "dvec3 subgroupBroadcastFirst(dvec3);\n"
+ "dvec4 subgroupBroadcastFirst(dvec4);\n"
+
+ "double subgroupShuffle(double, uint);\n"
+ "dvec2 subgroupShuffle(dvec2, uint);\n"
+ "dvec3 subgroupShuffle(dvec3, uint);\n"
+ "dvec4 subgroupShuffle(dvec4, uint);\n"
+
+ "double subgroupShuffleXor(double, uint);\n"
+ "dvec2 subgroupShuffleXor(dvec2, uint);\n"
+ "dvec3 subgroupShuffleXor(dvec3, uint);\n"
+ "dvec4 subgroupShuffleXor(dvec4, uint);\n"
+
+ "double subgroupShuffleUp(double, uint delta);\n"
+ "dvec2 subgroupShuffleUp(dvec2, uint delta);\n"
+ "dvec3 subgroupShuffleUp(dvec3, uint delta);\n"
+ "dvec4 subgroupShuffleUp(dvec4, uint delta);\n"
+
+ "double subgroupShuffleDown(double, uint delta);\n"
+ "dvec2 subgroupShuffleDown(dvec2, uint delta);\n"
+ "dvec3 subgroupShuffleDown(dvec3, uint delta);\n"
+ "dvec4 subgroupShuffleDown(dvec4, uint delta);\n"
+
+ "double subgroupAdd(double);\n"
+ "dvec2 subgroupAdd(dvec2);\n"
+ "dvec3 subgroupAdd(dvec3);\n"
+ "dvec4 subgroupAdd(dvec4);\n"
+
+ "double subgroupMul(double);\n"
+ "dvec2 subgroupMul(dvec2);\n"
+ "dvec3 subgroupMul(dvec3);\n"
+ "dvec4 subgroupMul(dvec4);\n"
+
+ "double subgroupMin(double);\n"
+ "dvec2 subgroupMin(dvec2);\n"
+ "dvec3 subgroupMin(dvec3);\n"
+ "dvec4 subgroupMin(dvec4);\n"
+
+ "double subgroupMax(double);\n"
+ "dvec2 subgroupMax(dvec2);\n"
+ "dvec3 subgroupMax(dvec3);\n"
+ "dvec4 subgroupMax(dvec4);\n"
+
+ "double subgroupInclusiveAdd(double);\n"
+ "dvec2 subgroupInclusiveAdd(dvec2);\n"
+ "dvec3 subgroupInclusiveAdd(dvec3);\n"
+ "dvec4 subgroupInclusiveAdd(dvec4);\n"
+
+ "double subgroupInclusiveMul(double);\n"
+ "dvec2 subgroupInclusiveMul(dvec2);\n"
+ "dvec3 subgroupInclusiveMul(dvec3);\n"
+ "dvec4 subgroupInclusiveMul(dvec4);\n"
+
+ "double subgroupInclusiveMin(double);\n"
+ "dvec2 subgroupInclusiveMin(dvec2);\n"
+ "dvec3 subgroupInclusiveMin(dvec3);\n"
+ "dvec4 subgroupInclusiveMin(dvec4);\n"
+
+ "double subgroupInclusiveMax(double);\n"
+ "dvec2 subgroupInclusiveMax(dvec2);\n"
+ "dvec3 subgroupInclusiveMax(dvec3);\n"
+ "dvec4 subgroupInclusiveMax(dvec4);\n"
+
+ "double subgroupExclusiveAdd(double);\n"
+ "dvec2 subgroupExclusiveAdd(dvec2);\n"
+ "dvec3 subgroupExclusiveAdd(dvec3);\n"
+ "dvec4 subgroupExclusiveAdd(dvec4);\n"
+
+ "double subgroupExclusiveMul(double);\n"
+ "dvec2 subgroupExclusiveMul(dvec2);\n"
+ "dvec3 subgroupExclusiveMul(dvec3);\n"
+ "dvec4 subgroupExclusiveMul(dvec4);\n"
+
+ "double subgroupExclusiveMin(double);\n"
+ "dvec2 subgroupExclusiveMin(dvec2);\n"
+ "dvec3 subgroupExclusiveMin(dvec3);\n"
+ "dvec4 subgroupExclusiveMin(dvec4);\n"
+
+ "double subgroupExclusiveMax(double);\n"
+ "dvec2 subgroupExclusiveMax(dvec2);\n"
+ "dvec3 subgroupExclusiveMax(dvec3);\n"
+ "dvec4 subgroupExclusiveMax(dvec4);\n"
+
+ "double subgroupClusteredAdd(double, uint);\n"
+ "dvec2 subgroupClusteredAdd(dvec2, uint);\n"
+ "dvec3 subgroupClusteredAdd(dvec3, uint);\n"
+ "dvec4 subgroupClusteredAdd(dvec4, uint);\n"
+
+ "double subgroupClusteredMul(double, uint);\n"
+ "dvec2 subgroupClusteredMul(dvec2, uint);\n"
+ "dvec3 subgroupClusteredMul(dvec3, uint);\n"
+ "dvec4 subgroupClusteredMul(dvec4, uint);\n"
+
+ "double subgroupClusteredMin(double, uint);\n"
+ "dvec2 subgroupClusteredMin(dvec2, uint);\n"
+ "dvec3 subgroupClusteredMin(dvec3, uint);\n"
+ "dvec4 subgroupClusteredMin(dvec4, uint);\n"
+
+ "double subgroupClusteredMax(double, uint);\n"
+ "dvec2 subgroupClusteredMax(dvec2, uint);\n"
+ "dvec3 subgroupClusteredMax(dvec3, uint);\n"
+ "dvec4 subgroupClusteredMax(dvec4, uint);\n"
+
+ "double subgroupQuadBroadcast(double, uint);\n"
+ "dvec2 subgroupQuadBroadcast(dvec2, uint);\n"
+ "dvec3 subgroupQuadBroadcast(dvec3, uint);\n"
+ "dvec4 subgroupQuadBroadcast(dvec4, uint);\n"
+
+ "double subgroupQuadSwapHorizontal(double);\n"
+ "dvec2 subgroupQuadSwapHorizontal(dvec2);\n"
+ "dvec3 subgroupQuadSwapHorizontal(dvec3);\n"
+ "dvec4 subgroupQuadSwapHorizontal(dvec4);\n"
+
+ "double subgroupQuadSwapVertical(double);\n"
+ "dvec2 subgroupQuadSwapVertical(dvec2);\n"
+ "dvec3 subgroupQuadSwapVertical(dvec3);\n"
+ "dvec4 subgroupQuadSwapVertical(dvec4);\n"
+
+ "double subgroupQuadSwapDiagonal(double);\n"
+ "dvec2 subgroupQuadSwapDiagonal(dvec2);\n"
+ "dvec3 subgroupQuadSwapDiagonal(dvec3);\n"
+ "dvec4 subgroupQuadSwapDiagonal(dvec4);\n"
+
+#ifdef NV_EXTENSIONS
+ "uvec4 subgroupPartitionNV(double);\n"
+ "uvec4 subgroupPartitionNV(dvec2);\n"
+ "uvec4 subgroupPartitionNV(dvec3);\n"
+ "uvec4 subgroupPartitionNV(dvec4);\n"
+
+ "double subgroupPartitionedAddNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedAddNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedAddNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedAddNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedMulNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedMulNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedMulNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedMulNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedMinNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedMinNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedMinNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedMinNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedMaxNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedMaxNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedMaxNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedMaxNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveAddNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveAddNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveAddNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveAddNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveMulNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveMulNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveMulNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveMulNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveMinNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveMinNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveMinNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveMinNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedInclusiveMaxNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedInclusiveMaxNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedInclusiveMaxNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedInclusiveMaxNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveAddNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveAddNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveAddNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveAddNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveMulNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveMulNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveMulNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveMulNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveMinNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveMinNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveMinNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveMinNV(dvec4, uvec4 ballot);\n"
+
+ "double subgroupPartitionedExclusiveMaxNV(double, uvec4 ballot);\n"
+ "dvec2 subgroupPartitionedExclusiveMaxNV(dvec2, uvec4 ballot);\n"
+ "dvec3 subgroupPartitionedExclusiveMaxNV(dvec3, uvec4 ballot);\n"
+ "dvec4 subgroupPartitionedExclusiveMaxNV(dvec4, uvec4 ballot);\n"
+#endif
+
+ "\n");
+ }
+
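+ // subgroupMemoryBarrierShared() orders shared-memory accesses, so it is only
+ // declared for stages that have shared memory: compute, plus the NV mesh and
+ // task stages below.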
+ stageBuiltins[EShLangCompute].append(
+ "void subgroupMemoryBarrierShared();"
+
+ "\n"
+ );
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV].append(
+ "void subgroupMemoryBarrierShared();"
+ "\n"
+ );
+ stageBuiltins[EShLangTaskNV].append(
+ "void subgroupMemoryBarrierShared();"
+ "\n"
+ );
+#endif
+ }
+
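+ // GLSL 4.60 promoted the GL_ARB_shader_group_vote functions to core, so they
+ // are declared unconditionally at that version.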
+ if (profile != EEsProfile && version >= 460) {
+ commonBuiltins.append(
+ "bool anyInvocation(bool);"
+ "bool allInvocations(bool);"
+ "bool allInvocationsEqual(bool);"
+
+ "\n");
+ }
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_shader_ballot
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "float minInvocationsAMD(float);"
+ "vec2 minInvocationsAMD(vec2);"
+ "vec3 minInvocationsAMD(vec3);"
+ "vec4 minInvocationsAMD(vec4);"
+
+ "int minInvocationsAMD(int);"
+ "ivec2 minInvocationsAMD(ivec2);"
+ "ivec3 minInvocationsAMD(ivec3);"
+ "ivec4 minInvocationsAMD(ivec4);"
+
+ "uint minInvocationsAMD(uint);"
+ "uvec2 minInvocationsAMD(uvec2);"
+ "uvec3 minInvocationsAMD(uvec3);"
+ "uvec4 minInvocationsAMD(uvec4);"
+
+ "double minInvocationsAMD(double);"
+ "dvec2 minInvocationsAMD(dvec2);"
+ "dvec3 minInvocationsAMD(dvec3);"
+ "dvec4 minInvocationsAMD(dvec4);"
+
+ "int64_t minInvocationsAMD(int64_t);"
+ "i64vec2 minInvocationsAMD(i64vec2);"
+ "i64vec3 minInvocationsAMD(i64vec3);"
+ "i64vec4 minInvocationsAMD(i64vec4);"
+
+ "uint64_t minInvocationsAMD(uint64_t);"
+ "u64vec2 minInvocationsAMD(u64vec2);"
+ "u64vec3 minInvocationsAMD(u64vec3);"
+ "u64vec4 minInvocationsAMD(u64vec4);"
+
+ "float16_t minInvocationsAMD(float16_t);"
+ "f16vec2 minInvocationsAMD(f16vec2);"
+ "f16vec3 minInvocationsAMD(f16vec3);"
+ "f16vec4 minInvocationsAMD(f16vec4);"
+
+ "int16_t minInvocationsAMD(int16_t);"
+ "i16vec2 minInvocationsAMD(i16vec2);"
+ "i16vec3 minInvocationsAMD(i16vec3);"
+ "i16vec4 minInvocationsAMD(i16vec4);"
+
+ "uint16_t minInvocationsAMD(uint16_t);"
+ "u16vec2 minInvocationsAMD(u16vec2);"
+ "u16vec3 minInvocationsAMD(u16vec3);"
+ "u16vec4 minInvocationsAMD(u16vec4);"
+
+ "float minInvocationsInclusiveScanAMD(float);"
+ "vec2 minInvocationsInclusiveScanAMD(vec2);"
+ "vec3 minInvocationsInclusiveScanAMD(vec3);"
+ "vec4 minInvocationsInclusiveScanAMD(vec4);"
+
+ "int minInvocationsInclusiveScanAMD(int);"
+ "ivec2 minInvocationsInclusiveScanAMD(ivec2);"
+ "ivec3 minInvocationsInclusiveScanAMD(ivec3);"
+ "ivec4 minInvocationsInclusiveScanAMD(ivec4);"
+
+ "uint minInvocationsInclusiveScanAMD(uint);"
+ "uvec2 minInvocationsInclusiveScanAMD(uvec2);"
+ "uvec3 minInvocationsInclusiveScanAMD(uvec3);"
+ "uvec4 minInvocationsInclusiveScanAMD(uvec4);"
+
+ "double minInvocationsInclusiveScanAMD(double);"
+ "dvec2 minInvocationsInclusiveScanAMD(dvec2);"
+ "dvec3 minInvocationsInclusiveScanAMD(dvec3);"
+ "dvec4 minInvocationsInclusiveScanAMD(dvec4);"
+
+ "int64_t minInvocationsInclusiveScanAMD(int64_t);"
+ "i64vec2 minInvocationsInclusiveScanAMD(i64vec2);"
+ "i64vec3 minInvocationsInclusiveScanAMD(i64vec3);"
+ "i64vec4 minInvocationsInclusiveScanAMD(i64vec4);"
+
+ "uint64_t minInvocationsInclusiveScanAMD(uint64_t);"
+ "u64vec2 minInvocationsInclusiveScanAMD(u64vec2);"
+ "u64vec3 minInvocationsInclusiveScanAMD(u64vec3);"
+ "u64vec4 minInvocationsInclusiveScanAMD(u64vec4);"
+
+ "float16_t minInvocationsInclusiveScanAMD(float16_t);"
+ "f16vec2 minInvocationsInclusiveScanAMD(f16vec2);"
+ "f16vec3 minInvocationsInclusiveScanAMD(f16vec3);"
+ "f16vec4 minInvocationsInclusiveScanAMD(f16vec4);"
+
+ "int16_t minInvocationsInclusiveScanAMD(int16_t);"
+ "i16vec2 minInvocationsInclusiveScanAMD(i16vec2);"
+ "i16vec3 minInvocationsInclusiveScanAMD(i16vec3);"
+ "i16vec4 minInvocationsInclusiveScanAMD(i16vec4);"
+
+ "uint16_t minInvocationsInclusiveScanAMD(uint16_t);"
+ "u16vec2 minInvocationsInclusiveScanAMD(u16vec2);"
+ "u16vec3 minInvocationsInclusiveScanAMD(u16vec3);"
+ "u16vec4 minInvocationsInclusiveScanAMD(u16vec4);"
+
+ "float minInvocationsExclusiveScanAMD(float);"
+ "vec2 minInvocationsExclusiveScanAMD(vec2);"
+ "vec3 minInvocationsExclusiveScanAMD(vec3);"
+ "vec4 minInvocationsExclusiveScanAMD(vec4);"
+
+ "int minInvocationsExclusiveScanAMD(int);"
+ "ivec2 minInvocationsExclusiveScanAMD(ivec2);"
+ "ivec3 minInvocationsExclusiveScanAMD(ivec3);"
+ "ivec4 minInvocationsExclusiveScanAMD(ivec4);"
+
+ "uint minInvocationsExclusiveScanAMD(uint);"
+ "uvec2 minInvocationsExclusiveScanAMD(uvec2);"
+ "uvec3 minInvocationsExclusiveScanAMD(uvec3);"
+ "uvec4 minInvocationsExclusiveScanAMD(uvec4);"
+
+ "double minInvocationsExclusiveScanAMD(double);"
+ "dvec2 minInvocationsExclusiveScanAMD(dvec2);"
+ "dvec3 minInvocationsExclusiveScanAMD(dvec3);"
+ "dvec4 minInvocationsExclusiveScanAMD(dvec4);"
+
+ "int64_t minInvocationsExclusiveScanAMD(int64_t);"
+ "i64vec2 minInvocationsExclusiveScanAMD(i64vec2);"
+ "i64vec3 minInvocationsExclusiveScanAMD(i64vec3);"
+ "i64vec4 minInvocationsExclusiveScanAMD(i64vec4);"
+
+ "uint64_t minInvocationsExclusiveScanAMD(uint64_t);"
+ "u64vec2 minInvocationsExclusiveScanAMD(u64vec2);"
+ "u64vec3 minInvocationsExclusiveScanAMD(u64vec3);"
+ "u64vec4 minInvocationsExclusiveScanAMD(u64vec4);"
+
+ "float16_t minInvocationsExclusiveScanAMD(float16_t);"
+ "f16vec2 minInvocationsExclusiveScanAMD(f16vec2);"
+ "f16vec3 minInvocationsExclusiveScanAMD(f16vec3);"
+ "f16vec4 minInvocationsExclusiveScanAMD(f16vec4);"
+
+ "int16_t minInvocationsExclusiveScanAMD(int16_t);"
+ "i16vec2 minInvocationsExclusiveScanAMD(i16vec2);"
+ "i16vec3 minInvocationsExclusiveScanAMD(i16vec3);"
+ "i16vec4 minInvocationsExclusiveScanAMD(i16vec4);"
+
+ "uint16_t minInvocationsExclusiveScanAMD(uint16_t);"
+ "u16vec2 minInvocationsExclusiveScanAMD(u16vec2);"
+ "u16vec3 minInvocationsExclusiveScanAMD(u16vec3);"
+ "u16vec4 minInvocationsExclusiveScanAMD(u16vec4);"
+
+ "float maxInvocationsAMD(float);"
+ "vec2 maxInvocationsAMD(vec2);"
+ "vec3 maxInvocationsAMD(vec3);"
+ "vec4 maxInvocationsAMD(vec4);"
+
+ "int maxInvocationsAMD(int);"
+ "ivec2 maxInvocationsAMD(ivec2);"
+ "ivec3 maxInvocationsAMD(ivec3);"
+ "ivec4 maxInvocationsAMD(ivec4);"
+
+ "uint maxInvocationsAMD(uint);"
+ "uvec2 maxInvocationsAMD(uvec2);"
+ "uvec3 maxInvocationsAMD(uvec3);"
+ "uvec4 maxInvocationsAMD(uvec4);"
+
+ "double maxInvocationsAMD(double);"
+ "dvec2 maxInvocationsAMD(dvec2);"
+ "dvec3 maxInvocationsAMD(dvec3);"
+ "dvec4 maxInvocationsAMD(dvec4);"
+
+ "int64_t maxInvocationsAMD(int64_t);"
+ "i64vec2 maxInvocationsAMD(i64vec2);"
+ "i64vec3 maxInvocationsAMD(i64vec3);"
+ "i64vec4 maxInvocationsAMD(i64vec4);"
+
+ "uint64_t maxInvocationsAMD(uint64_t);"
+ "u64vec2 maxInvocationsAMD(u64vec2);"
+ "u64vec3 maxInvocationsAMD(u64vec3);"
+ "u64vec4 maxInvocationsAMD(u64vec4);"
+
+ "float16_t maxInvocationsAMD(float16_t);"
+ "f16vec2 maxInvocationsAMD(f16vec2);"
+ "f16vec3 maxInvocationsAMD(f16vec3);"
+ "f16vec4 maxInvocationsAMD(f16vec4);"
+
+ "int16_t maxInvocationsAMD(int16_t);"
+ "i16vec2 maxInvocationsAMD(i16vec2);"
+ "i16vec3 maxInvocationsAMD(i16vec3);"
+ "i16vec4 maxInvocationsAMD(i16vec4);"
+
+ "uint16_t maxInvocationsAMD(uint16_t);"
+ "u16vec2 maxInvocationsAMD(u16vec2);"
+ "u16vec3 maxInvocationsAMD(u16vec3);"
+ "u16vec4 maxInvocationsAMD(u16vec4);"
+
+ "float maxInvocationsInclusiveScanAMD(float);"
+ "vec2 maxInvocationsInclusiveScanAMD(vec2);"
+ "vec3 maxInvocationsInclusiveScanAMD(vec3);"
+ "vec4 maxInvocationsInclusiveScanAMD(vec4);"
+
+ "int maxInvocationsInclusiveScanAMD(int);"
+ "ivec2 maxInvocationsInclusiveScanAMD(ivec2);"
+ "ivec3 maxInvocationsInclusiveScanAMD(ivec3);"
+ "ivec4 maxInvocationsInclusiveScanAMD(ivec4);"
+
+ "uint maxInvocationsInclusiveScanAMD(uint);"
+ "uvec2 maxInvocationsInclusiveScanAMD(uvec2);"
+ "uvec3 maxInvocationsInclusiveScanAMD(uvec3);"
+ "uvec4 maxInvocationsInclusiveScanAMD(uvec4);"
+
+ "double maxInvocationsInclusiveScanAMD(double);"
+ "dvec2 maxInvocationsInclusiveScanAMD(dvec2);"
+ "dvec3 maxInvocationsInclusiveScanAMD(dvec3);"
+ "dvec4 maxInvocationsInclusiveScanAMD(dvec4);"
+
+ "int64_t maxInvocationsInclusiveScanAMD(int64_t);"
+ "i64vec2 maxInvocationsInclusiveScanAMD(i64vec2);"
+ "i64vec3 maxInvocationsInclusiveScanAMD(i64vec3);"
+ "i64vec4 maxInvocationsInclusiveScanAMD(i64vec4);"
+
+ "uint64_t maxInvocationsInclusiveScanAMD(uint64_t);"
+ "u64vec2 maxInvocationsInclusiveScanAMD(u64vec2);"
+ "u64vec3 maxInvocationsInclusiveScanAMD(u64vec3);"
+ "u64vec4 maxInvocationsInclusiveScanAMD(u64vec4);"
+
+ "float16_t maxInvocationsInclusiveScanAMD(float16_t);"
+ "f16vec2 maxInvocationsInclusiveScanAMD(f16vec2);"
+ "f16vec3 maxInvocationsInclusiveScanAMD(f16vec3);"
+ "f16vec4 maxInvocationsInclusiveScanAMD(f16vec4);"
+
+ "int16_t maxInvocationsInclusiveScanAMD(int16_t);"
+ "i16vec2 maxInvocationsInclusiveScanAMD(i16vec2);"
+ "i16vec3 maxInvocationsInclusiveScanAMD(i16vec3);"
+ "i16vec4 maxInvocationsInclusiveScanAMD(i16vec4);"
+
+ "uint16_t maxInvocationsInclusiveScanAMD(uint16_t);"
+ "u16vec2 maxInvocationsInclusiveScanAMD(u16vec2);"
+ "u16vec3 maxInvocationsInclusiveScanAMD(u16vec3);"
+ "u16vec4 maxInvocationsInclusiveScanAMD(u16vec4);"
+
+ "float maxInvocationsExclusiveScanAMD(float);"
+ "vec2 maxInvocationsExclusiveScanAMD(vec2);"
+ "vec3 maxInvocationsExclusiveScanAMD(vec3);"
+ "vec4 maxInvocationsExclusiveScanAMD(vec4);"
+
+ "int maxInvocationsExclusiveScanAMD(int);"
+ "ivec2 maxInvocationsExclusiveScanAMD(ivec2);"
+ "ivec3 maxInvocationsExclusiveScanAMD(ivec3);"
+ "ivec4 maxInvocationsExclusiveScanAMD(ivec4);"
+
+ "uint maxInvocationsExclusiveScanAMD(uint);"
+ "uvec2 maxInvocationsExclusiveScanAMD(uvec2);"
+ "uvec3 maxInvocationsExclusiveScanAMD(uvec3);"
+ "uvec4 maxInvocationsExclusiveScanAMD(uvec4);"
+
+ "double maxInvocationsExclusiveScanAMD(double);"
+ "dvec2 maxInvocationsExclusiveScanAMD(dvec2);"
+ "dvec3 maxInvocationsExclusiveScanAMD(dvec3);"
+ "dvec4 maxInvocationsExclusiveScanAMD(dvec4);"
+
+ "int64_t maxInvocationsExclusiveScanAMD(int64_t);"
+ "i64vec2 maxInvocationsExclusiveScanAMD(i64vec2);"
+ "i64vec3 maxInvocationsExclusiveScanAMD(i64vec3);"
+ "i64vec4 maxInvocationsExclusiveScanAMD(i64vec4);"
+
+ "uint64_t maxInvocationsExclusiveScanAMD(uint64_t);"
+ "u64vec2 maxInvocationsExclusiveScanAMD(u64vec2);"
+ "u64vec3 maxInvocationsExclusiveScanAMD(u64vec3);"
+ "u64vec4 maxInvocationsExclusiveScanAMD(u64vec4);"
+
+ "float16_t maxInvocationsExclusiveScanAMD(float16_t);"
+ "f16vec2 maxInvocationsExclusiveScanAMD(f16vec2);"
+ "f16vec3 maxInvocationsExclusiveScanAMD(f16vec3);"
+ "f16vec4 maxInvocationsExclusiveScanAMD(f16vec4);"
+
+ "int16_t maxInvocationsExclusiveScanAMD(int16_t);"
+ "i16vec2 maxInvocationsExclusiveScanAMD(i16vec2);"
+ "i16vec3 maxInvocationsExclusiveScanAMD(i16vec3);"
+ "i16vec4 maxInvocationsExclusiveScanAMD(i16vec4);"
+
+ "uint16_t maxInvocationsExclusiveScanAMD(uint16_t);"
+ "u16vec2 maxInvocationsExclusiveScanAMD(u16vec2);"
+ "u16vec3 maxInvocationsExclusiveScanAMD(u16vec3);"
+ "u16vec4 maxInvocationsExclusiveScanAMD(u16vec4);"
+
+ "float addInvocationsAMD(float);"
+ "vec2 addInvocationsAMD(vec2);"
+ "vec3 addInvocationsAMD(vec3);"
+ "vec4 addInvocationsAMD(vec4);"
+
+ "int addInvocationsAMD(int);"
+ "ivec2 addInvocationsAMD(ivec2);"
+ "ivec3 addInvocationsAMD(ivec3);"
+ "ivec4 addInvocationsAMD(ivec4);"
+
+ "uint addInvocationsAMD(uint);"
+ "uvec2 addInvocationsAMD(uvec2);"
+ "uvec3 addInvocationsAMD(uvec3);"
+ "uvec4 addInvocationsAMD(uvec4);"
+
+ "double addInvocationsAMD(double);"
+ "dvec2 addInvocationsAMD(dvec2);"
+ "dvec3 addInvocationsAMD(dvec3);"
+ "dvec4 addInvocationsAMD(dvec4);"
+
+ "int64_t addInvocationsAMD(int64_t);"
+ "i64vec2 addInvocationsAMD(i64vec2);"
+ "i64vec3 addInvocationsAMD(i64vec3);"
+ "i64vec4 addInvocationsAMD(i64vec4);"
+
+ "uint64_t addInvocationsAMD(uint64_t);"
+ "u64vec2 addInvocationsAMD(u64vec2);"
+ "u64vec3 addInvocationsAMD(u64vec3);"
+ "u64vec4 addInvocationsAMD(u64vec4);"
+
+ "float16_t addInvocationsAMD(float16_t);"
+ "f16vec2 addInvocationsAMD(f16vec2);"
+ "f16vec3 addInvocationsAMD(f16vec3);"
+ "f16vec4 addInvocationsAMD(f16vec4);"
+
+ "int16_t addInvocationsAMD(int16_t);"
+ "i16vec2 addInvocationsAMD(i16vec2);"
+ "i16vec3 addInvocationsAMD(i16vec3);"
+ "i16vec4 addInvocationsAMD(i16vec4);"
+
+ "uint16_t addInvocationsAMD(uint16_t);"
+ "u16vec2 addInvocationsAMD(u16vec2);"
+ "u16vec3 addInvocationsAMD(u16vec3);"
+ "u16vec4 addInvocationsAMD(u16vec4);"
+
+ "float addInvocationsInclusiveScanAMD(float);"
+ "vec2 addInvocationsInclusiveScanAMD(vec2);"
+ "vec3 addInvocationsInclusiveScanAMD(vec3);"
+ "vec4 addInvocationsInclusiveScanAMD(vec4);"
+
+ "int addInvocationsInclusiveScanAMD(int);"
+ "ivec2 addInvocationsInclusiveScanAMD(ivec2);"
+ "ivec3 addInvocationsInclusiveScanAMD(ivec3);"
+ "ivec4 addInvocationsInclusiveScanAMD(ivec4);"
+
+ "uint addInvocationsInclusiveScanAMD(uint);"
+ "uvec2 addInvocationsInclusiveScanAMD(uvec2);"
+ "uvec3 addInvocationsInclusiveScanAMD(uvec3);"
+ "uvec4 addInvocationsInclusiveScanAMD(uvec4);"
+
+ "double addInvocationsInclusiveScanAMD(double);"
+ "dvec2 addInvocationsInclusiveScanAMD(dvec2);"
+ "dvec3 addInvocationsInclusiveScanAMD(dvec3);"
+ "dvec4 addInvocationsInclusiveScanAMD(dvec4);"
+
+ "int64_t addInvocationsInclusiveScanAMD(int64_t);"
+ "i64vec2 addInvocationsInclusiveScanAMD(i64vec2);"
+ "i64vec3 addInvocationsInclusiveScanAMD(i64vec3);"
+ "i64vec4 addInvocationsInclusiveScanAMD(i64vec4);"
+
+ "uint64_t addInvocationsInclusiveScanAMD(uint64_t);"
+ "u64vec2 addInvocationsInclusiveScanAMD(u64vec2);"
+ "u64vec3 addInvocationsInclusiveScanAMD(u64vec3);"
+ "u64vec4 addInvocationsInclusiveScanAMD(u64vec4);"
+
+ "float16_t addInvocationsInclusiveScanAMD(float16_t);"
+ "f16vec2 addInvocationsInclusiveScanAMD(f16vec2);"
+ "f16vec3 addInvocationsInclusiveScanAMD(f16vec3);"
+ "f16vec4 addInvocationsInclusiveScanAMD(f16vec4);"
+
+ "int16_t addInvocationsInclusiveScanAMD(int16_t);"
+ "i16vec2 addInvocationsInclusiveScanAMD(i16vec2);"
+ "i16vec3 addInvocationsInclusiveScanAMD(i16vec3);"
+ "i16vec4 addInvocationsInclusiveScanAMD(i16vec4);"
+
+ "uint16_t addInvocationsInclusiveScanAMD(uint16_t);"
+ "u16vec2 addInvocationsInclusiveScanAMD(u16vec2);"
+ "u16vec3 addInvocationsInclusiveScanAMD(u16vec3);"
+ "u16vec4 addInvocationsInclusiveScanAMD(u16vec4);"
+
+ "float addInvocationsExclusiveScanAMD(float);"
+ "vec2 addInvocationsExclusiveScanAMD(vec2);"
+ "vec3 addInvocationsExclusiveScanAMD(vec3);"
+ "vec4 addInvocationsExclusiveScanAMD(vec4);"
+
+ "int addInvocationsExclusiveScanAMD(int);"
+ "ivec2 addInvocationsExclusiveScanAMD(ivec2);"
+ "ivec3 addInvocationsExclusiveScanAMD(ivec3);"
+ "ivec4 addInvocationsExclusiveScanAMD(ivec4);"
+
+ "uint addInvocationsExclusiveScanAMD(uint);"
+ "uvec2 addInvocationsExclusiveScanAMD(uvec2);"
+ "uvec3 addInvocationsExclusiveScanAMD(uvec3);"
+ "uvec4 addInvocationsExclusiveScanAMD(uvec4);"
+
+ "double addInvocationsExclusiveScanAMD(double);"
+ "dvec2 addInvocationsExclusiveScanAMD(dvec2);"
+ "dvec3 addInvocationsExclusiveScanAMD(dvec3);"
+ "dvec4 addInvocationsExclusiveScanAMD(dvec4);"
+
+ "int64_t addInvocationsExclusiveScanAMD(int64_t);"
+ "i64vec2 addInvocationsExclusiveScanAMD(i64vec2);"
+ "i64vec3 addInvocationsExclusiveScanAMD(i64vec3);"
+ "i64vec4 addInvocationsExclusiveScanAMD(i64vec4);"
+
+ "uint64_t addInvocationsExclusiveScanAMD(uint64_t);"
+ "u64vec2 addInvocationsExclusiveScanAMD(u64vec2);"
+ "u64vec3 addInvocationsExclusiveScanAMD(u64vec3);"
+ "u64vec4 addInvocationsExclusiveScanAMD(u64vec4);"
+
+ "float16_t addInvocationsExclusiveScanAMD(float16_t);"
+ "f16vec2 addInvocationsExclusiveScanAMD(f16vec2);"
+ "f16vec3 addInvocationsExclusiveScanAMD(f16vec3);"
+ "f16vec4 addInvocationsExclusiveScanAMD(f16vec4);"
+
+ "int16_t addInvocationsExclusiveScanAMD(int16_t);"
+ "i16vec2 addInvocationsExclusiveScanAMD(i16vec2);"
+ "i16vec3 addInvocationsExclusiveScanAMD(i16vec3);"
+ "i16vec4 addInvocationsExclusiveScanAMD(i16vec4);"
+
+ "uint16_t addInvocationsExclusiveScanAMD(uint16_t);"
+ "u16vec2 addInvocationsExclusiveScanAMD(u16vec2);"
+ "u16vec3 addInvocationsExclusiveScanAMD(u16vec3);"
+ "u16vec4 addInvocationsExclusiveScanAMD(u16vec4);"
+
+ "float minInvocationsNonUniformAMD(float);"
+ "vec2 minInvocationsNonUniformAMD(vec2);"
+ "vec3 minInvocationsNonUniformAMD(vec3);"
+ "vec4 minInvocationsNonUniformAMD(vec4);"
+
+ "int minInvocationsNonUniformAMD(int);"
+ "ivec2 minInvocationsNonUniformAMD(ivec2);"
+ "ivec3 minInvocationsNonUniformAMD(ivec3);"
+ "ivec4 minInvocationsNonUniformAMD(ivec4);"
+
+ "uint minInvocationsNonUniformAMD(uint);"
+ "uvec2 minInvocationsNonUniformAMD(uvec2);"
+ "uvec3 minInvocationsNonUniformAMD(uvec3);"
+ "uvec4 minInvocationsNonUniformAMD(uvec4);"
+
+ "double minInvocationsNonUniformAMD(double);"
+ "dvec2 minInvocationsNonUniformAMD(dvec2);"
+ "dvec3 minInvocationsNonUniformAMD(dvec3);"
+ "dvec4 minInvocationsNonUniformAMD(dvec4);"
+
+ "int64_t minInvocationsNonUniformAMD(int64_t);"
+ "i64vec2 minInvocationsNonUniformAMD(i64vec2);"
+ "i64vec3 minInvocationsNonUniformAMD(i64vec3);"
+ "i64vec4 minInvocationsNonUniformAMD(i64vec4);"
+
+ "uint64_t minInvocationsNonUniformAMD(uint64_t);"
+ "u64vec2 minInvocationsNonUniformAMD(u64vec2);"
+ "u64vec3 minInvocationsNonUniformAMD(u64vec3);"
+ "u64vec4 minInvocationsNonUniformAMD(u64vec4);"
+
+ "float16_t minInvocationsNonUniformAMD(float16_t);"
+ "f16vec2 minInvocationsNonUniformAMD(f16vec2);"
+ "f16vec3 minInvocationsNonUniformAMD(f16vec3);"
+ "f16vec4 minInvocationsNonUniformAMD(f16vec4);"
+
+ "int16_t minInvocationsNonUniformAMD(int16_t);"
+ "i16vec2 minInvocationsNonUniformAMD(i16vec2);"
+ "i16vec3 minInvocationsNonUniformAMD(i16vec3);"
+ "i16vec4 minInvocationsNonUniformAMD(i16vec4);"
+
+ "uint16_t minInvocationsNonUniformAMD(uint16_t);"
+ "u16vec2 minInvocationsNonUniformAMD(u16vec2);"
+ "u16vec3 minInvocationsNonUniformAMD(u16vec3);"
+ "u16vec4 minInvocationsNonUniformAMD(u16vec4);"
+
+ "float minInvocationsInclusiveScanNonUniformAMD(float);"
+ "vec2 minInvocationsInclusiveScanNonUniformAMD(vec2);"
+ "vec3 minInvocationsInclusiveScanNonUniformAMD(vec3);"
+ "vec4 minInvocationsInclusiveScanNonUniformAMD(vec4);"
+
+ "int minInvocationsInclusiveScanNonUniformAMD(int);"
+ "ivec2 minInvocationsInclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 minInvocationsInclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 minInvocationsInclusiveScanNonUniformAMD(ivec4);"
+
+ "uint minInvocationsInclusiveScanNonUniformAMD(uint);"
+ "uvec2 minInvocationsInclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 minInvocationsInclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 minInvocationsInclusiveScanNonUniformAMD(uvec4);"
+
+ "double minInvocationsInclusiveScanNonUniformAMD(double);"
+ "dvec2 minInvocationsInclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 minInvocationsInclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 minInvocationsInclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t minInvocationsInclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 minInvocationsInclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 minInvocationsInclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 minInvocationsInclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t minInvocationsInclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 minInvocationsInclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 minInvocationsInclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 minInvocationsInclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t minInvocationsInclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 minInvocationsInclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 minInvocationsInclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 minInvocationsInclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t minInvocationsInclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 minInvocationsInclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 minInvocationsInclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 minInvocationsInclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t minInvocationsInclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 minInvocationsInclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 minInvocationsInclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 minInvocationsInclusiveScanNonUniformAMD(u16vec4);"
+
+ "float minInvocationsExclusiveScanNonUniformAMD(float);"
+ "vec2 minInvocationsExclusiveScanNonUniformAMD(vec2);"
+ "vec3 minInvocationsExclusiveScanNonUniformAMD(vec3);"
+ "vec4 minInvocationsExclusiveScanNonUniformAMD(vec4);"
+
+ "int minInvocationsExclusiveScanNonUniformAMD(int);"
+ "ivec2 minInvocationsExclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 minInvocationsExclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 minInvocationsExclusiveScanNonUniformAMD(ivec4);"
+
+ "uint minInvocationsExclusiveScanNonUniformAMD(uint);"
+ "uvec2 minInvocationsExclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 minInvocationsExclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 minInvocationsExclusiveScanNonUniformAMD(uvec4);"
+
+ "double minInvocationsExclusiveScanNonUniformAMD(double);"
+ "dvec2 minInvocationsExclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 minInvocationsExclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 minInvocationsExclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t minInvocationsExclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 minInvocationsExclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 minInvocationsExclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 minInvocationsExclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t minInvocationsExclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 minInvocationsExclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 minInvocationsExclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 minInvocationsExclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t minInvocationsExclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 minInvocationsExclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 minInvocationsExclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 minInvocationsExclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t minInvocationsExclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 minInvocationsExclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 minInvocationsExclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 minInvocationsExclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t minInvocationsExclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 minInvocationsExclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 minInvocationsExclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 minInvocationsExclusiveScanNonUniformAMD(u16vec4);"
+
+ "float maxInvocationsNonUniformAMD(float);"
+ "vec2 maxInvocationsNonUniformAMD(vec2);"
+ "vec3 maxInvocationsNonUniformAMD(vec3);"
+ "vec4 maxInvocationsNonUniformAMD(vec4);"
+
+ "int maxInvocationsNonUniformAMD(int);"
+ "ivec2 maxInvocationsNonUniformAMD(ivec2);"
+ "ivec3 maxInvocationsNonUniformAMD(ivec3);"
+ "ivec4 maxInvocationsNonUniformAMD(ivec4);"
+
+ "uint maxInvocationsNonUniformAMD(uint);"
+ "uvec2 maxInvocationsNonUniformAMD(uvec2);"
+ "uvec3 maxInvocationsNonUniformAMD(uvec3);"
+ "uvec4 maxInvocationsNonUniformAMD(uvec4);"
+
+ "double maxInvocationsNonUniformAMD(double);"
+ "dvec2 maxInvocationsNonUniformAMD(dvec2);"
+ "dvec3 maxInvocationsNonUniformAMD(dvec3);"
+ "dvec4 maxInvocationsNonUniformAMD(dvec4);"
+
+ "int64_t maxInvocationsNonUniformAMD(int64_t);"
+ "i64vec2 maxInvocationsNonUniformAMD(i64vec2);"
+ "i64vec3 maxInvocationsNonUniformAMD(i64vec3);"
+ "i64vec4 maxInvocationsNonUniformAMD(i64vec4);"
+
+ "uint64_t maxInvocationsNonUniformAMD(uint64_t);"
+ "u64vec2 maxInvocationsNonUniformAMD(u64vec2);"
+ "u64vec3 maxInvocationsNonUniformAMD(u64vec3);"
+ "u64vec4 maxInvocationsNonUniformAMD(u64vec4);"
+
+ "float16_t maxInvocationsNonUniformAMD(float16_t);"
+ "f16vec2 maxInvocationsNonUniformAMD(f16vec2);"
+ "f16vec3 maxInvocationsNonUniformAMD(f16vec3);"
+ "f16vec4 maxInvocationsNonUniformAMD(f16vec4);"
+
+ "int16_t maxInvocationsNonUniformAMD(int16_t);"
+ "i16vec2 maxInvocationsNonUniformAMD(i16vec2);"
+ "i16vec3 maxInvocationsNonUniformAMD(i16vec3);"
+ "i16vec4 maxInvocationsNonUniformAMD(i16vec4);"
+
+ "uint16_t maxInvocationsNonUniformAMD(uint16_t);"
+ "u16vec2 maxInvocationsNonUniformAMD(u16vec2);"
+ "u16vec3 maxInvocationsNonUniformAMD(u16vec3);"
+ "u16vec4 maxInvocationsNonUniformAMD(u16vec4);"
+
+ "float maxInvocationsInclusiveScanNonUniformAMD(float);"
+ "vec2 maxInvocationsInclusiveScanNonUniformAMD(vec2);"
+ "vec3 maxInvocationsInclusiveScanNonUniformAMD(vec3);"
+ "vec4 maxInvocationsInclusiveScanNonUniformAMD(vec4);"
+
+ "int maxInvocationsInclusiveScanNonUniformAMD(int);"
+ "ivec2 maxInvocationsInclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 maxInvocationsInclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 maxInvocationsInclusiveScanNonUniformAMD(ivec4);"
+
+ "uint maxInvocationsInclusiveScanNonUniformAMD(uint);"
+ "uvec2 maxInvocationsInclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 maxInvocationsInclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 maxInvocationsInclusiveScanNonUniformAMD(uvec4);"
+
+ "double maxInvocationsInclusiveScanNonUniformAMD(double);"
+ "dvec2 maxInvocationsInclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 maxInvocationsInclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 maxInvocationsInclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t maxInvocationsInclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 maxInvocationsInclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 maxInvocationsInclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 maxInvocationsInclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t maxInvocationsInclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 maxInvocationsInclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 maxInvocationsInclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 maxInvocationsInclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t maxInvocationsInclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 maxInvocationsInclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 maxInvocationsInclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 maxInvocationsInclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t maxInvocationsInclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 maxInvocationsInclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 maxInvocationsInclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 maxInvocationsInclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t maxInvocationsInclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 maxInvocationsInclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 maxInvocationsInclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 maxInvocationsInclusiveScanNonUniformAMD(u16vec4);"
+
+ "float maxInvocationsExclusiveScanNonUniformAMD(float);"
+ "vec2 maxInvocationsExclusiveScanNonUniformAMD(vec2);"
+ "vec3 maxInvocationsExclusiveScanNonUniformAMD(vec3);"
+ "vec4 maxInvocationsExclusiveScanNonUniformAMD(vec4);"
+
+ "int maxInvocationsExclusiveScanNonUniformAMD(int);"
+ "ivec2 maxInvocationsExclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 maxInvocationsExclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 maxInvocationsExclusiveScanNonUniformAMD(ivec4);"
+
+ "uint maxInvocationsExclusiveScanNonUniformAMD(uint);"
+ "uvec2 maxInvocationsExclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 maxInvocationsExclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 maxInvocationsExclusiveScanNonUniformAMD(uvec4);"
+
+ "double maxInvocationsExclusiveScanNonUniformAMD(double);"
+ "dvec2 maxInvocationsExclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 maxInvocationsExclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 maxInvocationsExclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t maxInvocationsExclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 maxInvocationsExclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 maxInvocationsExclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 maxInvocationsExclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t maxInvocationsExclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 maxInvocationsExclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 maxInvocationsExclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 maxInvocationsExclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t maxInvocationsExclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 maxInvocationsExclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 maxInvocationsExclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 maxInvocationsExclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t maxInvocationsExclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 maxInvocationsExclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 maxInvocationsExclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 maxInvocationsExclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t maxInvocationsExclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 maxInvocationsExclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 maxInvocationsExclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 maxInvocationsExclusiveScanNonUniformAMD(u16vec4);"
+
+ "float addInvocationsNonUniformAMD(float);"
+ "vec2 addInvocationsNonUniformAMD(vec2);"
+ "vec3 addInvocationsNonUniformAMD(vec3);"
+ "vec4 addInvocationsNonUniformAMD(vec4);"
+
+ "int addInvocationsNonUniformAMD(int);"
+ "ivec2 addInvocationsNonUniformAMD(ivec2);"
+ "ivec3 addInvocationsNonUniformAMD(ivec3);"
+ "ivec4 addInvocationsNonUniformAMD(ivec4);"
+
+ "uint addInvocationsNonUniformAMD(uint);"
+ "uvec2 addInvocationsNonUniformAMD(uvec2);"
+ "uvec3 addInvocationsNonUniformAMD(uvec3);"
+ "uvec4 addInvocationsNonUniformAMD(uvec4);"
+
+ "double addInvocationsNonUniformAMD(double);"
+ "dvec2 addInvocationsNonUniformAMD(dvec2);"
+ "dvec3 addInvocationsNonUniformAMD(dvec3);"
+ "dvec4 addInvocationsNonUniformAMD(dvec4);"
+
+ "int64_t addInvocationsNonUniformAMD(int64_t);"
+ "i64vec2 addInvocationsNonUniformAMD(i64vec2);"
+ "i64vec3 addInvocationsNonUniformAMD(i64vec3);"
+ "i64vec4 addInvocationsNonUniformAMD(i64vec4);"
+
+ "uint64_t addInvocationsNonUniformAMD(uint64_t);"
+ "u64vec2 addInvocationsNonUniformAMD(u64vec2);"
+ "u64vec3 addInvocationsNonUniformAMD(u64vec3);"
+ "u64vec4 addInvocationsNonUniformAMD(u64vec4);"
+
+ "float16_t addInvocationsNonUniformAMD(float16_t);"
+ "f16vec2 addInvocationsNonUniformAMD(f16vec2);"
+ "f16vec3 addInvocationsNonUniformAMD(f16vec3);"
+ "f16vec4 addInvocationsNonUniformAMD(f16vec4);"
+
+ "int16_t addInvocationsNonUniformAMD(int16_t);"
+ "i16vec2 addInvocationsNonUniformAMD(i16vec2);"
+ "i16vec3 addInvocationsNonUniformAMD(i16vec3);"
+ "i16vec4 addInvocationsNonUniformAMD(i16vec4);"
+
+ "uint16_t addInvocationsNonUniformAMD(uint16_t);"
+ "u16vec2 addInvocationsNonUniformAMD(u16vec2);"
+ "u16vec3 addInvocationsNonUniformAMD(u16vec3);"
+ "u16vec4 addInvocationsNonUniformAMD(u16vec4);"
+
+ "float addInvocationsInclusiveScanNonUniformAMD(float);"
+ "vec2 addInvocationsInclusiveScanNonUniformAMD(vec2);"
+ "vec3 addInvocationsInclusiveScanNonUniformAMD(vec3);"
+ "vec4 addInvocationsInclusiveScanNonUniformAMD(vec4);"
+
+ "int addInvocationsInclusiveScanNonUniformAMD(int);"
+ "ivec2 addInvocationsInclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 addInvocationsInclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 addInvocationsInclusiveScanNonUniformAMD(ivec4);"
+
+ "uint addInvocationsInclusiveScanNonUniformAMD(uint);"
+ "uvec2 addInvocationsInclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 addInvocationsInclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 addInvocationsInclusiveScanNonUniformAMD(uvec4);"
+
+ "double addInvocationsInclusiveScanNonUniformAMD(double);"
+ "dvec2 addInvocationsInclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 addInvocationsInclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 addInvocationsInclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t addInvocationsInclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 addInvocationsInclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 addInvocationsInclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 addInvocationsInclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t addInvocationsInclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 addInvocationsInclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 addInvocationsInclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 addInvocationsInclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t addInvocationsInclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 addInvocationsInclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 addInvocationsInclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 addInvocationsInclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t addInvocationsInclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 addInvocationsInclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 addInvocationsInclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 addInvocationsInclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t addInvocationsInclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 addInvocationsInclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 addInvocationsInclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 addInvocationsInclusiveScanNonUniformAMD(u16vec4);"
+
+ "float addInvocationsExclusiveScanNonUniformAMD(float);"
+ "vec2 addInvocationsExclusiveScanNonUniformAMD(vec2);"
+ "vec3 addInvocationsExclusiveScanNonUniformAMD(vec3);"
+ "vec4 addInvocationsExclusiveScanNonUniformAMD(vec4);"
+
+ "int addInvocationsExclusiveScanNonUniformAMD(int);"
+ "ivec2 addInvocationsExclusiveScanNonUniformAMD(ivec2);"
+ "ivec3 addInvocationsExclusiveScanNonUniformAMD(ivec3);"
+ "ivec4 addInvocationsExclusiveScanNonUniformAMD(ivec4);"
+
+ "uint addInvocationsExclusiveScanNonUniformAMD(uint);"
+ "uvec2 addInvocationsExclusiveScanNonUniformAMD(uvec2);"
+ "uvec3 addInvocationsExclusiveScanNonUniformAMD(uvec3);"
+ "uvec4 addInvocationsExclusiveScanNonUniformAMD(uvec4);"
+
+ "double addInvocationsExclusiveScanNonUniformAMD(double);"
+ "dvec2 addInvocationsExclusiveScanNonUniformAMD(dvec2);"
+ "dvec3 addInvocationsExclusiveScanNonUniformAMD(dvec3);"
+ "dvec4 addInvocationsExclusiveScanNonUniformAMD(dvec4);"
+
+ "int64_t addInvocationsExclusiveScanNonUniformAMD(int64_t);"
+ "i64vec2 addInvocationsExclusiveScanNonUniformAMD(i64vec2);"
+ "i64vec3 addInvocationsExclusiveScanNonUniformAMD(i64vec3);"
+ "i64vec4 addInvocationsExclusiveScanNonUniformAMD(i64vec4);"
+
+ "uint64_t addInvocationsExclusiveScanNonUniformAMD(uint64_t);"
+ "u64vec2 addInvocationsExclusiveScanNonUniformAMD(u64vec2);"
+ "u64vec3 addInvocationsExclusiveScanNonUniformAMD(u64vec3);"
+ "u64vec4 addInvocationsExclusiveScanNonUniformAMD(u64vec4);"
+
+ "float16_t addInvocationsExclusiveScanNonUniformAMD(float16_t);"
+ "f16vec2 addInvocationsExclusiveScanNonUniformAMD(f16vec2);"
+ "f16vec3 addInvocationsExclusiveScanNonUniformAMD(f16vec3);"
+ "f16vec4 addInvocationsExclusiveScanNonUniformAMD(f16vec4);"
+
+ "int16_t addInvocationsExclusiveScanNonUniformAMD(int16_t);"
+ "i16vec2 addInvocationsExclusiveScanNonUniformAMD(i16vec2);"
+ "i16vec3 addInvocationsExclusiveScanNonUniformAMD(i16vec3);"
+ "i16vec4 addInvocationsExclusiveScanNonUniformAMD(i16vec4);"
+
+ "uint16_t addInvocationsExclusiveScanNonUniformAMD(uint16_t);"
+ "u16vec2 addInvocationsExclusiveScanNonUniformAMD(u16vec2);"
+ "u16vec3 addInvocationsExclusiveScanNonUniformAMD(u16vec3);"
+ "u16vec4 addInvocationsExclusiveScanNonUniformAMD(u16vec4);"
+
+ "float swizzleInvocationsAMD(float, uvec4);"
+ "vec2 swizzleInvocationsAMD(vec2, uvec4);"
+ "vec3 swizzleInvocationsAMD(vec3, uvec4);"
+ "vec4 swizzleInvocationsAMD(vec4, uvec4);"
+
+ "int swizzleInvocationsAMD(int, uvec4);"
+ "ivec2 swizzleInvocationsAMD(ivec2, uvec4);"
+ "ivec3 swizzleInvocationsAMD(ivec3, uvec4);"
+ "ivec4 swizzleInvocationsAMD(ivec4, uvec4);"
+
+ "uint swizzleInvocationsAMD(uint, uvec4);"
+ "uvec2 swizzleInvocationsAMD(uvec2, uvec4);"
+ "uvec3 swizzleInvocationsAMD(uvec3, uvec4);"
+ "uvec4 swizzleInvocationsAMD(uvec4, uvec4);"
+
+ "float swizzleInvocationsMaskedAMD(float, uvec3);"
+ "vec2 swizzleInvocationsMaskedAMD(vec2, uvec3);"
+ "vec3 swizzleInvocationsMaskedAMD(vec3, uvec3);"
+ "vec4 swizzleInvocationsMaskedAMD(vec4, uvec3);"
+
+ "int swizzleInvocationsMaskedAMD(int, uvec3);"
+ "ivec2 swizzleInvocationsMaskedAMD(ivec2, uvec3);"
+ "ivec3 swizzleInvocationsMaskedAMD(ivec3, uvec3);"
+ "ivec4 swizzleInvocationsMaskedAMD(ivec4, uvec3);"
+
+ "uint swizzleInvocationsMaskedAMD(uint, uvec3);"
+ "uvec2 swizzleInvocationsMaskedAMD(uvec2, uvec3);"
+ "uvec3 swizzleInvocationsMaskedAMD(uvec3, uvec3);"
+ "uvec4 swizzleInvocationsMaskedAMD(uvec4, uvec3);"
+
+ "float writeInvocationAMD(float, float, uint);"
+ "vec2 writeInvocationAMD(vec2, vec2, uint);"
+ "vec3 writeInvocationAMD(vec3, vec3, uint);"
+ "vec4 writeInvocationAMD(vec4, vec4, uint);"
+
+ "int writeInvocationAMD(int, int, uint);"
+ "ivec2 writeInvocationAMD(ivec2, ivec2, uint);"
+ "ivec3 writeInvocationAMD(ivec3, ivec3, uint);"
+ "ivec4 writeInvocationAMD(ivec4, ivec4, uint);"
+
+ "uint writeInvocationAMD(uint, uint, uint);"
+ "uvec2 writeInvocationAMD(uvec2, uvec2, uint);"
+ "uvec3 writeInvocationAMD(uvec3, uvec3, uint);"
+ "uvec4 writeInvocationAMD(uvec4, uvec4, uint);"
+
+ "uint mbcntAMD(uint64_t);"
+
+ "\n");
+ }
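+
+ // Illustrative GLSL usage of the declarations above (a sketch only; it assumes
+ // a driver exposing GL_AMD_shader_ballot, and `v`/`q` are hypothetical inputs):
+ //   #extension GL_AMD_shader_ballot : require
+ //   float total  = addInvocationsAMD(v);              // reduce across active invocations
+ //   float prefix = addInvocationsExclusiveScanAMD(v); // exclusive prefix sum
+ //   vec4  rot    = swizzleInvocationsAMD(q, uvec4(1, 2, 3, 0)); // rotate within quads
+ //   // the *NonUniformAMD variants are for use under divergent control flow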
+
+ // GL_AMD_gcn_shader
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "float cubeFaceIndexAMD(vec3);"
+ "vec2 cubeFaceCoordAMD(vec3);"
+ "uint64_t timeAMD();"
+
+ "\n");
+ }
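+
+ // Illustrative use of GL_AMD_gcn_shader (a sketch, assuming driver support):
+ //   float face = cubeFaceIndexAMD(dir);  // face a direction vector selects
+ //   vec2  uv   = cubeFaceCoordAMD(dir);  // coordinates on that face
+ //   uint64_t t = timeAMD();              // free-running shader clock value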
+
+ // GL_AMD_shader_fragment_mask
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "uint fragmentMaskFetchAMD(sampler2DMS, ivec2);"
+ "uint fragmentMaskFetchAMD(isampler2DMS, ivec2);"
+ "uint fragmentMaskFetchAMD(usampler2DMS, ivec2);"
+
+ "uint fragmentMaskFetchAMD(sampler2DMSArray, ivec3);"
+ "uint fragmentMaskFetchAMD(isampler2DMSArray, ivec3);"
+ "uint fragmentMaskFetchAMD(usampler2DMSArray, ivec3);"
+
+ "vec4 fragmentFetchAMD(sampler2DMS, ivec2, uint);"
+ "ivec4 fragmentFetchAMD(isampler2DMS, ivec2, uint);"
+ "uvec4 fragmentFetchAMD(usampler2DMS, ivec2, uint);"
+
+ "vec4 fragmentFetchAMD(sampler2DMSArray, ivec3, uint);"
+ "ivec4 fragmentFetchAMD(isampler2DMSArray, ivec3, uint);"
+ "uvec4 fragmentFetchAMD(usampler2DMSArray, ivec3, uint);"
+
+ "\n");
+ }
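+
+ // Illustrative use of GL_AMD_shader_fragment_mask (a sketch): resolve an MSAA
+ // texel by fetching only the fragments the mask points at.
+ //   uint mask = fragmentMaskFetchAMD(msTex, coord); // 4 bits per sample
+ //   uint frag = mask & 0xFu;                        // fragment index of sample 0
+ //   vec4 c    = fragmentFetchAMD(msTex, coord, frag);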
+
+#endif // AMD_EXTENSIONS
+
+
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) {
+ commonBuiltins.append(
+ "struct gl_TextureFootprint2DNV {"
+ "uvec2 anchor;"
+ "uvec2 offset;"
+ "uvec2 mask;"
+ "uint lod;"
+ "uint granularity;"
+ "};"
+
+ "struct gl_TextureFootprint3DNV {"
+ "uvec3 anchor;"
+ "uvec3 offset;"
+ "uvec2 mask;"
+ "uint lod;"
+ "uint granularity;"
+ "};"
+ "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV);"
+ "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV, float);"
+ "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV, float);"
+ "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);"
+ "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV, float);"
+ "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV, float);"
+ "bool textureFootprintLodNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintLodNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);"
+ "bool textureFootprintGradNV(sampler2D, vec2, vec2, vec2, int, bool, out gl_TextureFootprint2DNV);"
+ "bool textureFootprintGradClampNV(sampler2D, vec2, vec2, vec2, float, int, bool, out gl_TextureFootprint2DNV);"
+ "\n");
+ }
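+
+ // Illustrative use of the footprint queries above (a sketch; the granularity
+ // argument and `coarse` flag follow GL_NV_shader_texture_footprint):
+ //   gl_TextureFootprint2DNV fp;
+ //   bool singleLevel = textureFootprintNV(tex, uv, 3, false, fp);
+ //   // fp.anchor/fp.offset/fp.mask identify the texel groups the filter would touch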
+
+#endif // NV_EXTENSIONS
+
+ // GL_AMD_gpu_shader_half_float/Explicit types
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "float16_t radians(float16_t);"
+ "f16vec2 radians(f16vec2);"
+ "f16vec3 radians(f16vec3);"
+ "f16vec4 radians(f16vec4);"
+
+ "float16_t degrees(float16_t);"
+ "f16vec2 degrees(f16vec2);"
+ "f16vec3 degrees(f16vec3);"
+ "f16vec4 degrees(f16vec4);"
+
+ "float16_t sin(float16_t);"
+ "f16vec2 sin(f16vec2);"
+ "f16vec3 sin(f16vec3);"
+ "f16vec4 sin(f16vec4);"
+
+ "float16_t cos(float16_t);"
+ "f16vec2 cos(f16vec2);"
+ "f16vec3 cos(f16vec3);"
+ "f16vec4 cos(f16vec4);"
+
+ "float16_t tan(float16_t);"
+ "f16vec2 tan(f16vec2);"
+ "f16vec3 tan(f16vec3);"
+ "f16vec4 tan(f16vec4);"
+
+ "float16_t asin(float16_t);"
+ "f16vec2 asin(f16vec2);"
+ "f16vec3 asin(f16vec3);"
+ "f16vec4 asin(f16vec4);"
+
+ "float16_t acos(float16_t);"
+ "f16vec2 acos(f16vec2);"
+ "f16vec3 acos(f16vec3);"
+ "f16vec4 acos(f16vec4);"
+
+ "float16_t atan(float16_t, float16_t);"
+ "f16vec2 atan(f16vec2, f16vec2);"
+ "f16vec3 atan(f16vec3, f16vec3);"
+ "f16vec4 atan(f16vec4, f16vec4);"
+
+ "float16_t atan(float16_t);"
+ "f16vec2 atan(f16vec2);"
+ "f16vec3 atan(f16vec3);"
+ "f16vec4 atan(f16vec4);"
+
+ "float16_t sinh(float16_t);"
+ "f16vec2 sinh(f16vec2);"
+ "f16vec3 sinh(f16vec3);"
+ "f16vec4 sinh(f16vec4);"
+
+ "float16_t cosh(float16_t);"
+ "f16vec2 cosh(f16vec2);"
+ "f16vec3 cosh(f16vec3);"
+ "f16vec4 cosh(f16vec4);"
+
+ "float16_t tanh(float16_t);"
+ "f16vec2 tanh(f16vec2);"
+ "f16vec3 tanh(f16vec3);"
+ "f16vec4 tanh(f16vec4);"
+
+ "float16_t asinh(float16_t);"
+ "f16vec2 asinh(f16vec2);"
+ "f16vec3 asinh(f16vec3);"
+ "f16vec4 asinh(f16vec4);"
+
+ "float16_t acosh(float16_t);"
+ "f16vec2 acosh(f16vec2);"
+ "f16vec3 acosh(f16vec3);"
+ "f16vec4 acosh(f16vec4);"
+
+ "float16_t atanh(float16_t);"
+ "f16vec2 atanh(f16vec2);"
+ "f16vec3 atanh(f16vec3);"
+ "f16vec4 atanh(f16vec4);"
+
+ "float16_t pow(float16_t, float16_t);"
+ "f16vec2 pow(f16vec2, f16vec2);"
+ "f16vec3 pow(f16vec3, f16vec3);"
+ "f16vec4 pow(f16vec4, f16vec4);"
+
+ "float16_t exp(float16_t);"
+ "f16vec2 exp(f16vec2);"
+ "f16vec3 exp(f16vec3);"
+ "f16vec4 exp(f16vec4);"
+
+ "float16_t log(float16_t);"
+ "f16vec2 log(f16vec2);"
+ "f16vec3 log(f16vec3);"
+ "f16vec4 log(f16vec4);"
+
+ "float16_t exp2(float16_t);"
+ "f16vec2 exp2(f16vec2);"
+ "f16vec3 exp2(f16vec3);"
+ "f16vec4 exp2(f16vec4);"
+
+ "float16_t log2(float16_t);"
+ "f16vec2 log2(f16vec2);"
+ "f16vec3 log2(f16vec3);"
+ "f16vec4 log2(f16vec4);"
+
+ "float16_t sqrt(float16_t);"
+ "f16vec2 sqrt(f16vec2);"
+ "f16vec3 sqrt(f16vec3);"
+ "f16vec4 sqrt(f16vec4);"
+
+ "float16_t inversesqrt(float16_t);"
+ "f16vec2 inversesqrt(f16vec2);"
+ "f16vec3 inversesqrt(f16vec3);"
+ "f16vec4 inversesqrt(f16vec4);"
+
+ "float16_t abs(float16_t);"
+ "f16vec2 abs(f16vec2);"
+ "f16vec3 abs(f16vec3);"
+ "f16vec4 abs(f16vec4);"
+
+ "float16_t sign(float16_t);"
+ "f16vec2 sign(f16vec2);"
+ "f16vec3 sign(f16vec3);"
+ "f16vec4 sign(f16vec4);"
+
+ "float16_t floor(float16_t);"
+ "f16vec2 floor(f16vec2);"
+ "f16vec3 floor(f16vec3);"
+ "f16vec4 floor(f16vec4);"
+
+ "float16_t trunc(float16_t);"
+ "f16vec2 trunc(f16vec2);"
+ "f16vec3 trunc(f16vec3);"
+ "f16vec4 trunc(f16vec4);"
+
+ "float16_t round(float16_t);"
+ "f16vec2 round(f16vec2);"
+ "f16vec3 round(f16vec3);"
+ "f16vec4 round(f16vec4);"
+
+ "float16_t roundEven(float16_t);"
+ "f16vec2 roundEven(f16vec2);"
+ "f16vec3 roundEven(f16vec3);"
+ "f16vec4 roundEven(f16vec4);"
+
+ "float16_t ceil(float16_t);"
+ "f16vec2 ceil(f16vec2);"
+ "f16vec3 ceil(f16vec3);"
+ "f16vec4 ceil(f16vec4);"
+
+ "float16_t fract(float16_t);"
+ "f16vec2 fract(f16vec2);"
+ "f16vec3 fract(f16vec3);"
+ "f16vec4 fract(f16vec4);"
+
+ "float16_t mod(float16_t, float16_t);"
+ "f16vec2 mod(f16vec2, float16_t);"
+ "f16vec3 mod(f16vec3, float16_t);"
+ "f16vec4 mod(f16vec4, float16_t);"
+ "f16vec2 mod(f16vec2, f16vec2);"
+ "f16vec3 mod(f16vec3, f16vec3);"
+ "f16vec4 mod(f16vec4, f16vec4);"
+
+ "float16_t modf(float16_t, out float16_t);"
+ "f16vec2 modf(f16vec2, out f16vec2);"
+ "f16vec3 modf(f16vec3, out f16vec3);"
+ "f16vec4 modf(f16vec4, out f16vec4);"
+
+ "float16_t min(float16_t, float16_t);"
+ "f16vec2 min(f16vec2, float16_t);"
+ "f16vec3 min(f16vec3, float16_t);"
+ "f16vec4 min(f16vec4, float16_t);"
+ "f16vec2 min(f16vec2, f16vec2);"
+ "f16vec3 min(f16vec3, f16vec3);"
+ "f16vec4 min(f16vec4, f16vec4);"
+
+ "float16_t max(float16_t, float16_t);"
+ "f16vec2 max(f16vec2, float16_t);"
+ "f16vec3 max(f16vec3, float16_t);"
+ "f16vec4 max(f16vec4, float16_t);"
+ "f16vec2 max(f16vec2, f16vec2);"
+ "f16vec3 max(f16vec3, f16vec3);"
+ "f16vec4 max(f16vec4, f16vec4);"
+
+ "float16_t clamp(float16_t, float16_t, float16_t);"
+ "f16vec2 clamp(f16vec2, float16_t, float16_t);"
+ "f16vec3 clamp(f16vec3, float16_t, float16_t);"
+ "f16vec4 clamp(f16vec4, float16_t, float16_t);"
+ "f16vec2 clamp(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 clamp(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 clamp(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t mix(float16_t, float16_t, float16_t);"
+ "f16vec2 mix(f16vec2, f16vec2, float16_t);"
+ "f16vec3 mix(f16vec3, f16vec3, float16_t);"
+ "f16vec4 mix(f16vec4, f16vec4, float16_t);"
+ "f16vec2 mix(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 mix(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 mix(f16vec4, f16vec4, f16vec4);"
+ "float16_t mix(float16_t, float16_t, bool);"
+ "f16vec2 mix(f16vec2, f16vec2, bvec2);"
+ "f16vec3 mix(f16vec3, f16vec3, bvec3);"
+ "f16vec4 mix(f16vec4, f16vec4, bvec4);"
+
+ "float16_t step(float16_t, float16_t);"
+ "f16vec2 step(f16vec2, f16vec2);"
+ "f16vec3 step(f16vec3, f16vec3);"
+ "f16vec4 step(f16vec4, f16vec4);"
+ "f16vec2 step(float16_t, f16vec2);"
+ "f16vec3 step(float16_t, f16vec3);"
+ "f16vec4 step(float16_t, f16vec4);"
+
+ "float16_t smoothstep(float16_t, float16_t, float16_t);"
+ "f16vec2 smoothstep(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 smoothstep(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 smoothstep(f16vec4, f16vec4, f16vec4);"
+ "f16vec2 smoothstep(float16_t, float16_t, f16vec2);"
+ "f16vec3 smoothstep(float16_t, float16_t, f16vec3);"
+ "f16vec4 smoothstep(float16_t, float16_t, f16vec4);"
+
+ "bool isnan(float16_t);"
+ "bvec2 isnan(f16vec2);"
+ "bvec3 isnan(f16vec3);"
+ "bvec4 isnan(f16vec4);"
+
+ "bool isinf(float16_t);"
+ "bvec2 isinf(f16vec2);"
+ "bvec3 isinf(f16vec3);"
+ "bvec4 isinf(f16vec4);"
+
+ "float16_t fma(float16_t, float16_t, float16_t);"
+ "f16vec2 fma(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 fma(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 fma(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t frexp(float16_t, out int);"
+ "f16vec2 frexp(f16vec2, out ivec2);"
+ "f16vec3 frexp(f16vec3, out ivec3);"
+ "f16vec4 frexp(f16vec4, out ivec4);"
+
+ "float16_t ldexp(float16_t, in int);"
+ "f16vec2 ldexp(f16vec2, in ivec2);"
+ "f16vec3 ldexp(f16vec3, in ivec3);"
+ "f16vec4 ldexp(f16vec4, in ivec4);"
+
+ "uint packFloat2x16(f16vec2);"
+ "f16vec2 unpackFloat2x16(uint);"
+
+ "float16_t length(float16_t);"
+ "float16_t length(f16vec2);"
+ "float16_t length(f16vec3);"
+ "float16_t length(f16vec4);"
+
+ "float16_t distance(float16_t, float16_t);"
+ "float16_t distance(f16vec2, f16vec2);"
+ "float16_t distance(f16vec3, f16vec3);"
+ "float16_t distance(f16vec4, f16vec4);"
+
+ "float16_t dot(float16_t, float16_t);"
+ "float16_t dot(f16vec2, f16vec2);"
+ "float16_t dot(f16vec3, f16vec3);"
+ "float16_t dot(f16vec4, f16vec4);"
+
+ "f16vec3 cross(f16vec3, f16vec3);"
+
+ "float16_t normalize(float16_t);"
+ "f16vec2 normalize(f16vec2);"
+ "f16vec3 normalize(f16vec3);"
+ "f16vec4 normalize(f16vec4);"
+
+ "float16_t faceforward(float16_t, float16_t, float16_t);"
+ "f16vec2 faceforward(f16vec2, f16vec2, f16vec2);"
+ "f16vec3 faceforward(f16vec3, f16vec3, f16vec3);"
+ "f16vec4 faceforward(f16vec4, f16vec4, f16vec4);"
+
+ "float16_t reflect(float16_t, float16_t);"
+ "f16vec2 reflect(f16vec2, f16vec2);"
+ "f16vec3 reflect(f16vec3, f16vec3);"
+ "f16vec4 reflect(f16vec4, f16vec4);"
+
+ "float16_t refract(float16_t, float16_t, float16_t);"
+ "f16vec2 refract(f16vec2, f16vec2, float16_t);"
+ "f16vec3 refract(f16vec3, f16vec3, float16_t);"
+ "f16vec4 refract(f16vec4, f16vec4, float16_t);"
+
+ "f16mat2 matrixCompMult(f16mat2, f16mat2);"
+ "f16mat3 matrixCompMult(f16mat3, f16mat3);"
+ "f16mat4 matrixCompMult(f16mat4, f16mat4);"
+ "f16mat2x3 matrixCompMult(f16mat2x3, f16mat2x3);"
+ "f16mat2x4 matrixCompMult(f16mat2x4, f16mat2x4);"
+ "f16mat3x2 matrixCompMult(f16mat3x2, f16mat3x2);"
+ "f16mat3x4 matrixCompMult(f16mat3x4, f16mat3x4);"
+ "f16mat4x2 matrixCompMult(f16mat4x2, f16mat4x2);"
+ "f16mat4x3 matrixCompMult(f16mat4x3, f16mat4x3);"
+
+ "f16mat2 outerProduct(f16vec2, f16vec2);"
+ "f16mat3 outerProduct(f16vec3, f16vec3);"
+ "f16mat4 outerProduct(f16vec4, f16vec4);"
+ "f16mat2x3 outerProduct(f16vec3, f16vec2);"
+ "f16mat3x2 outerProduct(f16vec2, f16vec3);"
+ "f16mat2x4 outerProduct(f16vec4, f16vec2);"
+ "f16mat4x2 outerProduct(f16vec2, f16vec4);"
+ "f16mat3x4 outerProduct(f16vec4, f16vec3);"
+ "f16mat4x3 outerProduct(f16vec3, f16vec4);"
+
+ "f16mat2 transpose(f16mat2);"
+ "f16mat3 transpose(f16mat3);"
+ "f16mat4 transpose(f16mat4);"
+ "f16mat2x3 transpose(f16mat3x2);"
+ "f16mat3x2 transpose(f16mat2x3);"
+ "f16mat2x4 transpose(f16mat4x2);"
+ "f16mat4x2 transpose(f16mat2x4);"
+ "f16mat3x4 transpose(f16mat4x3);"
+ "f16mat4x3 transpose(f16mat3x4);"
+
+ "float16_t determinant(f16mat2);"
+ "float16_t determinant(f16mat3);"
+ "float16_t determinant(f16mat4);"
+
+ "f16mat2 inverse(f16mat2);"
+ "f16mat3 inverse(f16mat3);"
+ "f16mat4 inverse(f16mat4);"
+
+ "bvec2 lessThan(f16vec2, f16vec2);"
+ "bvec3 lessThan(f16vec3, f16vec3);"
+ "bvec4 lessThan(f16vec4, f16vec4);"
+
+ "bvec2 lessThanEqual(f16vec2, f16vec2);"
+ "bvec3 lessThanEqual(f16vec3, f16vec3);"
+ "bvec4 lessThanEqual(f16vec4, f16vec4);"
+
+ "bvec2 greaterThan(f16vec2, f16vec2);"
+ "bvec3 greaterThan(f16vec3, f16vec3);"
+ "bvec4 greaterThan(f16vec4, f16vec4);"
+
+ "bvec2 greaterThanEqual(f16vec2, f16vec2);"
+ "bvec3 greaterThanEqual(f16vec3, f16vec3);"
+ "bvec4 greaterThanEqual(f16vec4, f16vec4);"
+
+ "bvec2 equal(f16vec2, f16vec2);"
+ "bvec3 equal(f16vec3, f16vec3);"
+ "bvec4 equal(f16vec4, f16vec4);"
+
+ "bvec2 notEqual(f16vec2, f16vec2);"
+ "bvec3 notEqual(f16vec3, f16vec3);"
+ "bvec4 notEqual(f16vec4, f16vec4);"
+
+ "\n");
+ }
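+
+ // Illustrative use of the float16_t overloads above (a sketch; `packedBits`
+ // is a hypothetical uint holding two packed halfs):
+ //   f16vec2 h = unpackFloat2x16(packedBits);
+ //   h = clamp(normalize(h), float16_t(0.0), float16_t(1.0));
+ //   uint outBits = packFloat2x16(h);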
+
+ // Explicit types
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append(
+ "int8_t abs(int8_t);"
+ "i8vec2 abs(i8vec2);"
+ "i8vec3 abs(i8vec3);"
+ "i8vec4 abs(i8vec4);"
+
+ "int8_t sign(int8_t);"
+ "i8vec2 sign(i8vec2);"
+ "i8vec3 sign(i8vec3);"
+ "i8vec4 sign(i8vec4);"
+
+ "int8_t min(int8_t x, int8_t y);"
+ "i8vec2 min(i8vec2 x, int8_t y);"
+ "i8vec3 min(i8vec3 x, int8_t y);"
+ "i8vec4 min(i8vec4 x, int8_t y);"
+ "i8vec2 min(i8vec2 x, i8vec2 y);"
+ "i8vec3 min(i8vec3 x, i8vec3 y);"
+ "i8vec4 min(i8vec4 x, i8vec4 y);"
+
+ "uint8_t min(uint8_t x, uint8_t y);"
+ "u8vec2 min(u8vec2 x, uint8_t y);"
+ "u8vec3 min(u8vec3 x, uint8_t y);"
+ "u8vec4 min(u8vec4 x, uint8_t y);"
+ "u8vec2 min(u8vec2 x, u8vec2 y);"
+ "u8vec3 min(u8vec3 x, u8vec3 y);"
+ "u8vec4 min(u8vec4 x, u8vec4 y);"
+
+ "int8_t max(int8_t x, int8_t y);"
+ "i8vec2 max(i8vec2 x, int8_t y);"
+ "i8vec3 max(i8vec3 x, int8_t y);"
+ "i8vec4 max(i8vec4 x, int8_t y);"
+ "i8vec2 max(i8vec2 x, i8vec2 y);"
+ "i8vec3 max(i8vec3 x, i8vec3 y);"
+ "i8vec4 max(i8vec4 x, i8vec4 y);"
+
+ "uint8_t max(uint8_t x, uint8_t y);"
+ "u8vec2 max(u8vec2 x, uint8_t y);"
+ "u8vec3 max(u8vec3 x, uint8_t y);"
+ "u8vec4 max(u8vec4 x, uint8_t y);"
+ "u8vec2 max(u8vec2 x, u8vec2 y);"
+ "u8vec3 max(u8vec3 x, u8vec3 y);"
+ "u8vec4 max(u8vec4 x, u8vec4 y);"
+
+ "int8_t clamp(int8_t x, int8_t minVal, int8_t maxVal);"
+ "i8vec2 clamp(i8vec2 x, int8_t minVal, int8_t maxVal);"
+ "i8vec3 clamp(i8vec3 x, int8_t minVal, int8_t maxVal);"
+ "i8vec4 clamp(i8vec4 x, int8_t minVal, int8_t maxVal);"
+ "i8vec2 clamp(i8vec2 x, i8vec2 minVal, i8vec2 maxVal);"
+ "i8vec3 clamp(i8vec3 x, i8vec3 minVal, i8vec3 maxVal);"
+ "i8vec4 clamp(i8vec4 x, i8vec4 minVal, i8vec4 maxVal);"
+
+ "uint8_t clamp(uint8_t x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec2 clamp(u8vec2 x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec3 clamp(u8vec3 x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec4 clamp(u8vec4 x, uint8_t minVal, uint8_t maxVal);"
+ "u8vec2 clamp(u8vec2 x, u8vec2 minVal, u8vec2 maxVal);"
+ "u8vec3 clamp(u8vec3 x, u8vec3 minVal, u8vec3 maxVal);"
+ "u8vec4 clamp(u8vec4 x, u8vec4 minVal, u8vec4 maxVal);"
+
+ "int8_t mix(int8_t, int8_t, bool);"
+ "i8vec2 mix(i8vec2, i8vec2, bvec2);"
+ "i8vec3 mix(i8vec3, i8vec3, bvec3);"
+ "i8vec4 mix(i8vec4, i8vec4, bvec4);"
+ "uint8_t mix(uint8_t, uint8_t, bool);"
+ "u8vec2 mix(u8vec2, u8vec2, bvec2);"
+ "u8vec3 mix(u8vec3, u8vec3, bvec3);"
+ "u8vec4 mix(u8vec4, u8vec4, bvec4);"
+
+ "bvec2 lessThan(i8vec2, i8vec2);"
+ "bvec3 lessThan(i8vec3, i8vec3);"
+ "bvec4 lessThan(i8vec4, i8vec4);"
+ "bvec2 lessThan(u8vec2, u8vec2);"
+ "bvec3 lessThan(u8vec3, u8vec3);"
+ "bvec4 lessThan(u8vec4, u8vec4);"
+
+ "bvec2 lessThanEqual(i8vec2, i8vec2);"
+ "bvec3 lessThanEqual(i8vec3, i8vec3);"
+ "bvec4 lessThanEqual(i8vec4, i8vec4);"
+ "bvec2 lessThanEqual(u8vec2, u8vec2);"
+ "bvec3 lessThanEqual(u8vec3, u8vec3);"
+ "bvec4 lessThanEqual(u8vec4, u8vec4);"
+
+ "bvec2 greaterThan(i8vec2, i8vec2);"
+ "bvec3 greaterThan(i8vec3, i8vec3);"
+ "bvec4 greaterThan(i8vec4, i8vec4);"
+ "bvec2 greaterThan(u8vec2, u8vec2);"
+ "bvec3 greaterThan(u8vec3, u8vec3);"
+ "bvec4 greaterThan(u8vec4, u8vec4);"
+
+ "bvec2 greaterThanEqual(i8vec2, i8vec2);"
+ "bvec3 greaterThanEqual(i8vec3, i8vec3);"
+ "bvec4 greaterThanEqual(i8vec4, i8vec4);"
+ "bvec2 greaterThanEqual(u8vec2, u8vec2);"
+ "bvec3 greaterThanEqual(u8vec3, u8vec3);"
+ "bvec4 greaterThanEqual(u8vec4, u8vec4);"
+
+ "bvec2 equal(i8vec2, i8vec2);"
+ "bvec3 equal(i8vec3, i8vec3);"
+ "bvec4 equal(i8vec4, i8vec4);"
+ "bvec2 equal(u8vec2, u8vec2);"
+ "bvec3 equal(u8vec3, u8vec3);"
+ "bvec4 equal(u8vec4, u8vec4);"
+
+ "bvec2 notEqual(i8vec2, i8vec2);"
+ "bvec3 notEqual(i8vec3, i8vec3);"
+ "bvec4 notEqual(i8vec4, i8vec4);"
+ "bvec2 notEqual(u8vec2, u8vec2);"
+ "bvec3 notEqual(u8vec3, u8vec3);"
+ "bvec4 notEqual(u8vec4, u8vec4);"
+
+ " int8_t bitfieldExtract( int8_t, int8_t, int8_t);"
+ "i8vec2 bitfieldExtract(i8vec2, int8_t, int8_t);"
+ "i8vec3 bitfieldExtract(i8vec3, int8_t, int8_t);"
+ "i8vec4 bitfieldExtract(i8vec4, int8_t, int8_t);"
+
+ " uint8_t bitfieldExtract( uint8_t, int8_t, int8_t);"
+ "u8vec2 bitfieldExtract(u8vec2, int8_t, int8_t);"
+ "u8vec3 bitfieldExtract(u8vec3, int8_t, int8_t);"
+ "u8vec4 bitfieldExtract(u8vec4, int8_t, int8_t);"
+
+ " int8_t bitfieldInsert( int8_t base, int8_t, int8_t, int8_t);"
+ "i8vec2 bitfieldInsert(i8vec2 base, i8vec2, int8_t, int8_t);"
+ "i8vec3 bitfieldInsert(i8vec3 base, i8vec3, int8_t, int8_t);"
+ "i8vec4 bitfieldInsert(i8vec4 base, i8vec4, int8_t, int8_t);"
+
+ " uint8_t bitfieldInsert( uint8_t base, uint8_t, int8_t, int8_t);"
+ "u8vec2 bitfieldInsert(u8vec2 base, u8vec2, int8_t, int8_t);"
+ "u8vec3 bitfieldInsert(u8vec3 base, u8vec3, int8_t, int8_t);"
+ "u8vec4 bitfieldInsert(u8vec4 base, u8vec4, int8_t, int8_t);"
+
+ " int8_t bitCount( int8_t);"
+ "i8vec2 bitCount(i8vec2);"
+ "i8vec3 bitCount(i8vec3);"
+ "i8vec4 bitCount(i8vec4);"
+
+ " int8_t bitCount( uint8_t);"
+ "i8vec2 bitCount(u8vec2);"
+ "i8vec3 bitCount(u8vec3);"
+ "i8vec4 bitCount(u8vec4);"
+
+ " int8_t findLSB( int8_t);"
+ "i8vec2 findLSB(i8vec2);"
+ "i8vec3 findLSB(i8vec3);"
+ "i8vec4 findLSB(i8vec4);"
+
+ " int8_t findLSB( uint8_t);"
+ "i8vec2 findLSB(u8vec2);"
+ "i8vec3 findLSB(u8vec3);"
+ "i8vec4 findLSB(u8vec4);"
+
+ " int8_t findMSB( int8_t);"
+ "i8vec2 findMSB(i8vec2);"
+ "i8vec3 findMSB(i8vec3);"
+ "i8vec4 findMSB(i8vec4);"
+
+ " int8_t findMSB( uint8_t);"
+ "i8vec2 findMSB(u8vec2);"
+ "i8vec3 findMSB(u8vec3);"
+ "i8vec4 findMSB(u8vec4);"
+
+ "int16_t abs(int16_t);"
+ "i16vec2 abs(i16vec2);"
+ "i16vec3 abs(i16vec3);"
+ "i16vec4 abs(i16vec4);"
+
+ "int16_t sign(int16_t);"
+ "i16vec2 sign(i16vec2);"
+ "i16vec3 sign(i16vec3);"
+ "i16vec4 sign(i16vec4);"
+
+ "int16_t min(int16_t x, int16_t y);"
+ "i16vec2 min(i16vec2 x, int16_t y);"
+ "i16vec3 min(i16vec3 x, int16_t y);"
+ "i16vec4 min(i16vec4 x, int16_t y);"
+ "i16vec2 min(i16vec2 x, i16vec2 y);"
+ "i16vec3 min(i16vec3 x, i16vec3 y);"
+ "i16vec4 min(i16vec4 x, i16vec4 y);"
+
+ "uint16_t min(uint16_t x, uint16_t y);"
+ "u16vec2 min(u16vec2 x, uint16_t y);"
+ "u16vec3 min(u16vec3 x, uint16_t y);"
+ "u16vec4 min(u16vec4 x, uint16_t y);"
+ "u16vec2 min(u16vec2 x, u16vec2 y);"
+ "u16vec3 min(u16vec3 x, u16vec3 y);"
+ "u16vec4 min(u16vec4 x, u16vec4 y);"
+
+ "int16_t max(int16_t x, int16_t y);"
+ "i16vec2 max(i16vec2 x, int16_t y);"
+ "i16vec3 max(i16vec3 x, int16_t y);"
+ "i16vec4 max(i16vec4 x, int16_t y);"
+ "i16vec2 max(i16vec2 x, i16vec2 y);"
+ "i16vec3 max(i16vec3 x, i16vec3 y);"
+ "i16vec4 max(i16vec4 x, i16vec4 y);"
+
+ "uint16_t max(uint16_t x, uint16_t y);"
+ "u16vec2 max(u16vec2 x, uint16_t y);"
+ "u16vec3 max(u16vec3 x, uint16_t y);"
+ "u16vec4 max(u16vec4 x, uint16_t y);"
+ "u16vec2 max(u16vec2 x, u16vec2 y);"
+ "u16vec3 max(u16vec3 x, u16vec3 y);"
+ "u16vec4 max(u16vec4 x, u16vec4 y);"
+
+ "int16_t clamp(int16_t x, int16_t minVal, int16_t maxVal);"
+ "i16vec2 clamp(i16vec2 x, int16_t minVal, int16_t maxVal);"
+ "i16vec3 clamp(i16vec3 x, int16_t minVal, int16_t maxVal);"
+ "i16vec4 clamp(i16vec4 x, int16_t minVal, int16_t maxVal);"
+ "i16vec2 clamp(i16vec2 x, i16vec2 minVal, i16vec2 maxVal);"
+ "i16vec3 clamp(i16vec3 x, i16vec3 minVal, i16vec3 maxVal);"
+ "i16vec4 clamp(i16vec4 x, i16vec4 minVal, i16vec4 maxVal);"
+
+ "uint16_t clamp(uint16_t x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec2 clamp(u16vec2 x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec3 clamp(u16vec3 x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec4 clamp(u16vec4 x, uint16_t minVal, uint16_t maxVal);"
+ "u16vec2 clamp(u16vec2 x, u16vec2 minVal, u16vec2 maxVal);"
+ "u16vec3 clamp(u16vec3 x, u16vec3 minVal, u16vec3 maxVal);"
+ "u16vec4 clamp(u16vec4 x, u16vec4 minVal, u16vec4 maxVal);"
+
+ "int16_t mix(int16_t, int16_t, bool);"
+ "i16vec2 mix(i16vec2, i16vec2, bvec2);"
+ "i16vec3 mix(i16vec3, i16vec3, bvec3);"
+ "i16vec4 mix(i16vec4, i16vec4, bvec4);"
+ "uint16_t mix(uint16_t, uint16_t, bool);"
+ "u16vec2 mix(u16vec2, u16vec2, bvec2);"
+ "u16vec3 mix(u16vec3, u16vec3, bvec3);"
+ "u16vec4 mix(u16vec4, u16vec4, bvec4);"
+
+ "float16_t frexp(float16_t, out int16_t);"
+ "f16vec2 frexp(f16vec2, out i16vec2);"
+ "f16vec3 frexp(f16vec3, out i16vec3);"
+ "f16vec4 frexp(f16vec4, out i16vec4);"
+
+ "float16_t ldexp(float16_t, int16_t);"
+ "f16vec2 ldexp(f16vec2, i16vec2);"
+ "f16vec3 ldexp(f16vec3, i16vec3);"
+ "f16vec4 ldexp(f16vec4, i16vec4);"
+
+ "int16_t halfBitsToInt16(float16_t);"
+ "i16vec2 halfBitsToInt16(f16vec2);"
+ "i16vec3 halhBitsToInt16(f16vec3);"
+ "i16vec4 halfBitsToInt16(f16vec4);"
+
+ "uint16_t halfBitsToUint16(float16_t);"
+ "u16vec2 halfBitsToUint16(f16vec2);"
+ "u16vec3 halfBitsToUint16(f16vec3);"
+ "u16vec4 halfBitsToUint16(f16vec4);"
+
+ "int16_t float16BitsToInt16(float16_t);"
+ "i16vec2 float16BitsToInt16(f16vec2);"
+ "i16vec3 float16BitsToInt16(f16vec3);"
+ "i16vec4 float16BitsToInt16(f16vec4);"
+
+ "uint16_t float16BitsToUint16(float16_t);"
+ "u16vec2 float16BitsToUint16(f16vec2);"
+ "u16vec3 float16BitsToUint16(f16vec3);"
+ "u16vec4 float16BitsToUint16(f16vec4);"
+
+ "float16_t int16BitsToFloat16(int16_t);"
+ "f16vec2 int16BitsToFloat16(i16vec2);"
+ "f16vec3 int16BitsToFloat16(i16vec3);"
+ "f16vec4 int16BitsToFloat16(i16vec4);"
+
+ "float16_t uint16BitsToFloat16(uint16_t);"
+ "f16vec2 uint16BitsToFloat16(u16vec2);"
+ "f16vec3 uint16BitsToFloat16(u16vec3);"
+ "f16vec4 uint16BitsToFloat16(u16vec4);"
+
+ "float16_t int16BitsToHalf(int16_t);"
+ "f16vec2 int16BitsToHalf(i16vec2);"
+ "f16vec3 int16BitsToHalf(i16vec3);"
+ "f16vec4 int16BitsToHalf(i16vec4);"
+
+ "float16_t uint16BitsToHalf(uint16_t);"
+ "f16vec2 uint16BitsToHalf(u16vec2);"
+ "f16vec3 uint16BitsToHalf(u16vec3);"
+ "f16vec4 uint16BitsToHalf(u16vec4);"
+
+ "int packInt2x16(i16vec2);"
+ "uint packUint2x16(u16vec2);"
+ "int64_t packInt4x16(i16vec4);"
+ "uint64_t packUint4x16(u16vec4);"
+ "i16vec2 unpackInt2x16(int);"
+ "u16vec2 unpackUint2x16(uint);"
+ "i16vec4 unpackInt4x16(int64_t);"
+ "u16vec4 unpackUint4x16(uint64_t);"
+
+ "bvec2 lessThan(i16vec2, i16vec2);"
+ "bvec3 lessThan(i16vec3, i16vec3);"
+ "bvec4 lessThan(i16vec4, i16vec4);"
+ "bvec2 lessThan(u16vec2, u16vec2);"
+ "bvec3 lessThan(u16vec3, u16vec3);"
+ "bvec4 lessThan(u16vec4, u16vec4);"
+
+ "bvec2 lessThanEqual(i16vec2, i16vec2);"
+ "bvec3 lessThanEqual(i16vec3, i16vec3);"
+ "bvec4 lessThanEqual(i16vec4, i16vec4);"
+ "bvec2 lessThanEqual(u16vec2, u16vec2);"
+ "bvec3 lessThanEqual(u16vec3, u16vec3);"
+ "bvec4 lessThanEqual(u16vec4, u16vec4);"
+
+ "bvec2 greaterThan(i16vec2, i16vec2);"
+ "bvec3 greaterThan(i16vec3, i16vec3);"
+ "bvec4 greaterThan(i16vec4, i16vec4);"
+ "bvec2 greaterThan(u16vec2, u16vec2);"
+ "bvec3 greaterThan(u16vec3, u16vec3);"
+ "bvec4 greaterThan(u16vec4, u16vec4);"
+
+ "bvec2 greaterThanEqual(i16vec2, i16vec2);"
+ "bvec3 greaterThanEqual(i16vec3, i16vec3);"
+ "bvec4 greaterThanEqual(i16vec4, i16vec4);"
+ "bvec2 greaterThanEqual(u16vec2, u16vec2);"
+ "bvec3 greaterThanEqual(u16vec3, u16vec3);"
+ "bvec4 greaterThanEqual(u16vec4, u16vec4);"
+
+ "bvec2 equal(i16vec2, i16vec2);"
+ "bvec3 equal(i16vec3, i16vec3);"
+ "bvec4 equal(i16vec4, i16vec4);"
+ "bvec2 equal(u16vec2, u16vec2);"
+ "bvec3 equal(u16vec3, u16vec3);"
+ "bvec4 equal(u16vec4, u16vec4);"
+
+ "bvec2 notEqual(i16vec2, i16vec2);"
+ "bvec3 notEqual(i16vec3, i16vec3);"
+ "bvec4 notEqual(i16vec4, i16vec4);"
+ "bvec2 notEqual(u16vec2, u16vec2);"
+ "bvec3 notEqual(u16vec3, u16vec3);"
+ "bvec4 notEqual(u16vec4, u16vec4);"
+
+ " int16_t bitfieldExtract( int16_t, int16_t, int16_t);"
+ "i16vec2 bitfieldExtract(i16vec2, int16_t, int16_t);"
+ "i16vec3 bitfieldExtract(i16vec3, int16_t, int16_t);"
+ "i16vec4 bitfieldExtract(i16vec4, int16_t, int16_t);"
+
+ " uint16_t bitfieldExtract( uint16_t, int16_t, int16_t);"
+ "u16vec2 bitfieldExtract(u16vec2, int16_t, int16_t);"
+ "u16vec3 bitfieldExtract(u16vec3, int16_t, int16_t);"
+ "u16vec4 bitfieldExtract(u16vec4, int16_t, int16_t);"
+
+ " int16_t bitfieldInsert( int16_t base, int16_t, int16_t, int16_t);"
+ "i16vec2 bitfieldInsert(i16vec2 base, i16vec2, int16_t, int16_t);"
+ "i16vec3 bitfieldInsert(i16vec3 base, i16vec3, int16_t, int16_t);"
+ "i16vec4 bitfieldInsert(i16vec4 base, i16vec4, int16_t, int16_t);"
+
+ " uint16_t bitfieldInsert( uint16_t base, uint16_t, int16_t, int16_t);"
+ "u16vec2 bitfieldInsert(u16vec2 base, u16vec2, int16_t, int16_t);"
+ "u16vec3 bitfieldInsert(u16vec3 base, u16vec3, int16_t, int16_t);"
+ "u16vec4 bitfieldInsert(u16vec4 base, u16vec4, int16_t, int16_t);"
+
+ " int16_t bitCount( int16_t);"
+ "i16vec2 bitCount(i16vec2);"
+ "i16vec3 bitCount(i16vec3);"
+ "i16vec4 bitCount(i16vec4);"
+
+ " int16_t bitCount( uint16_t);"
+ "i16vec2 bitCount(u16vec2);"
+ "i16vec3 bitCount(u16vec3);"
+ "i16vec4 bitCount(u16vec4);"
+
+ " int16_t findLSB( int16_t);"
+ "i16vec2 findLSB(i16vec2);"
+ "i16vec3 findLSB(i16vec3);"
+ "i16vec4 findLSB(i16vec4);"
+
+ " int16_t findLSB( uint16_t);"
+ "i16vec2 findLSB(u16vec2);"
+ "i16vec3 findLSB(u16vec3);"
+ "i16vec4 findLSB(u16vec4);"
+
+ " int16_t findMSB( int16_t);"
+ "i16vec2 findMSB(i16vec2);"
+ "i16vec3 findMSB(i16vec3);"
+ "i16vec4 findMSB(i16vec4);"
+
+ " int16_t findMSB( uint16_t);"
+ "i16vec2 findMSB(u16vec2);"
+ "i16vec3 findMSB(u16vec3);"
+ "i16vec4 findMSB(u16vec4);"
+
+ "int16_t pack16(i8vec2);"
+ "uint16_t pack16(u8vec2);"
+ "int32_t pack32(i8vec4);"
+ "uint32_t pack32(u8vec4);"
+ "int32_t pack32(i16vec2);"
+ "uint32_t pack32(u16vec2);"
+ "int64_t pack64(i16vec4);"
+ "uint64_t pack64(u16vec4);"
+ "int64_t pack64(i32vec2);"
+ "uint64_t pack64(u32vec2);"
+
+ "i8vec2 unpack8(int16_t);"
+ "u8vec2 unpack8(uint16_t);"
+ "i8vec4 unpack8(int32_t);"
+ "u8vec4 unpack8(uint32_t);"
+ "i16vec2 unpack16(int32_t);"
+ "u16vec2 unpack16(uint32_t);"
+ "i16vec4 unpack16(int64_t);"
+ "u16vec4 unpack16(uint64_t);"
+ "i32vec2 unpack32(int64_t);"
+ "u32vec2 unpack32(uint64_t);"
+
+ "float64_t radians(float64_t);"
+ "f64vec2 radians(f64vec2);"
+ "f64vec3 radians(f64vec3);"
+ "f64vec4 radians(f64vec4);"
+
+ "float64_t degrees(float64_t);"
+ "f64vec2 degrees(f64vec2);"
+ "f64vec3 degrees(f64vec3);"
+ "f64vec4 degrees(f64vec4);"
+
+ "float64_t sin(float64_t);"
+ "f64vec2 sin(f64vec2);"
+ "f64vec3 sin(f64vec3);"
+ "f64vec4 sin(f64vec4);"
+
+ "float64_t cos(float64_t);"
+ "f64vec2 cos(f64vec2);"
+ "f64vec3 cos(f64vec3);"
+ "f64vec4 cos(f64vec4);"
+
+ "float64_t tan(float64_t);"
+ "f64vec2 tan(f64vec2);"
+ "f64vec3 tan(f64vec3);"
+ "f64vec4 tan(f64vec4);"
+
+ "float64_t asin(float64_t);"
+ "f64vec2 asin(f64vec2);"
+ "f64vec3 asin(f64vec3);"
+ "f64vec4 asin(f64vec4);"
+
+ "float64_t acos(float64_t);"
+ "f64vec2 acos(f64vec2);"
+ "f64vec3 acos(f64vec3);"
+ "f64vec4 acos(f64vec4);"
+
+ "float64_t atan(float64_t, float64_t);"
+ "f64vec2 atan(f64vec2, f64vec2);"
+ "f64vec3 atan(f64vec3, f64vec3);"
+ "f64vec4 atan(f64vec4, f64vec4);"
+
+ "float64_t atan(float64_t);"
+ "f64vec2 atan(f64vec2);"
+ "f64vec3 atan(f64vec3);"
+ "f64vec4 atan(f64vec4);"
+
+ "float64_t sinh(float64_t);"
+ "f64vec2 sinh(f64vec2);"
+ "f64vec3 sinh(f64vec3);"
+ "f64vec4 sinh(f64vec4);"
+
+ "float64_t cosh(float64_t);"
+ "f64vec2 cosh(f64vec2);"
+ "f64vec3 cosh(f64vec3);"
+ "f64vec4 cosh(f64vec4);"
+
+ "float64_t tanh(float64_t);"
+ "f64vec2 tanh(f64vec2);"
+ "f64vec3 tanh(f64vec3);"
+ "f64vec4 tanh(f64vec4);"
+
+ "float64_t asinh(float64_t);"
+ "f64vec2 asinh(f64vec2);"
+ "f64vec3 asinh(f64vec3);"
+ "f64vec4 asinh(f64vec4);"
+
+ "float64_t acosh(float64_t);"
+ "f64vec2 acosh(f64vec2);"
+ "f64vec3 acosh(f64vec3);"
+ "f64vec4 acosh(f64vec4);"
+
+ "float64_t atanh(float64_t);"
+ "f64vec2 atanh(f64vec2);"
+ "f64vec3 atanh(f64vec3);"
+ "f64vec4 atanh(f64vec4);"
+
+ "float64_t pow(float64_t, float64_t);"
+ "f64vec2 pow(f64vec2, f64vec2);"
+ "f64vec3 pow(f64vec3, f64vec3);"
+ "f64vec4 pow(f64vec4, f64vec4);"
+
+ "float64_t exp(float64_t);"
+ "f64vec2 exp(f64vec2);"
+ "f64vec3 exp(f64vec3);"
+ "f64vec4 exp(f64vec4);"
+
+ "float64_t log(float64_t);"
+ "f64vec2 log(f64vec2);"
+ "f64vec3 log(f64vec3);"
+ "f64vec4 log(f64vec4);"
+
+ "float64_t exp2(float64_t);"
+ "f64vec2 exp2(f64vec2);"
+ "f64vec3 exp2(f64vec3);"
+ "f64vec4 exp2(f64vec4);"
+
+ "float64_t log2(float64_t);"
+ "f64vec2 log2(f64vec2);"
+ "f64vec3 log2(f64vec3);"
+ "f64vec4 log2(f64vec4);"
+ "\n");
+ }
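+
+ // Illustrative use of the pack/unpack declarations above (a sketch; `word`
+ // is a hypothetical uint32_t input):
+ //   u8vec4   bytes  = unpack8(word);   // a uint32_t split into four bytes
+ //   uint32_t word2  = pack32(bytes);   // reassembled
+ //   u16vec2  halves = unpack16(word2); // or viewed as two 16-bit halves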
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangFragment].append(derivativesAndControl64bits);
+ stageBuiltins[EShLangFragment].append(
+ "float64_t interpolateAtCentroid(float64_t);"
+ "f64vec2 interpolateAtCentroid(f64vec2);"
+ "f64vec3 interpolateAtCentroid(f64vec3);"
+ "f64vec4 interpolateAtCentroid(f64vec4);"
+
+ "float64_t interpolateAtSample(float64_t, int);"
+ "f64vec2 interpolateAtSample(f64vec2, int);"
+ "f64vec3 interpolateAtSample(f64vec3, int);"
+ "f64vec4 interpolateAtSample(f64vec4, int);"
+
+ "float64_t interpolateAtOffset(float64_t, f64vec2);"
+ "f64vec2 interpolateAtOffset(f64vec2, f64vec2);"
+ "f64vec3 interpolateAtOffset(f64vec3, f64vec2);"
+ "f64vec4 interpolateAtOffset(f64vec4, f64vec2);"
+
+ "\n");
+
+ }
+
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by vertex shaders only.
+ // (Except the legacy lod functions, where it depends on the release whether
+ // they are vertex only.)
+ //
+ //============================================================================
+
+ //
+ // Geometric Functions.
+ //
+ if (IncludeLegacy(version, profile, spvVersion))
+ stageBuiltins[EShLangVertex].append("vec4 ftransform();");
+
+ //
+ // Original-style texture Functions with lod.
+ //
+ TString* s;
+ if (version == 100)
+ s = &stageBuiltins[EShLangVertex];
+ else
+ s = &commonBuiltins;
+ if ((profile == EEsProfile && version == 100) ||
+ profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ s->append(
+ "vec4 texture2DLod(sampler2D, vec2, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjLod(sampler2D, vec3, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjLod(sampler2D, vec4, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture3DLod(sampler3D, vec3, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check
+ "vec4 texture3DProjLod(sampler3D, vec4, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check
+ "vec4 textureCubeLod(samplerCube, vec3, float);" // GL_ARB_shader_texture_lod
+
+ "\n");
+ }
+ }
+ if ( profile == ECompatibilityProfile ||
+ (profile == ECoreProfile && version < 420) ||
+ profile == ENoProfile) {
+ if (spvVersion.spv == 0) {
+ s->append(
+ "vec4 texture1DLod(sampler1D, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjLod(sampler1D, vec2, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjLod(sampler1D, vec4, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DLod(sampler1DShadow, vec3, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DLod(sampler2DShadow, vec3, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DProjLod(sampler1DShadow, vec4, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DProjLod(sampler2DShadow, vec4, float);" // GL_ARB_shader_texture_lod
+
+ "vec4 texture1DGradARB(sampler1D, float, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjGradARB(sampler1D, vec2, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture1DProjGradARB(sampler1D, vec4, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DGradARB(sampler2D, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjGradARB(sampler2D, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DProjGradARB(sampler2D, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture3DGradARB(sampler3D, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod
+ "vec4 texture3DProjGradARB(sampler3D, vec4, vec3, vec3);" // GL_ARB_shader_texture_lod
+ "vec4 textureCubeGradARB(samplerCube, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DGradARB(sampler1DShadow, vec3, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow1DProjGradARB( sampler1DShadow, vec4, float, float);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DGradARB(sampler2DShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DProjGradARB( sampler2DShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DRectGradARB(sampler2DRect, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DRectProjGradARB( sampler2DRect, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 texture2DRectProjGradARB( sampler2DRect, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DRectGradARB( sampler2DRectShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod
+ "vec4 shadow2DRectProjGradARB(sampler2DRectShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod
+
+ "\n");
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 150) ||
+ (profile == EEsProfile && version >= 310)) {
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by geometry shaders only.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile && version >= 400) {
+ stageBuiltins[EShLangGeometry].append(
+ "void EmitStreamVertex(int);"
+ "void EndStreamPrimitive(int);"
+ );
+ }
+ stageBuiltins[EShLangGeometry].append(
+ "void EmitVertex();"
+ "void EndPrimitive();"
+ "\n");
+ }
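+
+ // Illustrative geometry-shader use of the emission built-ins (a sketch):
+ //   for (int i = 0; i < gl_in.length(); ++i) {
+ //       gl_Position = gl_in[i].gl_Position;
+ //       EmitVertex();
+ //   }
+ //   EndPrimitive();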
+
+ //============================================================================
+ //
+ // Prototypes for all control functions.
+ //
+ //============================================================================
+ bool esBarrier = (profile == EEsProfile && version >= 310);
+ if ((profile != EEsProfile && version >= 150) || esBarrier)
+ stageBuiltins[EShLangTessControl].append(
+ "void barrier();"
+ );
+ if ((profile != EEsProfile && version >= 420) || esBarrier)
+ stageBuiltins[EShLangCompute].append(
+ "void barrier();"
+ );
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ stageBuiltins[EShLangMeshNV].append(
+ "void barrier();"
+ );
+ stageBuiltins[EShLangTaskNV].append(
+ "void barrier();"
+ );
+ }
+#endif
+ if ((profile != EEsProfile && version >= 130) || esBarrier)
+ commonBuiltins.append(
+ "void memoryBarrier();"
+ );
+ if ((profile != EEsProfile && version >= 420) || esBarrier) {
+ commonBuiltins.append(
+ "void memoryBarrierAtomicCounter();"
+ "void memoryBarrierBuffer();"
+ "void memoryBarrierImage();"
+ );
+ stageBuiltins[EShLangCompute].append(
+ "void memoryBarrierShared();"
+ "void groupMemoryBarrier();"
+ );
+ }
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ stageBuiltins[EShLangMeshNV].append(
+ "void memoryBarrierShared();"
+ "void groupMemoryBarrier();"
+ );
+ stageBuiltins[EShLangTaskNV].append(
+ "void memoryBarrierShared();"
+ "void groupMemoryBarrier();"
+ );
+ }
+#endif
+
+ commonBuiltins.append("void controlBarrier(int, int, int, int);\n"
+ "void memoryBarrier(int, int, int);\n");
+
+ if (profile != EEsProfile && version >= 450) {
+ // coopMatStoreNV perhaps ought to have "out" on the buf parameter, but
+ // adding it introduces undesirable tempArgs on the stack. What we want
+ // is for "buf" to behave like a pointer value passed as an in parameter.
+ stageBuiltins[EShLangCompute].append(
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent float64_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n"
+ "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n"
+
+ "fcoopmatNV coopMatMulAddNV(fcoopmatNV A, fcoopmatNV B, fcoopmatNV C);\n"
+ );
+ }
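+
+ // A minimal usage sketch (names are illustrative, assuming a shader that
+ // enables GL_NV_cooperative_matrix and declares a buffer variable "buf"):
+ //   fcoopmatNV<16, gl_ScopeSubgroup, 16, 16> A, B, C;
+ //   coopMatLoadNV(A, buf, 0u, stride, false);
+ //   C = coopMatMulAddNV(A, B, C);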
+
+ //============================================================================
+ //
+ // Prototypes for built-in functions seen by fragment shaders only.
+ //
+ //============================================================================
+
+ //
+ // Original-style texture Functions with bias.
+ //
+ if (spvVersion.spv == 0 && (profile != EEsProfile || version == 100)) {
+ stageBuiltins[EShLangFragment].append(
+ "vec4 texture2D(sampler2D, vec2, float);"
+ "vec4 texture2DProj(sampler2D, vec3, float);"
+ "vec4 texture2DProj(sampler2D, vec4, float);"
+ "vec4 texture3D(sampler3D, vec3, float);" // OES_texture_3D
+ "vec4 texture3DProj(sampler3D, vec4, float);" // OES_texture_3D
+ "vec4 textureCube(samplerCube, vec3, float);"
+
+ "\n");
+ }
+ if (spvVersion.spv == 0 && (profile != EEsProfile && version > 100)) {
+ stageBuiltins[EShLangFragment].append(
+ "vec4 texture1D(sampler1D, float, float);"
+ "vec4 texture1DProj(sampler1D, vec2, float);"
+ "vec4 texture1DProj(sampler1D, vec4, float);"
+ "vec4 shadow1D(sampler1DShadow, vec3, float);"
+ "vec4 shadow2D(sampler2DShadow, vec3, float);"
+ "vec4 shadow1DProj(sampler1DShadow, vec4, float);"
+ "vec4 shadow2DProj(sampler2DShadow, vec4, float);"
+
+ "\n");
+ }
+ if (spvVersion.spv == 0 && profile == EEsProfile) {
+ stageBuiltins[EShLangFragment].append(
+ "vec4 texture2DLodEXT(sampler2D, vec2, float);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjLodEXT(sampler2D, vec3, float);" // GL_EXT_shader_texture_lod
+ "vec4 texture2DProjLodEXT(sampler2D, vec4, float);" // GL_EXT_shader_texture_lod
+ "vec4 textureCubeLodEXT(samplerCube, vec3, float);" // GL_EXT_shader_texture_lod
+
+ "\n");
+ }
+
+ stageBuiltins[EShLangFragment].append(derivatives);
+ stageBuiltins[EShLangFragment].append("\n");
+
+ // GL_ARB_derivative_control
+ if (profile != EEsProfile && version >= 400) {
+ stageBuiltins[EShLangFragment].append(derivativeControls);
+ stageBuiltins[EShLangFragment].append("\n");
+ }
+
+ // GL_OES_shader_multisample_interpolation
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 400)) {
+ stageBuiltins[EShLangFragment].append(
+ "float interpolateAtCentroid(float);"
+ "vec2 interpolateAtCentroid(vec2);"
+ "vec3 interpolateAtCentroid(vec3);"
+ "vec4 interpolateAtCentroid(vec4);"
+
+ "float interpolateAtSample(float, int);"
+ "vec2 interpolateAtSample(vec2, int);"
+ "vec3 interpolateAtSample(vec3, int);"
+ "vec4 interpolateAtSample(vec4, int);"
+
+ "float interpolateAtOffset(float, vec2);"
+ "vec2 interpolateAtOffset(vec2, vec2);"
+ "vec3 interpolateAtOffset(vec3, vec2);"
+ "vec4 interpolateAtOffset(vec4, vec2);"
+
+ "\n");
+ }
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_shader_explicit_vertex_parameter
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangFragment].append(
+ "float interpolateAtVertexAMD(float, uint);"
+ "vec2 interpolateAtVertexAMD(vec2, uint);"
+ "vec3 interpolateAtVertexAMD(vec3, uint);"
+ "vec4 interpolateAtVertexAMD(vec4, uint);"
+
+ "int interpolateAtVertexAMD(int, uint);"
+ "ivec2 interpolateAtVertexAMD(ivec2, uint);"
+ "ivec3 interpolateAtVertexAMD(ivec3, uint);"
+ "ivec4 interpolateAtVertexAMD(ivec4, uint);"
+
+ "uint interpolateAtVertexAMD(uint, uint);"
+ "uvec2 interpolateAtVertexAMD(uvec2, uint);"
+ "uvec3 interpolateAtVertexAMD(uvec3, uint);"
+ "uvec4 interpolateAtVertexAMD(uvec4, uint);"
+
+ "float16_t interpolateAtVertexAMD(float16_t, uint);"
+ "f16vec2 interpolateAtVertexAMD(f16vec2, uint);"
+ "f16vec3 interpolateAtVertexAMD(f16vec3, uint);"
+ "f16vec4 interpolateAtVertexAMD(f16vec4, uint);"
+
+ "\n");
+ }
+
+ // GL_AMD_gpu_shader_half_float
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangFragment].append(derivativesAndControl16bits);
+ stageBuiltins[EShLangFragment].append("\n");
+
+ stageBuiltins[EShLangFragment].append(
+ "float16_t interpolateAtCentroid(float16_t);"
+ "f16vec2 interpolateAtCentroid(f16vec2);"
+ "f16vec3 interpolateAtCentroid(f16vec3);"
+ "f16vec4 interpolateAtCentroid(f16vec4);"
+
+ "float16_t interpolateAtSample(float16_t, int);"
+ "f16vec2 interpolateAtSample(f16vec2, int);"
+ "f16vec3 interpolateAtSample(f16vec3, int);"
+ "f16vec4 interpolateAtSample(f16vec4, int);"
+
+ "float16_t interpolateAtOffset(float16_t, f16vec2);"
+ "f16vec2 interpolateAtOffset(f16vec2, f16vec2);"
+ "f16vec3 interpolateAtOffset(f16vec3, f16vec2);"
+ "f16vec4 interpolateAtOffset(f16vec4, f16vec2);"
+
+ "\n");
+ }
+
+ // GL_AMD_shader_fragment_mask
+ if (profile != EEsProfile && version >= 450 && spvVersion.vulkan > 0) {
+ stageBuiltins[EShLangFragment].append(
+ "uint fragmentMaskFetchAMD(subpassInputMS);"
+ "uint fragmentMaskFetchAMD(isubpassInputMS);"
+ "uint fragmentMaskFetchAMD(usubpassInputMS);"
+
+ "vec4 fragmentFetchAMD(subpassInputMS, uint);"
+ "ivec4 fragmentFetchAMD(isubpassInputMS, uint);"
+ "uvec4 fragmentFetchAMD(usubpassInputMS, uint);"
+
+ "\n");
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+
+ // Builtins for GL_NV_ray_tracing
+ if (profile != EEsProfile && version >= 460) {
+ stageBuiltins[EShLangRayGenNV].append(
+ "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);"
+ "void executeCallableNV(uint, int);"
+ "\n");
+ stageBuiltins[EShLangIntersectNV].append(
+ "bool reportIntersectionNV(float, uint);"
+ "\n");
+ stageBuiltins[EShLangAnyHitNV].append(
+ "void ignoreIntersectionNV();"
+ "void terminateRayNV();"
+ "\n");
+ stageBuiltins[EShLangClosestHitNV].append(
+ "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);"
+ "void executeCallableNV(uint, int);"
+ "\n");
+ stageBuiltins[EShLangMissNV].append(
+ "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);"
+ "void executeCallableNV(uint, int);"
+ "\n");
+ stageBuiltins[EShLangCallableNV].append(
+ "void executeCallableNV(uint, int);"
+ "\n");
+ }
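+
+ // In a typical GL_NV_ray_tracing pipeline, the ray generation stage calls
+ // traceNV(), which schedules the intersection/any-hit/closest-hit or miss
+ // stages; results travel through rayPayloadNV/rayPayloadInNV variables
+ // declared by the application.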
+
+ // E_SPV_NV_compute_shader_derivatives (GL_NV_compute_shader_derivatives)
+
+ stageBuiltins[EShLangCompute].append(derivatives);
+ stageBuiltins[EShLangCompute].append(derivativeControls);
+ stageBuiltins[EShLangCompute].append("\n");
+
+ if (profile != EEsProfile && version >= 450) {
+
+ stageBuiltins[EShLangCompute].append(derivativesAndControl16bits);
+ stageBuiltins[EShLangCompute].append(derivativesAndControl64bits);
+ stageBuiltins[EShLangCompute].append("\n");
+ }
+
+ // Builtins for GL_NV_mesh_shader
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ stageBuiltins[EShLangMeshNV].append(
+ "void writePackedPrimitiveIndices4x8NV(uint, uint);"
+ "\n");
+ }
+#endif
+
+ //============================================================================
+ //
+ // Standard Uniforms
+ //
+ //============================================================================
+
+ //
+ // Depth range in window coordinates, p. 33
+ //
+ if (spvVersion.spv == 0) {
+ commonBuiltins.append(
+ "struct gl_DepthRangeParameters {"
+ );
+ if (profile == EEsProfile) {
+ commonBuiltins.append(
+ "highp float near;" // n
+ "highp float far;" // f
+ "highp float diff;" // f - n
+ );
+ } else {
+ commonBuiltins.append(
+ "float near;" // n
+ "float far;" // f
+ "float diff;" // f - n
+ );
+ }
+
+ commonBuiltins.append(
+ "};"
+ "uniform gl_DepthRangeParameters gl_DepthRange;"
+ "\n");
+ }
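+
+ // A typical use (sketch): recover NDC depth in a fragment shader with
+ //   float zNdc = (2.0 * gl_FragCoord.z - gl_DepthRange.near - gl_DepthRange.far)
+ //                / gl_DepthRange.diff;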
+
+ if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) {
+ //
+ // Matrix state. p. 31, 32, 37, 39, 40.
+ //
+ commonBuiltins.append(
+ "uniform mat4 gl_ModelViewMatrix;"
+ "uniform mat4 gl_ProjectionMatrix;"
+ "uniform mat4 gl_ModelViewProjectionMatrix;"
+
+ //
+ // Derived matrix state that provides inverse and transposed versions
+ // of the matrices above.
+ //
+ "uniform mat3 gl_NormalMatrix;"
+
+ "uniform mat4 gl_ModelViewMatrixInverse;"
+ "uniform mat4 gl_ProjectionMatrixInverse;"
+ "uniform mat4 gl_ModelViewProjectionMatrixInverse;"
+
+ "uniform mat4 gl_ModelViewMatrixTranspose;"
+ "uniform mat4 gl_ProjectionMatrixTranspose;"
+ "uniform mat4 gl_ModelViewProjectionMatrixTranspose;"
+
+ "uniform mat4 gl_ModelViewMatrixInverseTranspose;"
+ "uniform mat4 gl_ProjectionMatrixInverseTranspose;"
+ "uniform mat4 gl_ModelViewProjectionMatrixInverseTranspose;"
+
+ //
+ // Normal scaling p. 39.
+ //
+ "uniform float gl_NormalScale;"
+
+ //
+ // Point Size, p. 66, 67.
+ //
+ "struct gl_PointParameters {"
+ "float size;"
+ "float sizeMin;"
+ "float sizeMax;"
+ "float fadeThresholdSize;"
+ "float distanceConstantAttenuation;"
+ "float distanceLinearAttenuation;"
+ "float distanceQuadraticAttenuation;"
+ "};"
+
+ "uniform gl_PointParameters gl_Point;"
+
+ //
+ // Material State p. 50, 55.
+ //
+ "struct gl_MaterialParameters {"
+ "vec4 emission;" // Ecm
+ "vec4 ambient;" // Acm
+ "vec4 diffuse;" // Dcm
+ "vec4 specular;" // Scm
+ "float shininess;" // Srm
+ "};"
+ "uniform gl_MaterialParameters gl_FrontMaterial;"
+ "uniform gl_MaterialParameters gl_BackMaterial;"
+
+ //
+ // Light State p 50, 53, 55.
+ //
+ "struct gl_LightSourceParameters {"
+ "vec4 ambient;" // Acli
+ "vec4 diffuse;" // Dcli
+ "vec4 specular;" // Scli
+ "vec4 position;" // Ppli
+ "vec4 halfVector;" // Derived: Hi
+ "vec3 spotDirection;" // Sdli
+ "float spotExponent;" // Srli
+ "float spotCutoff;" // Crli
+ // (range: [0.0,90.0], 180.0)
+ "float spotCosCutoff;" // Derived: cos(Crli)
+ // (range: [1.0,0.0],-1.0)
+ "float constantAttenuation;" // K0
+ "float linearAttenuation;" // K1
+ "float quadraticAttenuation;"// K2
+ "};"
+
+ "struct gl_LightModelParameters {"
+ "vec4 ambient;" // Acs
+ "};"
+
+ "uniform gl_LightModelParameters gl_LightModel;"
+
+ //
+ // Derived state from products of light and material.
+ //
+ "struct gl_LightModelProducts {"
+ "vec4 sceneColor;" // Derived. Ecm + Acm * Acs
+ "};"
+
+ "uniform gl_LightModelProducts gl_FrontLightModelProduct;"
+ "uniform gl_LightModelProducts gl_BackLightModelProduct;"
+
+ "struct gl_LightProducts {"
+ "vec4 ambient;" // Acm * Acli
+ "vec4 diffuse;" // Dcm * Dcli
+ "vec4 specular;" // Scm * Scli
+ "};"
+
+ //
+ // Fog p. 161
+ //
+ "struct gl_FogParameters {"
+ "vec4 color;"
+ "float density;"
+ "float start;"
+ "float end;"
+ "float scale;" // 1 / (gl_FogEnd - gl_FogStart)
+ "};"
+
+ "uniform gl_FogParameters gl_Fog;"
+
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the compute shader.
+ //
+ //============================================================================
+
+ if ((profile != EEsProfile && version >= 420) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangCompute].append(
+ "in highp uvec3 gl_NumWorkGroups;"
+ "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+
+ "in highp uvec3 gl_WorkGroupID;"
+ "in highp uvec3 gl_LocalInvocationID;"
+
+ "in highp uvec3 gl_GlobalInvocationID;"
+ "in highp uint gl_LocalInvocationIndex;"
+
+ "\n");
+ }
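+
+ // Note gl_GlobalInvocationID is defined as
+ //   gl_WorkGroupID * gl_WorkGroupSize + gl_LocalInvocationID.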
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangCompute].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "\n");
+ }
+
+#ifdef NV_EXTENSIONS
+ //============================================================================
+ //
+ // Define the interface to the mesh/task shader.
+ //
+ //============================================================================
+
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ // per-vertex attributes
+ stageBuiltins[EShLangMeshNV].append(
+ "out gl_MeshPerVertexNV {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ "float gl_CullDistance[];"
+ "perviewNV vec4 gl_PositionPerViewNV[];"
+ "perviewNV float gl_ClipDistancePerViewNV[][];"
+ "perviewNV float gl_CullDistancePerViewNV[][];"
+ "} gl_MeshVerticesNV[];"
+ );
+
+ // per-primitive attributes
+ stageBuiltins[EShLangMeshNV].append(
+ "perprimitiveNV out gl_MeshPerPrimitiveNV {"
+ "int gl_PrimitiveID;"
+ "int gl_Layer;"
+ "int gl_ViewportIndex;"
+ "int gl_ViewportMask[];"
+ "perviewNV int gl_LayerPerViewNV[];"
+ "perviewNV int gl_ViewportMaskPerViewNV[][];"
+ "} gl_MeshPrimitivesNV[];"
+ );
+
+ stageBuiltins[EShLangMeshNV].append(
+ "out uint gl_PrimitiveCountNV;"
+ "out uint gl_PrimitiveIndicesNV[];"
+
+ "in uint gl_MeshViewCountNV;"
+ "in uint gl_MeshViewIndicesNV[4];"
+
+ "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+
+ "in highp uvec3 gl_WorkGroupID;"
+ "in highp uvec3 gl_LocalInvocationID;"
+
+ "in highp uvec3 gl_GlobalInvocationID;"
+ "in highp uint gl_LocalInvocationIndex;"
+
+ "\n");
+
+ stageBuiltins[EShLangTaskNV].append(
+ "out uint gl_TaskCountNV;"
+
+ "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);"
+
+ "in highp uvec3 gl_WorkGroupID;"
+ "in highp uvec3 gl_LocalInvocationID;"
+
+ "in highp uvec3 gl_GlobalInvocationID;"
+ "in highp uint gl_LocalInvocationIndex;"
+
+ "in uint gl_MeshViewCountNV;"
+ "in uint gl_MeshViewIndicesNV[4];"
+
+ "\n");
+ }
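+
+ // A mesh shader workgroup writes gl_PrimitiveCountNV and fills
+ // gl_PrimitiveIndicesNV (or uses writePackedPrimitiveIndices4x8NV, declared
+ // above) along with gl_MeshVerticesNV to emit its geometry.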
+
+ if (profile != EEsProfile && version >= 450) {
+ stageBuiltins[EShLangMeshNV].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters
+ "\n");
+
+ stageBuiltins[EShLangTaskNV].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters
+ "\n");
+
+ if (version >= 460) {
+ stageBuiltins[EShLangMeshNV].append(
+ "in int gl_DrawID;"
+ "\n");
+
+ stageBuiltins[EShLangTaskNV].append(
+ "in int gl_DrawID;"
+ "\n");
+ }
+ }
+#endif
+
+ //============================================================================
+ //
+ // Define the interface to the vertex shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile) {
+ if (version < 130) {
+ stageBuiltins[EShLangVertex].append(
+ "attribute vec4 gl_Color;"
+ "attribute vec4 gl_SecondaryColor;"
+ "attribute vec3 gl_Normal;"
+ "attribute vec4 gl_Vertex;"
+ "attribute vec4 gl_MultiTexCoord0;"
+ "attribute vec4 gl_MultiTexCoord1;"
+ "attribute vec4 gl_MultiTexCoord2;"
+ "attribute vec4 gl_MultiTexCoord3;"
+ "attribute vec4 gl_MultiTexCoord4;"
+ "attribute vec4 gl_MultiTexCoord5;"
+ "attribute vec4 gl_MultiTexCoord6;"
+ "attribute vec4 gl_MultiTexCoord7;"
+ "attribute float gl_FogCoord;"
+ "\n");
+ } else if (IncludeLegacy(version, profile, spvVersion)) {
+ stageBuiltins[EShLangVertex].append(
+ "in vec4 gl_Color;"
+ "in vec4 gl_SecondaryColor;"
+ "in vec3 gl_Normal;"
+ "in vec4 gl_Vertex;"
+ "in vec4 gl_MultiTexCoord0;"
+ "in vec4 gl_MultiTexCoord1;"
+ "in vec4 gl_MultiTexCoord2;"
+ "in vec4 gl_MultiTexCoord3;"
+ "in vec4 gl_MultiTexCoord4;"
+ "in vec4 gl_MultiTexCoord5;"
+ "in vec4 gl_MultiTexCoord6;"
+ "in vec4 gl_MultiTexCoord7;"
+ "in float gl_FogCoord;"
+ "\n");
+ }
+
+ if (version < 150) {
+ if (version < 130) {
+ stageBuiltins[EShLangVertex].append(
+ " vec4 gl_ClipVertex;" // needs qualifier fixed later
+ "varying vec4 gl_FrontColor;"
+ "varying vec4 gl_BackColor;"
+ "varying vec4 gl_FrontSecondaryColor;"
+ "varying vec4 gl_BackSecondaryColor;"
+ "varying vec4 gl_TexCoord[];"
+ "varying float gl_FogFragCoord;"
+ "\n");
+ } else if (IncludeLegacy(version, profile, spvVersion)) {
+ stageBuiltins[EShLangVertex].append(
+ " vec4 gl_ClipVertex;" // needs qualifier fixed later
+ "out vec4 gl_FrontColor;"
+ "out vec4 gl_BackColor;"
+ "out vec4 gl_FrontSecondaryColor;"
+ "out vec4 gl_BackSecondaryColor;"
+ "out vec4 gl_TexCoord[];"
+ "out float gl_FogFragCoord;"
+ "\n");
+ }
+ stageBuiltins[EShLangVertex].append(
+ "vec4 gl_Position;" // needs qualifier fixed later
+ "float gl_PointSize;" // needs qualifier fixed later
+ );
+
+ if (version == 130 || version == 140)
+ stageBuiltins[EShLangVertex].append(
+ "out float gl_ClipDistance[];"
+ );
+ } else {
+ // version >= 150
+ stageBuiltins[EShLangVertex].append(
+ "out gl_PerVertex {"
+ "vec4 gl_Position;" // needs qualifier fixed later
+ "float gl_PointSize;" // needs qualifier fixed later
+ "float gl_ClipDistance[];"
+ );
+ if (IncludeLegacy(version, profile, spvVersion))
+ stageBuiltins[EShLangVertex].append(
+ "vec4 gl_ClipVertex;" // needs qualifier fixed later
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangVertex].append(
+ "float gl_CullDistance[];"
+ );
+ stageBuiltins[EShLangVertex].append(
+ "};"
+ "\n");
+ }
+ if (version >= 130 && spvVersion.vulkan == 0)
+ stageBuiltins[EShLangVertex].append(
+ "int gl_VertexID;" // needs qualifier fixed later
+ );
+ if (version >= 140 && spvVersion.vulkan == 0)
+ stageBuiltins[EShLangVertex].append(
+ "int gl_InstanceID;" // needs qualifier fixed later
+ );
+ if (spvVersion.vulkan > 0 && version >= 140)
+ stageBuiltins[EShLangVertex].append(
+ "in int gl_VertexIndex;"
+ "in int gl_InstanceIndex;"
+ );
+ if (version >= 440) {
+ stageBuiltins[EShLangVertex].append(
+ "in int gl_BaseVertexARB;"
+ "in int gl_BaseInstanceARB;"
+ "in int gl_DrawIDARB;"
+ );
+ }
+ if (version >= 410) {
+ stageBuiltins[EShLangVertex].append(
+ "out int gl_ViewportIndex;"
+ "out int gl_Layer;"
+ );
+ }
+ if (version >= 460) {
+ stageBuiltins[EShLangVertex].append(
+ "in int gl_BaseVertex;"
+ "in int gl_BaseInstance;"
+ "in int gl_DrawID;"
+ );
+ }
+
+#ifdef NV_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangVertex].append(
+ "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ );
+#endif
+
+ } else {
+ // ES profile
+ if (version == 100) {
+ stageBuiltins[EShLangVertex].append(
+ "highp vec4 gl_Position;" // needs qualifier fixed later
+ "mediump float gl_PointSize;" // needs qualifier fixed later
+ );
+ } else {
+ if (spvVersion.vulkan == 0)
+ stageBuiltins[EShLangVertex].append(
+ "in highp int gl_VertexID;" // needs qualifier fixed later
+ "in highp int gl_InstanceID;" // needs qualifier fixed later
+ );
+ if (spvVersion.vulkan > 0)
+ stageBuiltins[EShLangVertex].append(
+ "in highp int gl_VertexIndex;"
+ "in highp int gl_InstanceIndex;"
+ );
+ if (version < 310)
+ stageBuiltins[EShLangVertex].append(
+ "highp vec4 gl_Position;" // needs qualifier fixed later
+ "highp float gl_PointSize;" // needs qualifier fixed later
+ );
+ else
+ stageBuiltins[EShLangVertex].append(
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;" // needs qualifier fixed later
+ "highp float gl_PointSize;" // needs qualifier fixed later
+ "};"
+ );
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangVertex].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ if (version >= 300 /* both ES and non-ES */) {
+ stageBuiltins[EShLangVertex].append(
+ "in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the geometry shader.
+ //
+ //============================================================================
+
+ if (profile == ECoreProfile || profile == ECompatibilityProfile) {
+ stageBuiltins[EShLangGeometry].append(
+ "in gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (profile == ECompatibilityProfile)
+ stageBuiltins[EShLangGeometry].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangGeometry].append(
+ "float gl_CullDistance[];"
+#ifdef NV_EXTENSIONS
+ "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ );
+ stageBuiltins[EShLangGeometry].append(
+ "} gl_in[];"
+
+ "in int gl_PrimitiveIDIn;"
+ "out gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ "\n");
+ if (profile == ECompatibilityProfile && version >= 400)
+ stageBuiltins[EShLangGeometry].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangGeometry].append(
+ "float gl_CullDistance[];"
+ );
+ stageBuiltins[EShLangGeometry].append(
+ "};"
+
+ "out int gl_PrimitiveID;"
+ "out int gl_Layer;");
+
+ if (version >= 150)
+ stageBuiltins[EShLangGeometry].append(
+ "out int gl_ViewportIndex;"
+ );
+
+ if (profile == ECompatibilityProfile && version < 400)
+ stageBuiltins[EShLangGeometry].append(
+ "out vec4 gl_ClipVertex;"
+ );
+
+ if (version >= 400)
+ stageBuiltins[EShLangGeometry].append(
+ "in int gl_InvocationID;"
+ );
+
+#ifdef NV_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangGeometry].append(
+ "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ );
+#endif
+
+ stageBuiltins[EShLangGeometry].append("\n");
+ } else if (profile == EEsProfile && version >= 310) {
+ stageBuiltins[EShLangGeometry].append(
+ "in gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ "} gl_in[];"
+ "\n"
+ "in highp int gl_PrimitiveIDIn;"
+ "in highp int gl_InvocationID;"
+ "\n"
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ "};"
+ "\n"
+ "out highp int gl_PrimitiveID;"
+ "out highp int gl_Layer;"
+ "\n"
+ );
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangGeometry].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the tessellation control shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile && version >= 150) {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessControl].append(
+ "in int gl_PatchVerticesIn;"
+ "in int gl_PrimitiveID;"
+ "in int gl_InvocationID;"
+
+ "out gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (profile == ECompatibilityProfile)
+ stageBuiltins[EShLangTessControl].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangTessControl].append(
+ "float gl_CullDistance[];"
+#ifdef NV_EXTENSIONS
+ "int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ );
+ stageBuiltins[EShLangTessControl].append(
+ "} gl_out[];"
+
+ "patch out float gl_TessLevelOuter[4];"
+ "patch out float gl_TessLevelInner[2];"
+ "\n");
+
+ if (version >= 410)
+ stageBuiltins[EShLangTessControl].append(
+ "out int gl_ViewportIndex;"
+ "out int gl_Layer;"
+ "\n");
+
+ } else {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessControl].append(
+ "in highp int gl_PatchVerticesIn;"
+ "in highp int gl_PrimitiveID;"
+ "in highp int gl_InvocationID;"
+
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ );
+ stageBuiltins[EShLangTessControl].append(
+ "} gl_out[];"
+
+ "patch out highp float gl_TessLevelOuter[4];"
+ "patch out highp float gl_TessLevelInner[2];"
+ "patch out highp vec4 gl_BoundingBoxOES[2];"
+ "patch out highp vec4 gl_BoundingBoxEXT[2];"
+ "\n");
+ if (profile == EEsProfile && version >= 320) {
+ stageBuiltins[EShLangTessControl].append(
+ "patch out highp vec4 gl_BoundingBox[2];"
+ "\n"
+ );
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangTessControl].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the tessellation evaluation shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile && version >= 150) {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessEvaluation].append(
+ "in int gl_PatchVerticesIn;"
+ "in int gl_PrimitiveID;"
+ "in vec3 gl_TessCoord;"
+
+ "patch in float gl_TessLevelOuter[4];"
+ "patch in float gl_TessLevelInner[2];"
+
+ "out gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (version >= 400 && profile == ECompatibilityProfile)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "float gl_CullDistance[];"
+ );
+ stageBuiltins[EShLangTessEvaluation].append(
+ "};"
+ "\n");
+
+ if (version >= 410)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "out int gl_ViewportIndex;"
+ "out int gl_Layer;"
+ "\n");
+
+#ifdef NV_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangTessEvaluation].append(
+ "out int gl_ViewportMask[];" // GL_NV_viewport_array2
+ "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering
+ "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+ );
+#endif
+
+ } else if (profile == EEsProfile && version >= 310) {
+ // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below,
+ // as it depends on the resource sizing of gl_MaxPatchVertices.
+
+ stageBuiltins[EShLangTessEvaluation].append(
+ "in highp int gl_PatchVerticesIn;"
+ "in highp int gl_PrimitiveID;"
+ "in highp vec3 gl_TessCoord;"
+
+ "patch in highp float gl_TessLevelOuter[4];"
+ "patch in highp float gl_TessLevelInner[2];"
+
+ "out gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+ );
+ stageBuiltins[EShLangTessEvaluation].append(
+ "};"
+ "\n");
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangTessEvaluation].append(
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ //============================================================================
+ //
+ // Define the interface to the fragment shader.
+ //
+ //============================================================================
+
+ if (profile != EEsProfile) {
+
+ stageBuiltins[EShLangFragment].append(
+ "vec4 gl_FragCoord;" // needs qualifier fixed later
+ "bool gl_FrontFacing;" // needs qualifier fixed later
+ "float gl_FragDepth;" // needs qualifier fixed later
+ );
+ if (version >= 120)
+ stageBuiltins[EShLangFragment].append(
+ "vec2 gl_PointCoord;" // needs qualifier fixed later
+ );
+ if (version >= 140)
+ stageBuiltins[EShLangFragment].append(
+ "out int gl_FragStencilRefARB;"
+ );
+ if (IncludeLegacy(version, profile, spvVersion) || (! ForwardCompatibility && version < 420))
+ stageBuiltins[EShLangFragment].append(
+ "vec4 gl_FragColor;" // needs qualifier fixed later
+ );
+
+ if (version < 130) {
+ stageBuiltins[EShLangFragment].append(
+ "varying vec4 gl_Color;"
+ "varying vec4 gl_SecondaryColor;"
+ "varying vec4 gl_TexCoord[];"
+ "varying float gl_FogFragCoord;"
+ );
+ } else {
+ stageBuiltins[EShLangFragment].append(
+ "in float gl_ClipDistance[];"
+ );
+
+ if (IncludeLegacy(version, profile, spvVersion)) {
+ if (version < 150)
+ stageBuiltins[EShLangFragment].append(
+ "in float gl_FogFragCoord;"
+ "in vec4 gl_TexCoord[];"
+ "in vec4 gl_Color;"
+ "in vec4 gl_SecondaryColor;"
+ );
+ else
+ stageBuiltins[EShLangFragment].append(
+ "in gl_PerFragment {"
+ "in float gl_FogFragCoord;"
+ "in vec4 gl_TexCoord[];"
+ "in vec4 gl_Color;"
+ "in vec4 gl_SecondaryColor;"
+ "};"
+ );
+ }
+ }
+
+ if (version >= 150)
+ stageBuiltins[EShLangFragment].append(
+ "flat in int gl_PrimitiveID;"
+ );
+
+ if (version >= 400) {
+ stageBuiltins[EShLangFragment].append(
+ "flat in int gl_SampleID;"
+ " in vec2 gl_SamplePosition;"
+ "flat in int gl_SampleMaskIn[];"
+ " out int gl_SampleMask[];"
+ );
+ if (spvVersion.spv == 0)
+ stageBuiltins[EShLangFragment].append(
+ "uniform int gl_NumSamples;"
+ );
+ }
+
+ if (version >= 430)
+ stageBuiltins[EShLangFragment].append(
+ "flat in int gl_Layer;"
+ "flat in int gl_ViewportIndex;"
+ );
+
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append(
+ "in float gl_CullDistance[];"
+ "bool gl_HelperInvocation;" // needs qualifier fixed later
+ );
+
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density
+ "flat in ivec2 gl_FragSizeEXT;"
+ "flat in int gl_FragInvocationCountEXT;"
+ );
+
+#ifdef AMD_EXTENSIONS
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append(
+ "in vec2 gl_BaryCoordNoPerspAMD;"
+ "in vec2 gl_BaryCoordNoPerspCentroidAMD;"
+ "in vec2 gl_BaryCoordNoPerspSampleAMD;"
+ "in vec2 gl_BaryCoordSmoothAMD;"
+ "in vec2 gl_BaryCoordSmoothCentroidAMD;"
+ "in vec2 gl_BaryCoordSmoothSampleAMD;"
+ "in vec3 gl_BaryCoordPullModelAMD;"
+ );
+#endif
+
+#ifdef NV_EXTENSIONS
+ if (version >= 430)
+ stageBuiltins[EShLangFragment].append(
+ "in bool gl_FragFullyCoveredNV;"
+ );
+ if (version >= 450)
+ stageBuiltins[EShLangFragment].append(
+ "flat in ivec2 gl_FragmentSizeNV;" // GL_NV_shading_rate_image
+ "flat in int gl_InvocationsPerPixelNV;"
+ "in vec3 gl_BaryCoordNV;" // GL_NV_fragment_shader_barycentric
+ "in vec3 gl_BaryCoordNoPerspNV;"
+ );
+
+#endif
+ } else {
+ // ES profile
+
+ if (version == 100) {
+ stageBuiltins[EShLangFragment].append(
+ "mediump vec4 gl_FragCoord;" // needs qualifier fixed later
+ " bool gl_FrontFacing;" // needs qualifier fixed later
+ "mediump vec4 gl_FragColor;" // needs qualifier fixed later
+ "mediump vec2 gl_PointCoord;" // needs qualifier fixed later
+ );
+ }
+ if (version >= 300) {
+ stageBuiltins[EShLangFragment].append(
+ "highp vec4 gl_FragCoord;" // needs qualifier fixed later
+ " bool gl_FrontFacing;" // needs qualifier fixed later
+ "mediump vec2 gl_PointCoord;" // needs qualifier fixed later
+ "highp float gl_FragDepth;" // needs qualifier fixed later
+ );
+ }
+ if (version >= 310) {
+ stageBuiltins[EShLangFragment].append(
+ "bool gl_HelperInvocation;" // needs qualifier fixed later
+ "flat in highp int gl_PrimitiveID;" // needs qualifier fixed later
+ "flat in highp int gl_Layer;" // needs qualifier fixed later
+ );
+
+ stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables
+ "flat in lowp int gl_SampleID;"
+ " in mediump vec2 gl_SamplePosition;"
+ "flat in highp int gl_SampleMaskIn[];"
+ " out highp int gl_SampleMask[];"
+ );
+ if (spvVersion.spv == 0)
+ stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables
+ "uniform lowp int gl_NumSamples;"
+ );
+ }
+ stageBuiltins[EShLangFragment].append(
+ "highp float gl_FragDepthEXT;" // GL_EXT_frag_depth
+ );
+
+ if (version >= 310)
+ stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density
+ "flat in ivec2 gl_FragSizeEXT;"
+ "flat in int gl_FragInvocationCountEXT;"
+ );
+#ifdef NV_EXTENSIONS
+ if (version >= 320)
+ stageBuiltins[EShLangFragment].append( // GL_NV_shading_rate_image
+ "flat in ivec2 gl_FragmentSizeNV;"
+ "flat in int gl_InvocationsPerPixelNV;"
+ );
+ if (version >= 320)
+ stageBuiltins[EShLangFragment].append(
+ "in vec3 gl_BaryCoordNV;"
+ "in vec3 gl_BaryCoordNoPerspNV;"
+ );
+#endif
+
+ }
+ stageBuiltins[EShLangFragment].append("\n");
+
+ if (version >= 130)
+ add2ndGenerationSamplingImaging(version, profile, spvVersion);
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile && version >= 450) {
+ const char* ballotDecls =
+ "uniform uint gl_SubGroupSizeARB;"
+ "in uint gl_SubGroupInvocationARB;"
+ "in uint64_t gl_SubGroupEqMaskARB;"
+ "in uint64_t gl_SubGroupGeMaskARB;"
+ "in uint64_t gl_SubGroupGtMaskARB;"
+ "in uint64_t gl_SubGroupLeMaskARB;"
+ "in uint64_t gl_SubGroupLtMaskARB;"
+ "\n";
+ const char* fragmentBallotDecls =
+ "uniform uint gl_SubGroupSizeARB;"
+ "flat in uint gl_SubGroupInvocationARB;"
+ "flat in uint64_t gl_SubGroupEqMaskARB;"
+ "flat in uint64_t gl_SubGroupGeMaskARB;"
+ "flat in uint64_t gl_SubGroupGtMaskARB;"
+ "flat in uint64_t gl_SubGroupLeMaskARB;"
+ "flat in uint64_t gl_SubGroupLtMaskARB;"
+ "\n";
+ stageBuiltins[EShLangVertex] .append(ballotDecls);
+ stageBuiltins[EShLangTessControl] .append(ballotDecls);
+ stageBuiltins[EShLangTessEvaluation].append(ballotDecls);
+ stageBuiltins[EShLangGeometry] .append(ballotDecls);
+ stageBuiltins[EShLangCompute] .append(ballotDecls);
+ stageBuiltins[EShLangFragment] .append(fragmentBallotDecls);
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV] .append(ballotDecls);
+ stageBuiltins[EShLangTaskNV] .append(ballotDecls);
+#endif
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ stageBuiltins[EShLangFragment].append(
+ "flat in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "flat in highp int gl_ViewIndex;" // GL_EXT_multiview
+ "\n");
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ const char* ballotDecls =
+ "in mediump uint gl_SubgroupSize;"
+ "in mediump uint gl_SubgroupInvocationID;"
+ "in highp uvec4 gl_SubgroupEqMask;"
+ "in highp uvec4 gl_SubgroupGeMask;"
+ "in highp uvec4 gl_SubgroupGtMask;"
+ "in highp uvec4 gl_SubgroupLeMask;"
+ "in highp uvec4 gl_SubgroupLtMask;"
+ "\n";
+ const char* fragmentBallotDecls =
+ "flat in mediump uint gl_SubgroupSize;"
+ "flat in mediump uint gl_SubgroupInvocationID;"
+ "flat in highp uvec4 gl_SubgroupEqMask;"
+ "flat in highp uvec4 gl_SubgroupGeMask;"
+ "flat in highp uvec4 gl_SubgroupGtMask;"
+ "flat in highp uvec4 gl_SubgroupLeMask;"
+ "flat in highp uvec4 gl_SubgroupLtMask;"
+ "\n";
+ stageBuiltins[EShLangVertex] .append(ballotDecls);
+ stageBuiltins[EShLangTessControl] .append(ballotDecls);
+ stageBuiltins[EShLangTessEvaluation].append(ballotDecls);
+ stageBuiltins[EShLangGeometry] .append(ballotDecls);
+ stageBuiltins[EShLangCompute] .append(ballotDecls);
+ stageBuiltins[EShLangFragment] .append(fragmentBallotDecls);
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV] .append(ballotDecls);
+ stageBuiltins[EShLangTaskNV] .append(ballotDecls);
+#endif
+
+ stageBuiltins[EShLangCompute].append(
+ "highp in uint gl_NumSubgroups;"
+ "highp in uint gl_SubgroupID;"
+ "\n");
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangMeshNV].append(
+ "highp in uint gl_NumSubgroups;"
+ "highp in uint gl_SubgroupID;"
+ "\n");
+ stageBuiltins[EShLangTaskNV].append(
+ "highp in uint gl_NumSubgroups;"
+ "highp in uint gl_SubgroupID;"
+ "\n");
+#endif
+ }
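+
+ // Per GL_KHR_shader_subgroup, gl_SubgroupEqMask is the uvec4 bitmask with
+ // only the bit for gl_SubgroupInvocationID set (bit B of component C maps to
+ // invocation C*32 + B); the Ge/Gt/Le/Lt masks follow analogously.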
+
+#ifdef NV_EXTENSIONS
+ // GL_NV_ray_tracing
+ if (profile != EEsProfile && version >= 460) {
+
+ const char *constRayFlags =
+ "const uint gl_RayFlagsNoneNV = 0U;"
+ "const uint gl_RayFlagsOpaqueNV = 1U;"
+ "const uint gl_RayFlagsNoOpaqueNV = 2U;"
+ "const uint gl_RayFlagsTerminateOnFirstHitNV = 4U;"
+ "const uint gl_RayFlagsSkipClosestHitShaderNV = 8U;"
+ "const uint gl_RayFlagsCullBackFacingTrianglesNV = 16U;"
+ "const uint gl_RayFlagsCullFrontFacingTrianglesNV = 32U;"
+ "const uint gl_RayFlagsCullOpaqueNV = 64U;"
+ "const uint gl_RayFlagsCullNoOpaqueNV = 128U;"
+ "\n";
+ const char *rayGenDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "\n";
+ const char *intersectDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "in int gl_PrimitiveID;"
+ "in int gl_InstanceID;"
+ "in int gl_InstanceCustomIndexNV;"
+ "in vec3 gl_WorldRayOriginNV;"
+ "in vec3 gl_WorldRayDirectionNV;"
+ "in vec3 gl_ObjectRayOriginNV;"
+ "in vec3 gl_ObjectRayDirectionNV;"
+ "in float gl_RayTminNV;"
+ "in float gl_RayTmaxNV;"
+ "in mat4x3 gl_ObjectToWorldNV;"
+ "in mat4x3 gl_WorldToObjectNV;"
+ "in uint gl_IncomingRayFlagsNV;"
+ "\n";
+ const char *hitDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "in int gl_PrimitiveID;"
+ "in int gl_InstanceID;"
+ "in int gl_InstanceCustomIndexNV;"
+ "in vec3 gl_WorldRayOriginNV;"
+ "in vec3 gl_WorldRayDirectionNV;"
+ "in vec3 gl_ObjectRayOriginNV;"
+ "in vec3 gl_ObjectRayDirectionNV;"
+ "in float gl_RayTminNV;"
+ "in float gl_RayTmaxNV;"
+ "in float gl_HitTNV;"
+ "in uint gl_HitKindNV;"
+ "in mat4x3 gl_ObjectToWorldNV;"
+ "in mat4x3 gl_WorldToObjectNV;"
+ "in uint gl_IncomingRayFlagsNV;"
+ "\n";
+ const char *missDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "in vec3 gl_WorldRayOriginNV;"
+ "in vec3 gl_WorldRayDirectionNV;"
+ "in vec3 gl_ObjectRayOriginNV;"
+ "in vec3 gl_ObjectRayDirectionNV;"
+ "in float gl_RayTminNV;"
+ "in float gl_RayTmaxNV;"
+ "in uint gl_IncomingRayFlagsNV;"
+ "\n";
+
+ const char *callableDecls =
+ "in uvec3 gl_LaunchIDNV;"
+ "in uvec3 gl_LaunchSizeNV;"
+ "\n";
+
+ stageBuiltins[EShLangRayGenNV].append(rayGenDecls);
+ stageBuiltins[EShLangRayGenNV].append(constRayFlags);
+
+ stageBuiltins[EShLangIntersectNV].append(intersectDecls);
+ stageBuiltins[EShLangIntersectNV].append(constRayFlags);
+
+ stageBuiltins[EShLangAnyHitNV].append(hitDecls);
+ stageBuiltins[EShLangAnyHitNV].append(constRayFlags);
+
+ stageBuiltins[EShLangClosestHitNV].append(hitDecls);
+ stageBuiltins[EShLangClosestHitNV].append(constRayFlags);
+
+ stageBuiltins[EShLangMissNV].append(missDecls);
+ stageBuiltins[EShLangMissNV].append(constRayFlags);
+
+ stageBuiltins[EShLangCallableNV].append(callableDecls);
+ stageBuiltins[EShLangCallableNV].append(constRayFlags);
+
+ }
+ if ((profile != EEsProfile && version >= 140)) {
+ const char *deviceIndex =
+ "in highp int gl_DeviceIndex;" // GL_EXT_device_group
+ "\n";
+
+ stageBuiltins[EShLangRayGenNV].append(deviceIndex);
+ stageBuiltins[EShLangIntersectNV].append(deviceIndex);
+ stageBuiltins[EShLangAnyHitNV].append(deviceIndex);
+ stageBuiltins[EShLangClosestHitNV].append(deviceIndex);
+ stageBuiltins[EShLangMissNV].append(deviceIndex);
+ }
+#endif
+
+ if (version >= 300 /* both ES and non-ES */) {
+ stageBuiltins[EShLangFragment].append(
+ "flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
+ "\n");
+ }
+
+ if ((profile != EEsProfile && version >= 420) ||
+ (profile == EEsProfile && version >= 310)) {
+ commonBuiltins.append("const int gl_ScopeDevice = 1;\n");
+ commonBuiltins.append("const int gl_ScopeWorkgroup = 2;\n");
+ commonBuiltins.append("const int gl_ScopeSubgroup = 3;\n");
+ commonBuiltins.append("const int gl_ScopeInvocation = 4;\n");
+ commonBuiltins.append("const int gl_ScopeQueueFamily = 5;\n");
+
+ commonBuiltins.append("const int gl_SemanticsRelaxed = 0x0;\n");
+ commonBuiltins.append("const int gl_SemanticsAcquire = 0x2;\n");
+ commonBuiltins.append("const int gl_SemanticsRelease = 0x4;\n");
+ commonBuiltins.append("const int gl_SemanticsAcquireRelease = 0x8;\n");
+ commonBuiltins.append("const int gl_SemanticsMakeAvailable = 0x2000;\n");
+ commonBuiltins.append("const int gl_SemanticsMakeVisible = 0x4000;\n");
+
+ commonBuiltins.append("const int gl_StorageSemanticsNone = 0x0;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsBuffer = 0x40;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsShared = 0x100;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsImage = 0x800;\n");
+ commonBuiltins.append("const int gl_StorageSemanticsOutput = 0x1000;\n");
+ }
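+
+ // Sketch of use with the scoped image atomics generated in
+ // addImageFunctions() below (names illustrative):
+ //   imageAtomicAdd(img, p, 1u, gl_ScopeDevice,
+ //                  gl_StorageSemanticsImage, gl_SemanticsRelaxed);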
+
+ // printf("%s\n", commonBuiltins.c_str());
+ // printf("%s\n", stageBuiltins[EShLangFragment].c_str());
+}
+
+//
+// Helper function for initialize(), to add the second set of names for texturing,
+// when adding context-independent built-in functions.
+//
+void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion)
+{
+ //
+ // This function proper enumerates the types, then calls the next set of
+ // functions to enumerate all the uses for each type.
+ //
+#ifdef AMD_EXTENSIONS
+ TBasicType bTypes[4] = { EbtFloat, EbtFloat16, EbtInt, EbtUint };
+#else
+ TBasicType bTypes[3] = { EbtFloat, EbtInt, EbtUint };
+#endif
+ bool skipBuffer = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 140);
+ bool skipCubeArrayed = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 130);
+
+ // enumerate all the types
+ for (int image = 0; image <= 1; ++image) { // loop over "bool" image vs sampler
+
+ for (int shadow = 0; shadow <= 1; ++shadow) { // loop over "bool" shadow or not
+ for (int ms = 0; ms <= 1; ++ms) { // loop over "bool" multisample or not
+ if ((ms || image) && shadow)
+ continue;
+ if (ms && profile != EEsProfile && version < 150)
+ continue;
+ if (ms && image && profile == EEsProfile)
+ continue;
+ if (ms && profile == EEsProfile && version < 310)
+ continue;
+
+ for (int arrayed = 0; arrayed <= 1; ++arrayed) { // loop over "bool" arrayed or not
+ for (int dim = Esd1D; dim < EsdNumDims; ++dim) { // 1D, 2D, ..., buffer
+ if (dim == EsdSubpass && spvVersion.vulkan == 0)
+ continue;
+ if (dim == EsdSubpass && (image || shadow || arrayed))
+ continue;
+ if ((dim == Esd1D || dim == EsdRect) && profile == EEsProfile)
+ continue;
+ if (dim != Esd2D && dim != EsdSubpass && ms)
+ continue;
+ if ((dim == Esd3D || dim == EsdRect) && arrayed)
+ continue;
+ if (dim == Esd3D && shadow)
+ continue;
+ if (dim == EsdCube && arrayed && skipCubeArrayed)
+ continue;
+ if (dim == EsdBuffer && skipBuffer)
+ continue;
+ if (dim == EsdBuffer && (shadow || arrayed || ms))
+ continue;
+ if (ms && arrayed && profile == EEsProfile && version < 310)
+ continue;
+#ifdef AMD_EXTENSIONS
+ for (int bType = 0; bType < 4; ++bType) { // float, float16, int, uint results
+
+ if (shadow && bType > 1)
+ continue;
+
+ if (bTypes[bType] == EbtFloat16 && (profile == EEsProfile || version < 450))
+ continue;
+#else
+ for (int bType = 0; bType < 3; ++bType) { // float, int, uint results
+
+ if (shadow && bType > 0)
+ continue;
+#endif
+ if (dim == EsdRect && version < 140 && bType > 0)
+ continue;
+
+ //
+ // Now, make all the function prototypes for the type we just built...
+ //
+
+ TSampler sampler;
+ if (dim == EsdSubpass) {
+ sampler.setSubpass(bTypes[bType], ms ? true : false);
+ } else if (image) {
+ sampler.setImage(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false,
+ shadow ? true : false,
+ ms ? true : false);
+ } else {
+ sampler.set(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false,
+ shadow ? true : false,
+ ms ? true : false);
+ }
+
+ TString typeName = sampler.getString();
+
+ if (dim == EsdSubpass) {
+ addSubpassSampling(sampler, typeName, version, profile);
+ continue;
+ }
+
+ addQueryFunctions(sampler, typeName, version, profile);
+
+ if (image)
+ addImageFunctions(sampler, typeName, version, profile);
+ else {
+ addSamplingFunctions(sampler, typeName, version, profile);
+ addGatherFunctions(sampler, typeName, version, profile);
+
+ if (spvVersion.vulkan > 0 && sampler.isCombined() && !sampler.shadow) {
+ // Base Vulkan allows texelFetch() for
+ // textureBuffer (i.e. without sampler).
+ //
+ // GL_EXT_samplerless_texture_functions
+ // allows texelFetch() and query functions
+ // (other than textureQueryLod()) for all
+ // texture types.
+ sampler.setTexture(sampler.type, sampler.dim, sampler.arrayed, sampler.shadow,
+ sampler.ms);
+ TString textureTypeName = sampler.getString();
+ addSamplingFunctions(sampler, textureTypeName, version, profile);
+ addQueryFunctions(sampler, textureTypeName, version, profile);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // sparseTexelsResidentARB()
+ //
+
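+ // The int residency code returned by the sparse lookup functions generated
+ // in addSamplingFunctions() is what a shader passes to
+ // sparseTexelsResidentARB().
+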
+ if (profile != EEsProfile && version >= 450) {
+ commonBuiltins.append("bool sparseTexelsResidentARB(int code);\n");
+ }
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the query functions for the given type.
+//
+void TBuiltIns::addQueryFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ if (sampler.image && ((profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 430)))
+ return;
+
+ //
+ // textureSize() and imageSize()
+ //
+
+ int sizeDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0) - (sampler.dim == EsdCube ? 1 : 0);
+ if (profile == EEsProfile)
+ commonBuiltins.append("highp ");
+ if (sizeDims == 1)
+ commonBuiltins.append("int");
+ else {
+ commonBuiltins.append("ivec");
+ commonBuiltins.append(postfixes[sizeDims]);
+ }
+ if (sampler.image)
+ commonBuiltins.append(" imageSize(readonly writeonly volatile coherent ");
+ else
+ commonBuiltins.append(" textureSize(");
+ commonBuiltins.append(typeName);
+ if (! sampler.image && sampler.dim != EsdRect && sampler.dim != EsdBuffer && ! sampler.ms)
+ commonBuiltins.append(",int);\n");
+ else
+ commonBuiltins.append(");\n");
+
+ //
+ // textureSamples() and imageSamples()
+ //
+
+ // GL_ARB_shader_texture_image_samples
+ // TODO: spec issue? there are no memory qualifiers; how to query a writeonly/readonly image, etc?
+ if (profile != EEsProfile && version >= 430 && sampler.ms) {
+ commonBuiltins.append("int ");
+ if (sampler.image)
+ commonBuiltins.append("imageSamples(readonly writeonly volatile coherent ");
+ else
+ commonBuiltins.append("textureSamples(");
+ commonBuiltins.append(typeName);
+ commonBuiltins.append(");\n");
+ }
+
+ //
+ // textureQueryLod(), fragment stage only
+ //
+
+ if (profile != EEsProfile && version >= 400 && sampler.combined && sampler.dim != EsdRect && ! sampler.ms && sampler.dim != EsdBuffer) {
+#ifdef AMD_EXTENSIONS
+ for (int f16TexAddr = 0; f16TexAddr < 2; ++f16TexAddr) {
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+#endif
+ stageBuiltins[EShLangFragment].append("vec2 textureQueryLod(");
+ stageBuiltins[EShLangFragment].append(typeName);
+ if (dimMap[sampler.dim] == 1)
+#ifdef AMD_EXTENSIONS
+ if (f16TexAddr)
+ stageBuiltins[EShLangFragment].append(", float16_t");
+ else
+ stageBuiltins[EShLangFragment].append(", float");
+#else
+ stageBuiltins[EShLangFragment].append(", float");
+#endif
+ else {
+#ifdef AMD_EXTENSIONS
+ if (f16TexAddr)
+ stageBuiltins[EShLangFragment].append(", f16vec");
+ else
+ stageBuiltins[EShLangFragment].append(", vec");
+#else
+ stageBuiltins[EShLangFragment].append(", vec");
+#endif
+ stageBuiltins[EShLangFragment].append(postfixes[dimMap[sampler.dim]]);
+ }
+ stageBuiltins[EShLangFragment].append(");\n");
+#ifdef AMD_EXTENSIONS
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangCompute].append("vec2 textureQueryLod(");
+ stageBuiltins[EShLangCompute].append(typeName);
+ if (dimMap[sampler.dim] == 1)
+ stageBuiltins[EShLangCompute].append(", float");
+ else {
+ stageBuiltins[EShLangCompute].append(", vec");
+ stageBuiltins[EShLangCompute].append(postfixes[dimMap[sampler.dim]]);
+ }
+ stageBuiltins[EShLangCompute].append(");\n");
+#endif
+ }
+
+ //
+ // textureQueryLevels()
+ //
+
+ if (profile != EEsProfile && version >= 430 && ! sampler.image && sampler.dim != EsdRect && ! sampler.ms && sampler.dim != EsdBuffer) {
+ commonBuiltins.append("int textureQueryLevels(");
+ commonBuiltins.append(typeName);
+ commonBuiltins.append(");\n");
+ }
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the image access functions for the given type.
+//
+void TBuiltIns::addImageFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ int dims = dimMap[sampler.dim];
+ // most things with an array add a dimension, except for cubemaps
+ if (sampler.arrayed && sampler.dim != EsdCube)
+ ++dims;
+
+ TString imageParams = typeName;
+ if (dims == 1)
+ imageParams.append(", int");
+ else {
+ imageParams.append(", ivec");
+ imageParams.append(postfixes[dims]);
+ }
+ if (sampler.ms)
+ imageParams.append(", int");
+
+ if (profile == EEsProfile)
+ commonBuiltins.append("highp ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4 imageLoad(readonly volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(");\n");
+
+ commonBuiltins.append("void imageStore(writeonly volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4);\n");
+
+ if (sampler.dim != Esd1D && sampler.dim != EsdBuffer && profile != EEsProfile && version >= 450) {
+ commonBuiltins.append("int sparseImageLoadARB(readonly volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", out ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4");
+ commonBuiltins.append(");\n");
+ }
+
+ if (profile != EEsProfile ||
+ (profile == EEsProfile && version >= 310)) {
+ if (sampler.type == EbtInt || sampler.type == EbtUint) {
+ const char* dataType = sampler.type == EbtInt ? "highp int" : "highp uint";
+
+ const int numBuiltins = 7;
+
+ static const char* atomicFunc[numBuiltins] = {
+ " imageAtomicAdd(volatile coherent ",
+ " imageAtomicMin(volatile coherent ",
+ " imageAtomicMax(volatile coherent ",
+ " imageAtomicAnd(volatile coherent ",
+ " imageAtomicOr(volatile coherent ",
+ " imageAtomicXor(volatile coherent ",
+ " imageAtomicExchange(volatile coherent "
+ };
+
+ // Loop twice to add prototypes with/without scope/semantics
+ for (int j = 0; j < 2; ++j) {
+ for (int i = 0; i < numBuiltins; ++i) {
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(atomicFunc[i]);
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ if (j == 1) {
+ commonBuiltins.append(", int, int, int");
+ }
+ commonBuiltins.append(");\n");
+ }
+
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(" imageAtomicCompSwap(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ if (j == 1) {
+ commonBuiltins.append(", int, int, int, int, int");
+ }
+ commonBuiltins.append(");\n");
+ }
+
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(" imageAtomicLoad(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", int, int, int);\n");
+
+ commonBuiltins.append("void imageAtomicStore(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(dataType);
+ commonBuiltins.append(", int, int, int);\n");
+
+ } else {
+ // not int or uint
+ // GL_ARB_ES3_1_compatibility
+ // TODO: spec issue: are there restrictions on the kind of layout() that can be used? what about dropping memory qualifiers?
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 310)) {
+ commonBuiltins.append("float imageAtomicExchange(volatile coherent ");
+ commonBuiltins.append(imageParams);
+ commonBuiltins.append(", float);\n");
+ }
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (sampler.dim == EsdRect || sampler.dim == EsdBuffer || sampler.shadow || sampler.ms)
+ return;
+
+ if (profile == EEsProfile || version < 450)
+ return;
+
+ TString imageLodParams = typeName;
+ if (dims == 1)
+ imageLodParams.append(", int");
+ else {
+ imageLodParams.append(", ivec");
+ imageLodParams.append(postfixes[dims]);
+ }
+ imageLodParams.append(", int");
+
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4 imageLoadLodAMD(readonly volatile coherent ");
+ commonBuiltins.append(imageLodParams);
+ commonBuiltins.append(");\n");
+
+ commonBuiltins.append("void imageStoreLodAMD(writeonly volatile coherent ");
+ commonBuiltins.append(imageLodParams);
+ commonBuiltins.append(", ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4);\n");
+
+ if (sampler.dim != Esd1D) {
+ commonBuiltins.append("int sparseImageLoadLodAMD(readonly volatile coherent ");
+ commonBuiltins.append(imageLodParams);
+ commonBuiltins.append(", out ");
+ commonBuiltins.append(prefixes[sampler.type]);
+ commonBuiltins.append("vec4");
+ commonBuiltins.append(");\n");
+ }
+#endif
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the subpass access functions for the given type.
+//
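+// For example, for subpassInputMS this generates
+//   "vec4 subpassLoad(subpassInputMS, int);".
+//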
+void TBuiltIns::addSubpassSampling(TSampler sampler, const TString& typeName, int /*version*/, EProfile /*profile*/)
+{
+ stageBuiltins[EShLangFragment].append(prefixes[sampler.type]);
+ stageBuiltins[EShLangFragment].append("vec4 subpassLoad");
+ stageBuiltins[EShLangFragment].append("(");
+ stageBuiltins[EShLangFragment].append(typeName.c_str());
+ if (sampler.ms)
+ stageBuiltins[EShLangFragment].append(", int");
+ stageBuiltins[EShLangFragment].append(");\n");
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the texture lookup functions for the given type.
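+//
+// The nested loops below walk the cross product of the proj/lod/bias/offset/
+// fetch/grad/sparse variants, skipping invalid combinations; e.g. a sampler2D
+// yields (among many others):
+//     vec4 texture(sampler2D, vec2);
+//     vec4 textureProjLod(sampler2D, vec3, float);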
+//
+void TBuiltIns::addSamplingFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ //
+ // texturing
+ //
+ for (int proj = 0; proj <= 1; ++proj) { // loop over "bool" projective or not
+
+ if (proj && (sampler.dim == EsdCube || sampler.dim == EsdBuffer || sampler.arrayed || sampler.ms || !sampler.combined))
+ continue;
+
+ for (int lod = 0; lod <= 1; ++lod) {
+
+ if (lod && (sampler.dim == EsdBuffer || sampler.dim == EsdRect || sampler.ms || !sampler.combined))
+ continue;
+ if (lod && sampler.dim == Esd2D && sampler.arrayed && sampler.shadow)
+ continue;
+ if (lod && sampler.dim == EsdCube && sampler.shadow)
+ continue;
+
+ for (int bias = 0; bias <= 1; ++bias) {
+
+ if (bias && (lod || sampler.ms || !sampler.combined))
+ continue;
+ if (bias && (sampler.dim == Esd2D || sampler.dim == EsdCube) && sampler.shadow && sampler.arrayed)
+ continue;
+ if (bias && (sampler.dim == EsdRect || sampler.dim == EsdBuffer))
+ continue;
+
+ for (int offset = 0; offset <= 1; ++offset) { // loop over "bool" offset or not
+
+ if (proj + offset + bias + lod > 3)
+ continue;
+ if (offset && (sampler.dim == EsdCube || sampler.dim == EsdBuffer || sampler.ms))
+ continue;
+
+ for (int fetch = 0; fetch <= 1; ++fetch) { // loop over "bool" fetch or not
+
+ if (proj + offset + fetch + bias + lod > 3)
+ continue;
+ if (fetch && (lod || bias))
+ continue;
+ if (fetch && (sampler.shadow || sampler.dim == EsdCube))
+ continue;
+ if (fetch == 0 && (sampler.ms || sampler.dim == EsdBuffer || !sampler.combined))
+ continue;
+
+ for (int grad = 0; grad <= 1; ++grad) { // loop over "bool" grad or not
+
+ if (grad && (lod || bias || sampler.ms || !sampler.combined))
+ continue;
+ if (grad && sampler.dim == EsdBuffer)
+ continue;
+ if (proj + offset + fetch + grad + bias + lod > 3)
+ continue;
+
+ for (int extraProj = 0; extraProj <= 1; ++extraProj) {
+ bool compare = false;
+ int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0);
+ // skip dummy unused second component for 1D non-array shadows
+ if (sampler.shadow && totalDims < 2)
+ totalDims = 2;
+ totalDims += (sampler.shadow ? 1 : 0) + proj;
+ if (totalDims > 4 && sampler.shadow) {
+ compare = true;
+ totalDims = 4;
+ }
+ assert(totalDims <= 4);
+
+ if (extraProj && ! proj)
+ continue;
+ if (extraProj && (sampler.dim == Esd3D || sampler.shadow || !sampler.combined))
+ continue;
+#ifdef AMD_EXTENSIONS
+ for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing
+
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+ if (f16TexAddr && sampler.shadow && ! compare) {
+ compare = true; // compare argument is always present
+ totalDims--;
+ }
+#endif
+                            for (int lodClamp = 0; lodClamp <= 1; ++lodClamp) { // loop over "bool" lod clamp
+
+ if (lodClamp && (profile == EEsProfile || version < 450))
+ continue;
+ if (lodClamp && (proj || lod || fetch))
+ continue;
+
+ for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not
+
+ if (sparse && (profile == EEsProfile || version < 450))
+ continue;
+                                    // Sparse sampling is not supported for 1D/1D-array textures, buffer textures, or projective texturing
+ if (sparse && (sampler.dim == Esd1D || sampler.dim == EsdBuffer || proj))
+ continue;
+
+ TString s;
+
+ // return type
+ if (sparse)
+ s.append("int ");
+ else {
+ if (sampler.shadow)
+#ifdef AMD_EXTENSIONS
+ if (sampler.type == EbtFloat16)
+ s.append("float16_t ");
+ else
+ s.append("float ");
+#else
+ s.append("float ");
+#endif
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+ }
+
+ // name
+ if (sparse) {
+ if (fetch)
+ s.append("sparseTexel");
+ else
+ s.append("sparseTexture");
+ }
+ else {
+ if (fetch)
+ s.append("texel");
+ else
+ s.append("texture");
+ }
+ if (proj)
+ s.append("Proj");
+ if (lod)
+ s.append("Lod");
+ if (grad)
+ s.append("Grad");
+ if (fetch)
+ s.append("Fetch");
+ if (offset)
+ s.append("Offset");
+ if (lodClamp)
+ s.append("Clamp");
+ if (lodClamp || sparse)
+ s.append("ARB");
+ s.append("(");
+
+ // sampler type
+ s.append(typeName);
+#ifdef AMD_EXTENSIONS
+ // P coordinate
+ if (extraProj) {
+ if (f16TexAddr)
+ s.append(",f16vec4");
+ else
+ s.append(",vec4");
+ } else {
+ s.append(",");
+ TBasicType t = fetch ? EbtInt : (f16TexAddr ? EbtFloat16 : EbtFloat);
+ if (totalDims == 1)
+ s.append(TType::getBasicString(t));
+ else {
+ s.append(prefixes[t]);
+ s.append("vec");
+ s.append(postfixes[totalDims]);
+ }
+ }
+#else
+ // P coordinate
+ if (extraProj)
+ s.append(",vec4");
+ else {
+ s.append(",");
+ TBasicType t = fetch ? EbtInt : EbtFloat;
+ if (totalDims == 1)
+ s.append(TType::getBasicString(t));
+ else {
+ s.append(prefixes[t]);
+ s.append("vec");
+ s.append(postfixes[totalDims]);
+ }
+ }
+#endif
+ // non-optional compare
+ if (compare)
+ s.append(",float");
+
+ // non-optional lod argument (lod that's not driven by lod loop) or sample
+ if ((fetch && sampler.dim != EsdBuffer && sampler.dim != EsdRect && !sampler.ms) ||
+ (sampler.ms && fetch))
+ s.append(",int");
+#ifdef AMD_EXTENSIONS
+ // non-optional lod
+ if (lod) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+
+ // gradient arguments
+ if (grad) {
+ if (dimMap[sampler.dim] == 1) {
+ if (f16TexAddr)
+ s.append(",float16_t,float16_t");
+ else
+ s.append(",float,float");
+ } else {
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ }
+ }
+#else
+ // non-optional lod
+ if (lod)
+ s.append(",float");
+
+ // gradient arguments
+ if (grad) {
+ if (dimMap[sampler.dim] == 1)
+ s.append(",float,float");
+ else {
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ s.append(",vec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ }
+ }
+#endif
+ // offset
+ if (offset) {
+ if (dimMap[sampler.dim] == 1)
+ s.append(",int");
+ else {
+ s.append(",ivec");
+ s.append(postfixes[dimMap[sampler.dim]]);
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ // lod clamp
+ if (lodClamp) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+#else
+ // lod clamp
+ if (lodClamp)
+ s.append(",float");
+#endif
+ // texel out (for sparse texture)
+ if (sparse) {
+ s.append(",out ");
+ if (sampler.shadow)
+#ifdef AMD_EXTENSIONS
+ if (sampler.type == EbtFloat16)
+ s.append("float16_t");
+ else
+ s.append("float");
+#else
+ s.append("float");
+#endif
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4");
+ }
+ }
+#ifdef AMD_EXTENSIONS
+ // optional bias
+ if (bias) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+#else
+ // optional bias
+ if (bias)
+ s.append(",float");
+#endif
+ s.append(");\n");
+
+ // Add to the per-language set of built-ins
+ if (bias || lodClamp) {
+ stageBuiltins[EShLangFragment].append(s);
+#ifdef NV_EXTENSIONS
+ stageBuiltins[EShLangCompute].append(s);
+#endif
+ } else
+ commonBuiltins.append(s);
+
+ }
+ }
+#ifdef AMD_EXTENSIONS
+ }
+#endif
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+//
+// Helper function for add2ndGenerationSamplingImaging(),
+// when adding context-independent built-in functions.
+//
+// Add all the texture gather functions for the given type.
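+//
+// Illustrative examples for a sampler2D (not exhaustive):
+//     vec4 textureGather(sampler2D, vec2);
+//     vec4 textureGather(sampler2D, vec2, int);             // explicit comp argument
+//     vec4 textureGatherOffsets(sampler2D, vec2, ivec2[4]);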
+//
+void TBuiltIns::addGatherFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile)
+{
+ switch (sampler.dim) {
+ case Esd2D:
+ case EsdRect:
+ case EsdCube:
+ break;
+ default:
+ return;
+ }
+
+ if (sampler.ms)
+ return;
+
+ if (version < 140 && sampler.dim == EsdRect && sampler.type != EbtFloat)
+ return;
+
+#ifdef AMD_EXTENSIONS
+ for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing
+
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+#endif
+ for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets
+
+ for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument
+
+ if (comp > 0 && sampler.shadow)
+ continue;
+
+ if (offset > 0 && sampler.dim == EsdCube)
+ continue;
+
+ for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not
+ if (sparse && (profile == EEsProfile || version < 450))
+ continue;
+
+ TString s;
+
+ // return type
+ if (sparse)
+ s.append("int ");
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // name
+ if (sparse)
+ s.append("sparseTextureGather");
+ else
+ s.append("textureGather");
+ switch (offset) {
+ case 1:
+ s.append("Offset");
+ break;
+ case 2:
+ s.append("Offsets");
+ break;
+ default:
+ break;
+ }
+ if (sparse)
+ s.append("ARB");
+ s.append("(");
+
+ // sampler type argument
+ s.append(typeName);
+
+ // P coordinate argument
+#ifdef AMD_EXTENSIONS
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+#else
+ s.append(",vec");
+#endif
+ int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0);
+ s.append(postfixes[totalDims]);
+
+ // refZ argument
+ if (sampler.shadow)
+ s.append(",float");
+
+ // offset argument
+ if (offset > 0) {
+ s.append(",ivec2");
+ if (offset == 2)
+ s.append("[4]");
+ }
+
+ // texel out (for sparse texture)
+ if (sparse) {
+ s.append(",out ");
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // comp argument
+ if (comp)
+ s.append(",int");
+
+ s.append(");\n");
+ commonBuiltins.append(s);
+#ifdef AMD_EXTENSIONS
+ }
+#endif
+ }
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (sampler.dim == EsdRect || sampler.shadow)
+ return;
+
+ if (profile == EEsProfile || version < 450)
+ return;
+
+ for (int bias = 0; bias < 2; ++bias) { // loop over presence of bias argument
+
+ for (int lod = 0; lod < 2; ++lod) { // loop over presence of lod argument
+
+ if ((lod && bias) || (lod == 0 && bias == 0))
+ continue;
+
+ for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing
+
+ if (f16TexAddr && sampler.type != EbtFloat16)
+ continue;
+
+ for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets
+
+ for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument
+
+ if (comp == 0 && bias)
+ continue;
+
+ if (offset > 0 && sampler.dim == EsdCube)
+ continue;
+
+ for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not
+ if (sparse && (profile == EEsProfile || version < 450))
+ continue;
+
+ TString s;
+
+ // return type
+ if (sparse)
+ s.append("int ");
+ else {
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // name
+ if (sparse)
+ s.append("sparseTextureGather");
+ else
+ s.append("textureGather");
+
+ if (lod)
+ s.append("Lod");
+
+ switch (offset) {
+ case 1:
+ s.append("Offset");
+ break;
+ case 2:
+ s.append("Offsets");
+ break;
+ default:
+ break;
+ }
+
+ if (lod)
+ s.append("AMD");
+ else if (sparse)
+ s.append("ARB");
+
+ s.append("(");
+
+ // sampler type argument
+ s.append(typeName);
+
+ // P coordinate argument
+ if (f16TexAddr)
+ s.append(",f16vec");
+ else
+ s.append(",vec");
+ int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0);
+ s.append(postfixes[totalDims]);
+
+ // lod argument
+ if (lod) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+
+ // offset argument
+ if (offset > 0) {
+ s.append(",ivec2");
+ if (offset == 2)
+ s.append("[4]");
+ }
+
+ // texel out (for sparse texture)
+ if (sparse) {
+ s.append(",out ");
+ s.append(prefixes[sampler.type]);
+ s.append("vec4 ");
+ }
+
+ // comp argument
+ if (comp)
+ s.append(",int");
+
+ // bias argument
+ if (bias) {
+ if (f16TexAddr)
+ s.append(",float16_t");
+ else
+ s.append(",float");
+ }
+
+ s.append(");\n");
+ if (bias)
+ stageBuiltins[EShLangFragment].append(s);
+ else
+ commonBuiltins.append(s);
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
+}
+
+//
+// Add context-dependent built-in functions and variables that are present
+// for the given version and profile. All results go into commonBuiltins,
+// because this function is called for one specific stage at a time; so
+// stage-specific entries also land in commonBuiltins, and only when that
+// stage was requested.
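+//
+// For example, the snprintf calls below build declarations of the shape
+//     "const mediump int gl_MaxVertexAttribs = <resources.maxVertexAttribs>;"
+// that are later parsed as ordinary shader text.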
+//
+void TBuiltIns::initialize(const TBuiltInResource &resources, int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language)
+{
+ //
+ // Initialize the context-dependent (resource-dependent) built-in strings for parsing.
+ //
+
+ //============================================================================
+ //
+ // Standard Uniforms
+ //
+ //============================================================================
+
+ TString& s = commonBuiltins;
+ const int maxSize = 80;
+ char builtInConstant[maxSize];
+
+ //
+    // Build string of implementation-dependent constants.
+ //
+
+ if (profile == EEsProfile) {
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexUniformVectors = %d;", resources.maxVertexUniformVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentUniformVectors = %d;", resources.maxFragmentUniformVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers);
+ s.append(builtInConstant);
+
+ if (version == 100) {
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVaryingVectors = %d;", resources.maxVaryingVectors);
+ s.append(builtInConstant);
+ } else {
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexOutputVectors = %d;", resources.maxVertexOutputVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentInputVectors = %d;", resources.maxFragmentInputVectors);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset);
+ s.append(builtInConstant);
+ }
+
+ if (version >= 310) {
+ // geometry
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", resources.maxGeometryAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources.maxGeometryAtomicCounterBuffers);
+ s.append(builtInConstant);
+
+ // tessellation
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel);
+ s.append(builtInConstant);
+
+ // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices
+ if (language == EShLangTessControl || language == EShLangTessEvaluation) {
+ s.append(
+ "in gl_PerVertex {"
+ "highp vec4 gl_Position;"
+ "highp float gl_PointSize;"
+#ifdef NV_EXTENSIONS
+ "highp vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "highp vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ "} gl_in[gl_MaxPatchVertices];"
+ "\n");
+ }
+ }
+
+ } else {
+ // non-ES profile
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxLights = %d;", resources.maxLights);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxClipPlanes = %d;", resources.maxClipPlanes);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTextureUnits = %d;", resources.maxTextureUnits);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTextureCoords = %d;", resources.maxTextureCoords);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexUniformComponents = %d;", resources.maxVertexUniformComponents);
+ s.append(builtInConstant);
+
+ if (version < 150 || ARBCompatibility) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingFloats = %d;", resources.maxVaryingFloats);
+ s.append(builtInConstant);
+ }
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentUniformComponents = %d;", resources.maxFragmentUniformComponents);
+ s.append(builtInConstant);
+
+ if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) {
+ //
+            // OpenGL 'uniform' state. Page numbers refer to version
+            // 1.4 of the OpenGL specification.
+ //
+
+ //
+ // Matrix state. p. 31, 32, 37, 39, 40.
+ //
+ s.append("uniform mat4 gl_TextureMatrix[gl_MaxTextureCoords];"
+
+ //
+ // Derived matrix state that provides inverse and transposed versions
+ // of the matrices above.
+ //
+ "uniform mat4 gl_TextureMatrixInverse[gl_MaxTextureCoords];"
+
+ "uniform mat4 gl_TextureMatrixTranspose[gl_MaxTextureCoords];"
+
+ "uniform mat4 gl_TextureMatrixInverseTranspose[gl_MaxTextureCoords];"
+
+ //
+ // Clip planes p. 42.
+ //
+ "uniform vec4 gl_ClipPlane[gl_MaxClipPlanes];"
+
+ //
+ // Light State p 50, 53, 55.
+ //
+ "uniform gl_LightSourceParameters gl_LightSource[gl_MaxLights];"
+
+ //
+ // Derived state from products of light.
+ //
+ "uniform gl_LightProducts gl_FrontLightProduct[gl_MaxLights];"
+ "uniform gl_LightProducts gl_BackLightProduct[gl_MaxLights];"
+
+ //
+ // Texture Environment and Generation, p. 152, p. 40-42.
+ //
+ "uniform vec4 gl_TextureEnvColor[gl_MaxTextureImageUnits];"
+ "uniform vec4 gl_EyePlaneS[gl_MaxTextureCoords];"
+ "uniform vec4 gl_EyePlaneT[gl_MaxTextureCoords];"
+ "uniform vec4 gl_EyePlaneR[gl_MaxTextureCoords];"
+ "uniform vec4 gl_EyePlaneQ[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneS[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneT[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneR[gl_MaxTextureCoords];"
+ "uniform vec4 gl_ObjectPlaneQ[gl_MaxTextureCoords];");
+ }
+
+ if (version >= 130) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxClipDistances = %d;", resources.maxClipDistances);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingComponents = %d;", resources.maxVaryingComponents);
+ s.append(builtInConstant);
+
+ // GL_ARB_shading_language_420pack
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset);
+ s.append(builtInConstant);
+ }
+
+ // geometry
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryVaryingComponents = %d;", resources.maxGeometryVaryingComponents);
+ s.append(builtInConstant);
+
+ }
+
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexOutputComponents = %d;", resources.maxVertexOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentInputComponents = %d;", resources.maxFragmentInputComponents);
+ s.append(builtInConstant);
+ }
+
+ // tessellation
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices);
+ s.append(builtInConstant);
+
+ // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices
+ if (language == EShLangTessControl || language == EShLangTessEvaluation) {
+ s.append(
+ "in gl_PerVertex {"
+ "vec4 gl_Position;"
+ "float gl_PointSize;"
+ "float gl_ClipDistance[];"
+ );
+ if (profile == ECompatibilityProfile)
+ s.append(
+ "vec4 gl_ClipVertex;"
+ "vec4 gl_FrontColor;"
+ "vec4 gl_BackColor;"
+ "vec4 gl_FrontSecondaryColor;"
+ "vec4 gl_BackSecondaryColor;"
+ "vec4 gl_TexCoord[];"
+ "float gl_FogFragCoord;"
+ );
+ if (profile != EEsProfile && version >= 450)
+ s.append(
+ "float gl_CullDistance[];"
+#ifdef NV_EXTENSIONS
+ "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering
+ "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes
+#endif
+ );
+ s.append(
+ "} gl_in[gl_MaxPatchVertices];"
+ "\n");
+ }
+ }
+
+ if (version >= 150) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxViewports = %d;", resources.maxViewports);
+ s.append(builtInConstant);
+ }
+
+ // images
+ if (version >= 130) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUnitsAndFragmentOutputs = %d;", resources.maxCombinedImageUnitsAndFragmentOutputs);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxImageSamples = %d;", resources.maxImageSamples);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlImageUniforms = %d;", resources.maxTessControlImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationImageUniforms = %d;", resources.maxTessEvaluationImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms);
+ s.append(builtInConstant);
+ }
+
+ // enhanced layouts
+ if (version >= 430) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackBuffers = %d;", resources.maxTransformFeedbackBuffers);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackInterleavedComponents = %d;", resources.maxTransformFeedbackInterleavedComponents);
+ s.append(builtInConstant);
+ }
+ }
+
+ // images (some in compute below)
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 130)) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxImageUnits = %d;", resources.maxImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedShaderOutputResources = %d;", resources.maxCombinedShaderOutputResources);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxVertexImageUniforms = %d;", resources.maxVertexImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentImageUniforms = %d;", resources.maxFragmentImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUniforms = %d;", resources.maxCombinedImageUniforms);
+ s.append(builtInConstant);
+ }
+
+ // atomic counters (some in compute below)
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 420)) {
+        snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounters = %d;", resources.maxVertexAtomicCounters);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounters = %d;", resources.maxFragmentAtomicCounters);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounters = %d;", resources.maxCombinedAtomicCounters);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBindings = %d;", resources.maxAtomicCounterBindings);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounterBuffers = %d;", resources.maxVertexAtomicCounterBuffers);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounterBuffers = %d;", resources.maxFragmentAtomicCounterBuffers);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounterBuffers = %d;", resources.maxCombinedAtomicCounterBuffers);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBufferSize = %d;", resources.maxAtomicCounterBufferSize);
+        s.append(builtInConstant);
+ s.append(builtInConstant);
+ }
+ if (profile != EEsProfile && version >= 420) {
+        snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounters = %d;", resources.maxTessControlAtomicCounters);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounters = %d;", resources.maxTessEvaluationAtomicCounters);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", resources.maxGeometryAtomicCounters);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounterBuffers = %d;", resources.maxTessControlAtomicCounterBuffers);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounterBuffers = %d;", resources.maxTessEvaluationAtomicCounterBuffers);
+        s.append(builtInConstant);
+        snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources.maxGeometryAtomicCounterBuffers);
+        s.append(builtInConstant);
+
+ s.append("\n");
+ }
+
+ // compute
+ if ((profile == EEsProfile && version >= 310) || (profile != EEsProfile && version >= 420)) {
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupCount = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupCountX,
+ resources.maxComputeWorkGroupCountY,
+ resources.maxComputeWorkGroupCountZ);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupSize = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupSizeX,
+ resources.maxComputeWorkGroupSizeY,
+ resources.maxComputeWorkGroupSizeZ);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeUniformComponents = %d;", resources.maxComputeUniformComponents);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeTextureImageUnits = %d;", resources.maxComputeTextureImageUnits);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeImageUniforms = %d;", resources.maxComputeImageUniforms);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounters = %d;", resources.maxComputeAtomicCounters);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounterBuffers = %d;", resources.maxComputeAtomicCounterBuffers);
+ s.append(builtInConstant);
+
+ s.append("\n");
+ }
+
+ // GL_ARB_cull_distance
+ if (profile != EEsProfile && version >= 450) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCullDistances = %d;", resources.maxCullDistances);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedClipAndCullDistances = %d;", resources.maxCombinedClipAndCullDistances);
+ s.append(builtInConstant);
+ }
+
+ // GL_ARB_ES3_1_compatibility
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 310)) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxSamples = %d;", resources.maxSamples);
+ s.append(builtInConstant);
+ }
+
+#ifdef AMD_EXTENSIONS
+ // GL_AMD_gcn_shader
+ if (profile != EEsProfile && version >= 450) {
+ snprintf(builtInConstant, maxSize, "const int gl_SIMDGroupSizeAMD = 64;");
+ s.append(builtInConstant);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ // SPV_NV_mesh_shader
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputVerticesNV = %d;", resources.maxMeshOutputVerticesNV);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputPrimitivesNV = %d;", resources.maxMeshOutputPrimitivesNV);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxMeshWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxMeshWorkGroupSizeX_NV,
+ resources.maxMeshWorkGroupSizeY_NV,
+ resources.maxMeshWorkGroupSizeZ_NV);
+ s.append(builtInConstant);
+ snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxTaskWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxTaskWorkGroupSizeX_NV,
+ resources.maxTaskWorkGroupSizeY_NV,
+ resources.maxTaskWorkGroupSizeZ_NV);
+ s.append(builtInConstant);
+
+ snprintf(builtInConstant, maxSize, "const int gl_MaxMeshViewCountNV = %d;", resources.maxMeshViewCountNV);
+ s.append(builtInConstant);
+
+ s.append("\n");
+ }
+#endif
+
+ s.append("\n");
+}
+
+//
+// To support built-ins whose special qualifier cannot be declared textually in a
+// shader, like gl_Position.
+//
+// This lets the type of the built-in be declared textually, and then have just its qualifier be
+// updated afterward.
+//
+// Safe to call even if name is not present.
+//
+// Only use this for built-in variables that have a special qualifier in TStorageQualifier.
+// New built-in variables should use a generic (textually declarable) qualifier in
+// TStorageQualifier and only call BuiltInVariable().
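+//
+// Example use (mirroring calls made later in identifyBuiltIns()):
+//     SpecialQualifier("gl_Position", EvqPosition, EbvPosition, symbolTable);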
+//
+static void SpecialQualifier(const char* name, TStorageQualifier qualifier, TBuiltInVariable builtIn, TSymbolTable& symbolTable)
+{
+ TSymbol* symbol = symbolTable.find(name);
+ if (symbol == nullptr)
+ return;
+
+ TQualifier& symQualifier = symbol->getWritableType().getQualifier();
+ symQualifier.storage = qualifier;
+ symQualifier.builtIn = builtIn;
+}
+
+//
+// To tag built-in variables with their TBuiltInVariable enum. Use this when the
+// normal declaration text already gets the qualifier right, and all that's needed
+// is setting the builtIn field. This should be the normal way for all new
+// built-in variables.
+//
+// If SpecialQualifier() was called, this does not need to be called.
+//
+// Safe to call even if name is not present.
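+//
+// Example use:
+//     BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);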
+//
+static void BuiltInVariable(const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable)
+{
+ TSymbol* symbol = symbolTable.find(name);
+ if (symbol == nullptr)
+ return;
+
+ TQualifier& symQualifier = symbol->getWritableType().getQualifier();
+ symQualifier.builtIn = builtIn;
+}
+
+//
+// For built-in variables inside a named block.
+// SpecialQualifier() won't ever go inside a block; a block member's qualifier comes
+// from the qualification of the block.
+//
+// See comments above for further detail.
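+//
+// Example use, tagging a member of the built-in gl_in block:
+//     BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable);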
+//
+static void BuiltInVariable(const char* blockName, const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable)
+{
+ TSymbol* symbol = symbolTable.find(blockName);
+ if (symbol == nullptr)
+ return;
+
+ TTypeList& structure = *symbol->getWritableType().getWritableStruct();
+ for (int i = 0; i < (int)structure.size(); ++i) {
+ if (structure[i].type->getFieldName().compare(name) == 0) {
+ structure[i].type->getQualifier().builtIn = builtIn;
+ return;
+ }
+ }
+}
+
+//
+// Finish adding/processing context-independent built-in symbols.
+// 1) Programmatically add symbols that could not be added by simple text strings above.
+// 2) Map built-in functions to operators, for those that will turn into an operation node
+// instead of remaining a function call.
+// 3) Tag extension-related symbols added to their base version with their extensions, so
+// that if an early version has the extension turned off, there is an error reported on use.
+//
+void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable)
+{
+ //
+ // Tag built-in variables and functions with additional qualifier and extension information
+ // that cannot be declared with the text strings.
+ //
+
+ // N.B.: a symbol should only be tagged once, and this function is called multiple times, once
+ // per stage that's used for this profile. So
+ // - generally, stick common ones in the fragment stage to ensure they are tagged exactly once
+ // - for ES, which has different precisions for different stages, the coarsest-grained tagging
+ // for a built-in used in many stages needs to be once for the fragment stage and once for
+ // the vertex stage
+
+    switch (language) {
+ case EShLangVertex:
+ if (profile != EEsProfile) {
+ if (version >= 440) {
+ symbolTable.setVariableExtensions("gl_BaseVertexARB", 1, &E_GL_ARB_shader_draw_parameters);
+ symbolTable.setVariableExtensions("gl_BaseInstanceARB", 1, &E_GL_ARB_shader_draw_parameters);
+ symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters);
+ BuiltInVariable("gl_BaseVertexARB", EbvBaseVertex, symbolTable);
+ BuiltInVariable("gl_BaseInstanceARB", EbvBaseInstance, symbolTable);
+ BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable);
+ }
+ if (version >= 460) {
+ BuiltInVariable("gl_BaseVertex", EbvBaseVertex, symbolTable);
+ BuiltInVariable("gl_BaseInstance", EbvBaseInstance, symbolTable);
+ BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable);
+ }
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ symbolTable.setFunctionExtensions("ballotARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setFunctionExtensions("readInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setFunctionExtensions("readFirstInvocationARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+
+ if (version >= 430) {
+ symbolTable.setFunctionExtensions("anyInvocationARB", 1, &E_GL_ARB_shader_group_vote);
+ symbolTable.setFunctionExtensions("allInvocationsARB", 1, &E_GL_ARB_shader_group_vote);
+ symbolTable.setFunctionExtensions("allInvocationsEqualARB", 1, &E_GL_ARB_shader_group_vote);
+ }
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("minInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("swizzleInvocationsAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("swizzleInvocationsWithPatternAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("writeInvocationAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("mbcntAMD", 1, &E_GL_AMD_shader_ballot);
+
+ symbolTable.setFunctionExtensions("minInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("minInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ symbolTable.setFunctionExtensions("addInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("min3", 1, &E_GL_AMD_shader_trinary_minmax);
+ symbolTable.setFunctionExtensions("max3", 1, &E_GL_AMD_shader_trinary_minmax);
+ symbolTable.setFunctionExtensions("mid3", 1, &E_GL_AMD_shader_trinary_minmax);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("cubeFaceIndexAMD", 1, &E_GL_AMD_gcn_shader);
+ symbolTable.setFunctionExtensions("cubeFaceCoordAMD", 1, &E_GL_AMD_gcn_shader);
+ symbolTable.setFunctionExtensions("timeAMD", 1, &E_GL_AMD_gcn_shader);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("fragmentMaskFetchAMD", 1, &E_GL_AMD_shader_fragment_mask);
+ symbolTable.setFunctionExtensions("fragmentFetchAMD", 1, &E_GL_AMD_shader_fragment_mask);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ symbolTable.setFunctionExtensions("textureFootprintNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintClampNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintLodNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintGradNV", 1, &E_GL_NV_shader_texture_footprint);
+ symbolTable.setFunctionExtensions("textureFootprintGradClampNV", 1, &E_GL_NV_shader_texture_footprint);
+#endif
+ // Compatibility variables, vertex only
+ if (spvVersion.spv == 0) {
+ BuiltInVariable("gl_Color", EbvColor, symbolTable);
+ BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable);
+ BuiltInVariable("gl_Normal", EbvNormal, symbolTable);
+ BuiltInVariable("gl_Vertex", EbvVertex, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord0", EbvMultiTexCoord0, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord1", EbvMultiTexCoord1, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord2", EbvMultiTexCoord2, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord3", EbvMultiTexCoord3, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord4", EbvMultiTexCoord4, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord5", EbvMultiTexCoord5, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord6", EbvMultiTexCoord6, symbolTable);
+ BuiltInVariable("gl_MultiTexCoord7", EbvMultiTexCoord7, symbolTable);
+ BuiltInVariable("gl_FogCoord", EbvFogFragCoord, symbolTable);
+ }
+
+ if (profile == EEsProfile) {
+ if (spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ if (version == 310)
+ symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ }
+ if (version == 310)
+ symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ }
+
+ if (profile == EEsProfile && version < 320) {
+ symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic);
+ }
+
+ if (spvVersion.vulkan == 0) {
+ SpecialQualifier("gl_VertexID", EvqVertexId, EbvVertexId, symbolTable);
+ SpecialQualifier("gl_InstanceID", EvqInstanceId, EbvInstanceId, symbolTable);
+ }
+
+ if (spvVersion.vulkan > 0) {
+ BuiltInVariable("gl_VertexIndex", EbvVertexIndex, symbolTable);
+ BuiltInVariable("gl_InstanceIndex", EbvInstanceIndex, symbolTable);
+ }
+
+ if (version >= 300 /* both ES and non-ES */) {
+ symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs);
+ BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable);
+ }
+
+ if (profile == EEsProfile) {
+ symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers);
+ symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers);
+ }
+
+ // Fall through
+
+ case EShLangTessControl:
+ if (profile == EEsProfile && version >= 310) {
+ BuiltInVariable("gl_BoundingBoxEXT", EbvBoundingBox, symbolTable);
+ symbolTable.setVariableExtensions("gl_BoundingBoxEXT", 1,
+ &E_GL_EXT_primitive_bounding_box);
+ BuiltInVariable("gl_BoundingBoxOES", EbvBoundingBox, symbolTable);
+ symbolTable.setVariableExtensions("gl_BoundingBoxOES", 1,
+ &E_GL_OES_primitive_bounding_box);
+
+ if (version >= 320) {
+ BuiltInVariable("gl_BoundingBox", EbvBoundingBox, symbolTable);
+ }
+ }
+
+ // Fall through
+
+ case EShLangTessEvaluation:
+ case EShLangGeometry:
+ SpecialQualifier("gl_Position", EvqPosition, EbvPosition, symbolTable);
+ SpecialQualifier("gl_PointSize", EvqPointSize, EbvPointSize, symbolTable);
+ SpecialQualifier("gl_ClipVertex", EvqClipVertex, EbvClipVertex, symbolTable);
+
+ BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ BuiltInVariable("gl_out", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_out", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_out", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_out", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable);
+ BuiltInVariable("gl_PrimitiveIDIn", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_InvocationID", EbvInvocationId, symbolTable);
+ BuiltInVariable("gl_Layer", EbvLayer, symbolTable);
+ BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable);
+
+#ifdef NV_EXTENSIONS
+ if (language != EShLangGeometry) {
+ symbolTable.setVariableExtensions("gl_Layer", Num_viewportEXTs, viewportEXTs);
+ symbolTable.setVariableExtensions("gl_ViewportIndex", Num_viewportEXTs, viewportEXTs);
+ }
+#else
+ if (language != EShLangGeometry && version >= 410) {
+ symbolTable.setVariableExtensions("gl_Layer", 1, &E_GL_ARB_shader_viewport_layer_array);
+ symbolTable.setVariableExtensions("gl_ViewportIndex", 1, &E_GL_ARB_shader_viewport_layer_array);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ symbolTable.setVariableExtensions("gl_ViewportMask", 1, &E_GL_NV_viewport_array2);
+ symbolTable.setVariableExtensions("gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+ symbolTable.setVariableExtensions("gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+
+ BuiltInVariable("gl_ViewportMask", EbvViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable);
+ BuiltInVariable("gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ BuiltInVariable("gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable);
+
+ if (language != EShLangVertex) {
+ symbolTable.setVariableExtensions("gl_in", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_in", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+
+ BuiltInVariable("gl_in", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable);
+ BuiltInVariable("gl_in", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ }
+ symbolTable.setVariableExtensions("gl_out", "gl_ViewportMask", 1, &E_GL_NV_viewport_array2);
+ symbolTable.setVariableExtensions("gl_out", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_out", "gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering);
+ symbolTable.setVariableExtensions("gl_out", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+ symbolTable.setVariableExtensions("gl_out", "gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes);
+
+ BuiltInVariable("gl_out", "gl_ViewportMask", EbvViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ BuiltInVariable("gl_out", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable);
+#endif
+
+ BuiltInVariable("gl_PatchVerticesIn", EbvPatchVertices, symbolTable);
+ BuiltInVariable("gl_TessLevelOuter", EbvTessLevelOuter, symbolTable);
+ BuiltInVariable("gl_TessLevelInner", EbvTessLevelInner, symbolTable);
+ BuiltInVariable("gl_TessCoord", EbvTessCoord, symbolTable);
+
+ if (version < 410)
+ symbolTable.setVariableExtensions("gl_ViewportIndex", 1, &E_GL_ARB_viewport_array);
+
+ // Compatibility variables
+
+ BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ BuiltInVariable("gl_out", "gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_out", "gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_out", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_out", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ BuiltInVariable("gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ // gl_PointSize, when it needs to be tied to an extension, is always a member of a block.
+ // (Sometimes with an instance name, sometimes anonymous).
+ if (profile == EEsProfile) {
+ if (language == EShLangGeometry) {
+ symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size);
+ symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size);
+ } else if (language == EShLangTessEvaluation || language == EShLangTessControl) {
+ // gl_in tessellation settings of gl_PointSize are in the context-dependent paths
+ symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size);
+ symbolTable.setVariableExtensions("gl_out", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size);
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview);
+ BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+ }
+
+ break;
+
+ case EShLangFragment:
+ SpecialQualifier("gl_FrontFacing", EvqFace, EbvFace, symbolTable);
+ SpecialQualifier("gl_FragCoord", EvqFragCoord, EbvFragCoord, symbolTable);
+ SpecialQualifier("gl_PointCoord", EvqPointCoord, EbvPointCoord, symbolTable);
+ if (spvVersion.spv == 0)
+ SpecialQualifier("gl_FragColor", EvqFragColor, EbvFragColor, symbolTable);
+ else {
+ TSymbol* symbol = symbolTable.find("gl_FragColor");
+ if (symbol) {
+ symbol->getWritableType().getQualifier().storage = EvqVaryingOut;
+ symbol->getWritableType().getQualifier().layoutLocation = 0;
+ }
+ }
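+ // Note: SPIR-V has no gl_FragColor built-in, so for SPIR-V generation the
+ // symbol is retargeted above to an ordinary fragment output at location 0.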
+ SpecialQualifier("gl_FragDepth", EvqFragDepth, EbvFragDepth, symbolTable);
+ SpecialQualifier("gl_FragDepthEXT", EvqFragDepth, EbvFragDepth, symbolTable);
+ SpecialQualifier("gl_HelperInvocation", EvqVaryingIn, EbvHelperInvocation, symbolTable);
+
+ BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable);
+ BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+
+ if (profile != EEsProfile && version >= 140) {
+ symbolTable.setVariableExtensions("gl_FragStencilRefARB", 1, &E_GL_ARB_shader_stencil_export);
+ BuiltInVariable("gl_FragStencilRefARB", EbvFragStencilRef, symbolTable);
+ }
+
+ if ((profile != EEsProfile && version >= 400) ||
+ (profile == EEsProfile && version >= 310)) {
+ BuiltInVariable("gl_SampleID", EbvSampleId, symbolTable);
+ BuiltInVariable("gl_SamplePosition", EbvSamplePosition, symbolTable);
+ BuiltInVariable("gl_SampleMaskIn", EbvSampleMask, symbolTable);
+ BuiltInVariable("gl_SampleMask", EbvSampleMask, symbolTable);
+ if (profile == EEsProfile && version < 320) {
+ symbolTable.setVariableExtensions("gl_SampleID", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_SamplePosition", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_SampleMaskIn", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_SampleMask", 1, &E_GL_OES_sample_variables);
+ symbolTable.setVariableExtensions("gl_NumSamples", 1, &E_GL_OES_sample_variables);
+ }
+ }
+
+ BuiltInVariable("gl_Layer", EbvLayer, symbolTable);
+ BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable);
+
+ // Compatibility variables
+
+ BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_Color", EbvColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_SecondaryColor", EbvSecondaryColor, symbolTable);
+
+ BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+ BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_Color", EbvColor, symbolTable);
+ BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable);
+
+ // built-in functions
+
+ if (profile == EEsProfile) {
+ if (spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture2DLodEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjLodEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeLodEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod);
+ if (version < 320)
+ symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ }
+ if (version == 100) {
+ symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_OES_standard_derivatives);
+ symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_OES_standard_derivatives);
+ symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_OES_standard_derivatives);
+ }
+ if (version == 310) {
+ symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5);
+ symbolTable.setFunctionExtensions("interpolateAtCentroid", 1, &E_GL_OES_shader_multisample_interpolation);
+ symbolTable.setFunctionExtensions("interpolateAtSample", 1, &E_GL_OES_shader_multisample_interpolation);
+ symbolTable.setFunctionExtensions("interpolateAtOffset", 1, &E_GL_OES_shader_multisample_interpolation);
+ }
+ } else if (version < 130) {
+ if (spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture1DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture1DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DProjLod", 1, &E_GL_ARB_shader_texture_lod);
+ }
+ }
+
+ // E_GL_ARB_shader_texture_lod functions are usable only when the extension is enabled
+ if (profile != EEsProfile && spvVersion.spv == 0) {
+ symbolTable.setFunctionExtensions("texture1DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture3DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("textureCubeGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DRectGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("texture2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DRectGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ symbolTable.setFunctionExtensions("shadow2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod);
+ }
+
+ // E_GL_ARB_shader_image_load_store
+ if (profile != EEsProfile && version < 420)
+ symbolTable.setFunctionExtensions("memoryBarrier", 1, &E_GL_ARB_shader_image_load_store);
+ // All the image access functions are protected by checks on the type of the first argument.
+
+ // E_GL_ARB_shader_atomic_counters
+ if (profile != EEsProfile && version < 420) {
+ symbolTable.setFunctionExtensions("atomicCounterIncrement", 1, &E_GL_ARB_shader_atomic_counters);
+ symbolTable.setFunctionExtensions("atomicCounterDecrement", 1, &E_GL_ARB_shader_atomic_counters);
+ symbolTable.setFunctionExtensions("atomicCounter" , 1, &E_GL_ARB_shader_atomic_counters);
+ }
+
+ // E_GL_ARB_derivative_control
+ if (profile != EEsProfile && version < 450) {
+ symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_ARB_derivative_control);
+ symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_ARB_derivative_control);
+ }
+
+ // E_GL_ARB_sparse_texture2
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("sparseTextureARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureLodARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTexelFetchARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTexelFetchOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureLodOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGradARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGradOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGatherARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGatherOffsetARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTextureGatherOffsetsARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseImageLoadARB", 1, &E_GL_ARB_sparse_texture2);
+ symbolTable.setFunctionExtensions("sparseTexelsResident", 1, &E_GL_ARB_sparse_texture2);
+ }
+
+ // E_GL_ARB_sparse_texture_clamp
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("sparseTextureClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("sparseTextureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("sparseTextureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("sparseTextureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ symbolTable.setFunctionExtensions("textureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp);
+ }
+
+#ifdef AMD_EXTENSIONS
+ // E_GL_AMD_shader_explicit_vertex_parameter
+ if (profile != EEsProfile) {
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordSmoothAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordSmoothCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordSmoothSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+ symbolTable.setVariableExtensions("gl_BaryCoordPullModelAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+
+ symbolTable.setFunctionExtensions("interpolateAtVertexAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter);
+
+ BuiltInVariable("gl_BaryCoordNoPerspAMD", EbvBaryCoordNoPersp, symbolTable);
+ BuiltInVariable("gl_BaryCoordNoPerspCentroidAMD", EbvBaryCoordNoPerspCentroid, symbolTable);
+ BuiltInVariable("gl_BaryCoordNoPerspSampleAMD", EbvBaryCoordNoPerspSample, symbolTable);
+ BuiltInVariable("gl_BaryCoordSmoothAMD", EbvBaryCoordSmooth, symbolTable);
+ BuiltInVariable("gl_BaryCoordSmoothCentroidAMD", EbvBaryCoordSmoothCentroid, symbolTable);
+ BuiltInVariable("gl_BaryCoordSmoothSampleAMD", EbvBaryCoordSmoothSample, symbolTable);
+ BuiltInVariable("gl_BaryCoordPullModelAMD", EbvBaryCoordPullModel, symbolTable);
+ }
+
+ // E_GL_AMD_texture_gather_bias_lod
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("textureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("textureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("textureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("sparseTextureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod);
+ }
+
+ // E_GL_AMD_shader_image_load_store_lod
+ if (profile != EEsProfile) {
+ symbolTable.setFunctionExtensions("imageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod);
+ symbolTable.setFunctionExtensions("imageStoreLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod);
+ symbolTable.setFunctionExtensions("sparseImageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod);
+ }
+#endif
+
+#ifdef NV_EXTENSIONS
+ if (profile != EEsProfile && version >= 430) {
+ symbolTable.setVariableExtensions("gl_FragFullyCoveredNV", 1, &E_GL_NV_conservative_raster_underestimation);
+ BuiltInVariable("gl_FragFullyCoveredNV", EbvFragFullyCoveredNV, symbolTable);
+ }
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) {
+ symbolTable.setVariableExtensions("gl_FragmentSizeNV", 1, &E_GL_NV_shading_rate_image);
+ symbolTable.setVariableExtensions("gl_InvocationsPerPixelNV", 1, &E_GL_NV_shading_rate_image);
+ BuiltInVariable("gl_FragmentSizeNV", EbvFragmentSizeNV, symbolTable);
+ BuiltInVariable("gl_InvocationsPerPixelNV", EbvInvocationsPerPixelNV, symbolTable);
+ symbolTable.setVariableExtensions("gl_BaryCoordNV", 1, &E_GL_NV_fragment_shader_barycentric);
+ symbolTable.setVariableExtensions("gl_BaryCoordNoPerspNV", 1, &E_GL_NV_fragment_shader_barycentric);
+ BuiltInVariable("gl_BaryCoordNV", EbvBaryCoordNV, symbolTable);
+ BuiltInVariable("gl_BaryCoordNoPerspNV", EbvBaryCoordNoPerspNV, symbolTable);
+ }
+ if (((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) &&
+ language == EShLangCompute) {
+ symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_NV_compute_shader_derivatives);
+ symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_NV_compute_shader_derivatives);
+ }
+#endif
+
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 310)) {
+ symbolTable.setVariableExtensions("gl_FragSizeEXT", 1, &E_GL_EXT_fragment_invocation_density);
+ symbolTable.setVariableExtensions("gl_FragInvocationCountEXT", 1, &E_GL_EXT_fragment_invocation_density);
+ BuiltInVariable("gl_FragSizeEXT", EbvFragSizeEXT, symbolTable);
+ BuiltInVariable("gl_FragInvocationCountEXT", EbvFragInvocationCountEXT, symbolTable);
+ }
+
+ symbolTable.setVariableExtensions("gl_FragDepthEXT", 1, &E_GL_EXT_frag_depth);
+
+ if (profile == EEsProfile && version < 320) {
+ symbolTable.setVariableExtensions("gl_PrimitiveID", Num_AEP_geometry_shader, AEP_geometry_shader);
+ symbolTable.setVariableExtensions("gl_Layer", Num_AEP_geometry_shader, AEP_geometry_shader);
+
+ symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic);
+ symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic);
+ }
+
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview);
+ BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable);
+ if (version >= 300 /* both ES and non-ES */) {
+ symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs);
+ BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable);
+ }
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile) {
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupBarrier", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrier", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierBuffer", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierImage", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupElect", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setFunctionExtensions("subgroupAll", 1, &E_GL_KHR_shader_subgroup_vote);
+ symbolTable.setFunctionExtensions("subgroupAny", 1, &E_GL_KHR_shader_subgroup_vote);
+ symbolTable.setFunctionExtensions("subgroupAllEqual", 1, &E_GL_KHR_shader_subgroup_vote);
+ symbolTable.setFunctionExtensions("subgroupBroadcast", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBroadcastFirst", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallot", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupInverseBallot", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotBitExtract", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotBitCount", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotInclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotExclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotFindLSB", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupBallotFindMSB", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setFunctionExtensions("subgroupShuffle", 1, &E_GL_KHR_shader_subgroup_shuffle);
+ symbolTable.setFunctionExtensions("subgroupShuffleXor", 1, &E_GL_KHR_shader_subgroup_shuffle);
+ symbolTable.setFunctionExtensions("subgroupShuffleUp", 1, &E_GL_KHR_shader_subgroup_shuffle_relative);
+ symbolTable.setFunctionExtensions("subgroupShuffleDown", 1, &E_GL_KHR_shader_subgroup_shuffle_relative);
+ symbolTable.setFunctionExtensions("subgroupAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupMul", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupMin", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupMax", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupOr", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupXor", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveMax", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupInclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveMax", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupExclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic);
+ symbolTable.setFunctionExtensions("subgroupClusteredAdd", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredMul", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredMin", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredMax", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredAnd", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredOr", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupClusteredXor", 1, &E_GL_KHR_shader_subgroup_clustered);
+ symbolTable.setFunctionExtensions("subgroupQuadBroadcast", 1, &E_GL_KHR_shader_subgroup_quad);
+ symbolTable.setFunctionExtensions("subgroupQuadSwapHorizontal", 1, &E_GL_KHR_shader_subgroup_quad);
+ symbolTable.setFunctionExtensions("subgroupQuadSwapVertical", 1, &E_GL_KHR_shader_subgroup_quad);
+ symbolTable.setFunctionExtensions("subgroupQuadSwapDiagonal", 1, &E_GL_KHR_shader_subgroup_quad);
+
+#ifdef NV_EXTENSIONS
+ symbolTable.setFunctionExtensions("subgroupPartitionNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedAddNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedMulNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedMinNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedAndNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedOrNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedXorNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAndNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAndNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+ symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned);
+#endif
+ }
+
+ if (profile == EEsProfile) {
+ symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers);
+ symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers);
+ }
+
+ if (spvVersion.vulkan > 0) {
+ symbolTable.setVariableExtensions("gl_ScopeDevice", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_ScopeWorkgroup", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_ScopeSubgroup", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_ScopeInvocation", 1, &E_GL_KHR_memory_scope_semantics);
+
+ symbolTable.setVariableExtensions("gl_SemanticsRelaxed", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsAcquire", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsRelease", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsAcquireRelease", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsMakeAvailable", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_SemanticsMakeVisible", 1, &E_GL_KHR_memory_scope_semantics);
+
+ symbolTable.setVariableExtensions("gl_StorageSemanticsNone", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsBuffer", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsShared", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsImage", 1, &E_GL_KHR_memory_scope_semantics);
+ symbolTable.setVariableExtensions("gl_StorageSemanticsOutput", 1, &E_GL_KHR_memory_scope_semantics);
+ }
+ break;
+
+ case EShLangCompute:
+ BuiltInVariable("gl_NumWorkGroups", EbvNumWorkGroups, symbolTable);
+ BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable);
+ BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable);
+ BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable);
+
+ if (profile != EEsProfile && version < 430) {
+ symbolTable.setVariableExtensions("gl_NumWorkGroups", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_ARB_compute_shader);
+
+ symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupCount", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupSize", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeUniformComponents", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeTextureImageUnits", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeImageUniforms", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounters", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounterBuffers", 1, &E_GL_ARB_compute_shader);
+
+ symbolTable.setFunctionExtensions("barrier", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierAtomicCounter", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierBuffer", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierImage", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_ARB_compute_shader);
+ symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_ARB_compute_shader);
+ }
+
+ symbolTable.setFunctionExtensions("controlBarrier", 1, &E_GL_KHR_memory_scope_semantics);
+
+ // GL_ARB_shader_ballot
+ if (profile != EEsProfile) {
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview);
+ BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic);
+
+ BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable);
+ BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic);
+ }
+
+ symbolTable.setFunctionExtensions("coopMatLoadNV", 1, &E_GL_NV_cooperative_matrix);
+ symbolTable.setFunctionExtensions("coopMatStoreNV", 1, &E_GL_NV_cooperative_matrix);
+ symbolTable.setFunctionExtensions("coopMatMulAddNV", 1, &E_GL_NV_cooperative_matrix);
+
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.setVariableExtensions("gl_LaunchIDNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_LaunchSizeNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_PrimitiveID", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_InstanceID", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_InstanceCustomIndexNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_WorldRayOriginNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_WorldRayDirectionNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_ObjectRayOriginNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_ObjectRayDirectionNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_RayTminNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_RayTmaxNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_HitTNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_HitKindNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_ObjectToWorldNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_WorldToObjectNV", 1, &E_GL_NV_ray_tracing);
+ symbolTable.setVariableExtensions("gl_IncomingRayFlagsNV", 1, &E_GL_NV_ray_tracing);
+
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+
+ BuiltInVariable("gl_LaunchIDNV", EbvLaunchIdNV, symbolTable);
+ BuiltInVariable("gl_LaunchSizeNV", EbvLaunchSizeNV, symbolTable);
+ BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_InstanceID", EbvInstanceId, symbolTable);
+ BuiltInVariable("gl_InstanceCustomIndexNV", EbvInstanceCustomIndexNV,symbolTable);
+ BuiltInVariable("gl_WorldRayOriginNV", EbvWorldRayOriginNV, symbolTable);
+ BuiltInVariable("gl_WorldRayDirectionNV", EbvWorldRayDirectionNV, symbolTable);
+ BuiltInVariable("gl_ObjectRayOriginNV", EbvObjectRayOriginNV, symbolTable);
+ BuiltInVariable("gl_ObjectRayDirectionNV", EbvObjectRayDirectionNV, symbolTable);
+ BuiltInVariable("gl_RayTminNV", EbvRayTminNV, symbolTable);
+ BuiltInVariable("gl_RayTmaxNV", EbvRayTmaxNV, symbolTable);
+ BuiltInVariable("gl_HitTNV", EbvHitTNV, symbolTable);
+ BuiltInVariable("gl_HitKindNV", EbvHitKindNV, symbolTable);
+ BuiltInVariable("gl_ObjectToWorldNV", EbvObjectToWorldNV, symbolTable);
+ BuiltInVariable("gl_WorldToObjectNV", EbvWorldToObjectNV, symbolTable);
+ BuiltInVariable("gl_IncomingRayFlagsNV", EbvIncomingRayFlagsNV, symbolTable);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+ }
+ break;
+ case EShLangMeshNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ // per-vertex builtins
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_Position", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PointSize", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistance", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistance", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshVerticesNV", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PositionPerViewNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshVerticesNV", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", EbvClipDistancePerViewNV, symbolTable);
+ BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", EbvCullDistancePerViewNV, symbolTable);
+
+ // per-primitive builtins
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_PrimitiveID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_Layer", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportIndex", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMask", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_PrimitiveID", EbvPrimitiveId, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_Layer", EbvLayer, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportIndex", EbvViewportIndex, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMask", EbvViewportMaskNV, symbolTable);
+
+ // per-view per-primitive builtins
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", EbvLayerPerViewNV, symbolTable);
+ BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable);
+
+ // other builtins
+ symbolTable.setVariableExtensions("gl_PrimitiveCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_PrimitiveIndicesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_PrimitiveCountNV", EbvPrimitiveCountNV, symbolTable);
+ BuiltInVariable("gl_PrimitiveIndicesNV", EbvPrimitiveIndicesNV, symbolTable);
+ BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable);
+ BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable);
+ BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable);
+ BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable);
+ BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable);
+
+ // builtin constants
+ symbolTable.setVariableExtensions("gl_MaxMeshOutputVerticesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshOutputPrimitivesNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+
+ // builtin functions
+ symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader);
+ }
+
+ if (profile != EEsProfile && version >= 450) {
+ // GL_EXT_device_group
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+
+ // GL_ARB_shader_draw_parameters
+ symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters);
+ BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable);
+ if (version >= 460) {
+ BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable);
+ }
+
+ // GL_ARB_shader_ballot
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable);
+ BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable);
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic);
+ }
+ break;
+
+ case EShLangTaskNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ symbolTable.setVariableExtensions("gl_TaskCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader);
+
+ BuiltInVariable("gl_TaskCountNV", EbvTaskCountNV, symbolTable);
+ BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable);
+ BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable);
+ BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable);
+ BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable);
+ BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable);
+ BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable);
+
+ symbolTable.setVariableExtensions("gl_MaxTaskWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader);
+
+ symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader);
+ symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader);
+ }
+
+ if (profile != EEsProfile && version >= 450) {
+ // GL_EXT_device_group
+ symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group);
+ BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable);
+
+ // GL_ARB_shader_draw_parameters
+ symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters);
+ BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable);
+ if (version >= 460) {
+ BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable);
+ }
+
+ // GL_ARB_shader_ballot
+ symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot);
+ symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot);
+
+ BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable);
+ BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable);
+ BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable);
+
+ if (spvVersion.vulkan > 0)
+ // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan
+ SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable);
+ else
+ BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable);
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic);
+ symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+ symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot);
+
+ BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable);
+ BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable);
+ BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable);
+ BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable);
+ BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable);
+ BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable);
+
+ symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic);
+ }
+ break;
+#endif
+
+ default:
+ assert(false && "Language not supported");
+ break;
+ }
+
+ //
+ // Next, identify which built-ins have a mapping to an operator.
+ // If PureOperatorBuiltins is false, built-ins not identified as such are
+ // expected to be resolved through a library of functions rather than as
+ // operations.
+ //
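+ // For example, once relateToOperator("dot", EOpDot) has run, a call such as
+ // dot(a, b) is turned by the front end directly into an EOpDot node instead
+ // of remaining an unresolved function call.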
+ symbolTable.relateToOperator("not", EOpVectorLogicalNot);
+
+ symbolTable.relateToOperator("matrixCompMult", EOpMul);
+ // 120 and 150 are correct for both ES and desktop
+ if (version >= 120) {
+ symbolTable.relateToOperator("outerProduct", EOpOuterProduct);
+ symbolTable.relateToOperator("transpose", EOpTranspose);
+ if (version >= 150) {
+ symbolTable.relateToOperator("determinant", EOpDeterminant);
+ symbolTable.relateToOperator("inverse", EOpMatrixInverse);
+ }
+ }
+
+ symbolTable.relateToOperator("mod", EOpMod);
+ symbolTable.relateToOperator("modf", EOpModf);
+
+ symbolTable.relateToOperator("equal", EOpVectorEqual);
+ symbolTable.relateToOperator("notEqual", EOpVectorNotEqual);
+ symbolTable.relateToOperator("lessThan", EOpLessThan);
+ symbolTable.relateToOperator("greaterThan", EOpGreaterThan);
+ symbolTable.relateToOperator("lessThanEqual", EOpLessThanEqual);
+ symbolTable.relateToOperator("greaterThanEqual", EOpGreaterThanEqual);
+
+ symbolTable.relateToOperator("radians", EOpRadians);
+ symbolTable.relateToOperator("degrees", EOpDegrees);
+ symbolTable.relateToOperator("sin", EOpSin);
+ symbolTable.relateToOperator("cos", EOpCos);
+ symbolTable.relateToOperator("tan", EOpTan);
+ symbolTable.relateToOperator("asin", EOpAsin);
+ symbolTable.relateToOperator("acos", EOpAcos);
+ symbolTable.relateToOperator("atan", EOpAtan);
+ symbolTable.relateToOperator("sinh", EOpSinh);
+ symbolTable.relateToOperator("cosh", EOpCosh);
+ symbolTable.relateToOperator("tanh", EOpTanh);
+ symbolTable.relateToOperator("asinh", EOpAsinh);
+ symbolTable.relateToOperator("acosh", EOpAcosh);
+ symbolTable.relateToOperator("atanh", EOpAtanh);
+
+ symbolTable.relateToOperator("pow", EOpPow);
+ symbolTable.relateToOperator("exp2", EOpExp2);
+ symbolTable.relateToOperator("log", EOpLog);
+ symbolTable.relateToOperator("exp", EOpExp);
+ symbolTable.relateToOperator("log2", EOpLog2);
+ symbolTable.relateToOperator("sqrt", EOpSqrt);
+ symbolTable.relateToOperator("inversesqrt", EOpInverseSqrt);
+
+ symbolTable.relateToOperator("abs", EOpAbs);
+ symbolTable.relateToOperator("sign", EOpSign);
+ symbolTable.relateToOperator("floor", EOpFloor);
+ symbolTable.relateToOperator("trunc", EOpTrunc);
+ symbolTable.relateToOperator("round", EOpRound);
+ symbolTable.relateToOperator("roundEven", EOpRoundEven);
+ symbolTable.relateToOperator("ceil", EOpCeil);
+ symbolTable.relateToOperator("fract", EOpFract);
+ symbolTable.relateToOperator("min", EOpMin);
+ symbolTable.relateToOperator("max", EOpMax);
+ symbolTable.relateToOperator("clamp", EOpClamp);
+ symbolTable.relateToOperator("mix", EOpMix);
+ symbolTable.relateToOperator("step", EOpStep);
+ symbolTable.relateToOperator("smoothstep", EOpSmoothStep);
+
+ symbolTable.relateToOperator("isnan", EOpIsNan);
+ symbolTable.relateToOperator("isinf", EOpIsInf);
+
+ symbolTable.relateToOperator("floatBitsToInt", EOpFloatBitsToInt);
+ symbolTable.relateToOperator("floatBitsToUint", EOpFloatBitsToUint);
+ symbolTable.relateToOperator("intBitsToFloat", EOpIntBitsToFloat);
+ symbolTable.relateToOperator("uintBitsToFloat", EOpUintBitsToFloat);
+ symbolTable.relateToOperator("doubleBitsToInt64", EOpDoubleBitsToInt64);
+ symbolTable.relateToOperator("doubleBitsToUint64", EOpDoubleBitsToUint64);
+ symbolTable.relateToOperator("int64BitsToDouble", EOpInt64BitsToDouble);
+ symbolTable.relateToOperator("uint64BitsToDouble", EOpUint64BitsToDouble);
+ symbolTable.relateToOperator("halfBitsToInt16", EOpFloat16BitsToInt16);
+ symbolTable.relateToOperator("halfBitsToUint16", EOpFloat16BitsToUint16);
+ symbolTable.relateToOperator("float16BitsToInt16", EOpFloat16BitsToInt16);
+ symbolTable.relateToOperator("float16BitsToUint16", EOpFloat16BitsToUint16);
+ symbolTable.relateToOperator("int16BitsToFloat16", EOpInt16BitsToFloat16);
+ symbolTable.relateToOperator("uint16BitsToFloat16", EOpUint16BitsToFloat16);
+
+ symbolTable.relateToOperator("int16BitsToHalf", EOpInt16BitsToFloat16);
+ symbolTable.relateToOperator("uint16BitsToHalf", EOpUint16BitsToFloat16);
+
+ symbolTable.relateToOperator("packSnorm2x16", EOpPackSnorm2x16);
+ symbolTable.relateToOperator("unpackSnorm2x16", EOpUnpackSnorm2x16);
+ symbolTable.relateToOperator("packUnorm2x16", EOpPackUnorm2x16);
+ symbolTable.relateToOperator("unpackUnorm2x16", EOpUnpackUnorm2x16);
+
+ symbolTable.relateToOperator("packSnorm4x8", EOpPackSnorm4x8);
+ symbolTable.relateToOperator("unpackSnorm4x8", EOpUnpackSnorm4x8);
+ symbolTable.relateToOperator("packUnorm4x8", EOpPackUnorm4x8);
+ symbolTable.relateToOperator("unpackUnorm4x8", EOpUnpackUnorm4x8);
+
+ symbolTable.relateToOperator("packDouble2x32", EOpPackDouble2x32);
+ symbolTable.relateToOperator("unpackDouble2x32", EOpUnpackDouble2x32);
+
+ symbolTable.relateToOperator("packHalf2x16", EOpPackHalf2x16);
+ symbolTable.relateToOperator("unpackHalf2x16", EOpUnpackHalf2x16);
+
+ symbolTable.relateToOperator("packInt2x32", EOpPackInt2x32);
+ symbolTable.relateToOperator("unpackInt2x32", EOpUnpackInt2x32);
+ symbolTable.relateToOperator("packUint2x32", EOpPackUint2x32);
+ symbolTable.relateToOperator("unpackUint2x32", EOpUnpackUint2x32);
+
+ symbolTable.relateToOperator("packInt2x16", EOpPackInt2x16);
+ symbolTable.relateToOperator("unpackInt2x16", EOpUnpackInt2x16);
+ symbolTable.relateToOperator("packUint2x16", EOpPackUint2x16);
+ symbolTable.relateToOperator("unpackUint2x16", EOpUnpackUint2x16);
+
+ symbolTable.relateToOperator("packInt4x16", EOpPackInt4x16);
+ symbolTable.relateToOperator("unpackInt4x16", EOpUnpackInt4x16);
+ symbolTable.relateToOperator("packUint4x16", EOpPackUint4x16);
+ symbolTable.relateToOperator("unpackUint4x16", EOpUnpackUint4x16);
+ symbolTable.relateToOperator("packFloat2x16", EOpPackFloat2x16);
+ symbolTable.relateToOperator("unpackFloat2x16", EOpUnpackFloat2x16);
+
+ symbolTable.relateToOperator("pack16", EOpPack16);
+ symbolTable.relateToOperator("pack32", EOpPack32);
+ symbolTable.relateToOperator("pack64", EOpPack64);
+
+ symbolTable.relateToOperator("unpack32", EOpUnpack32);
+ symbolTable.relateToOperator("unpack16", EOpUnpack16);
+ symbolTable.relateToOperator("unpack8", EOpUnpack8);
+
+ symbolTable.relateToOperator("length", EOpLength);
+ symbolTable.relateToOperator("distance", EOpDistance);
+ symbolTable.relateToOperator("dot", EOpDot);
+ symbolTable.relateToOperator("cross", EOpCross);
+ symbolTable.relateToOperator("normalize", EOpNormalize);
+ symbolTable.relateToOperator("faceforward", EOpFaceForward);
+ symbolTable.relateToOperator("reflect", EOpReflect);
+ symbolTable.relateToOperator("refract", EOpRefract);
+
+ symbolTable.relateToOperator("any", EOpAny);
+ symbolTable.relateToOperator("all", EOpAll);
+
+ symbolTable.relateToOperator("barrier", EOpBarrier);
+ symbolTable.relateToOperator("controlBarrier", EOpBarrier);
+ symbolTable.relateToOperator("memoryBarrier", EOpMemoryBarrier);
+ symbolTable.relateToOperator("memoryBarrierAtomicCounter", EOpMemoryBarrierAtomicCounter);
+ symbolTable.relateToOperator("memoryBarrierBuffer", EOpMemoryBarrierBuffer);
+ symbolTable.relateToOperator("memoryBarrierImage", EOpMemoryBarrierImage);
+
+ symbolTable.relateToOperator("atomicAdd", EOpAtomicAdd);
+ symbolTable.relateToOperator("atomicMin", EOpAtomicMin);
+ symbolTable.relateToOperator("atomicMax", EOpAtomicMax);
+ symbolTable.relateToOperator("atomicAnd", EOpAtomicAnd);
+ symbolTable.relateToOperator("atomicOr", EOpAtomicOr);
+ symbolTable.relateToOperator("atomicXor", EOpAtomicXor);
+ symbolTable.relateToOperator("atomicExchange", EOpAtomicExchange);
+ symbolTable.relateToOperator("atomicCompSwap", EOpAtomicCompSwap);
+ symbolTable.relateToOperator("atomicLoad", EOpAtomicLoad);
+ symbolTable.relateToOperator("atomicStore", EOpAtomicStore);
+
+ symbolTable.relateToOperator("atomicCounterIncrement", EOpAtomicCounterIncrement);
+ symbolTable.relateToOperator("atomicCounterDecrement", EOpAtomicCounterDecrement);
+ symbolTable.relateToOperator("atomicCounter", EOpAtomicCounter);
+
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("atomicCounterAdd", EOpAtomicCounterAdd);
+ symbolTable.relateToOperator("atomicCounterSubtract", EOpAtomicCounterSubtract);
+ symbolTable.relateToOperator("atomicCounterMin", EOpAtomicCounterMin);
+ symbolTable.relateToOperator("atomicCounterMax", EOpAtomicCounterMax);
+ symbolTable.relateToOperator("atomicCounterAnd", EOpAtomicCounterAnd);
+ symbolTable.relateToOperator("atomicCounterOr", EOpAtomicCounterOr);
+ symbolTable.relateToOperator("atomicCounterXor", EOpAtomicCounterXor);
+ symbolTable.relateToOperator("atomicCounterExchange", EOpAtomicCounterExchange);
+ symbolTable.relateToOperator("atomicCounterCompSwap", EOpAtomicCounterCompSwap);
+ }
+
+ symbolTable.relateToOperator("fma", EOpFma);
+ symbolTable.relateToOperator("frexp", EOpFrexp);
+ symbolTable.relateToOperator("ldexp", EOpLdexp);
+ symbolTable.relateToOperator("uaddCarry", EOpAddCarry);
+ symbolTable.relateToOperator("usubBorrow", EOpSubBorrow);
+ symbolTable.relateToOperator("umulExtended", EOpUMulExtended);
+ symbolTable.relateToOperator("imulExtended", EOpIMulExtended);
+ symbolTable.relateToOperator("bitfieldExtract", EOpBitfieldExtract);
+ symbolTable.relateToOperator("bitfieldInsert", EOpBitfieldInsert);
+ symbolTable.relateToOperator("bitfieldReverse", EOpBitFieldReverse);
+ symbolTable.relateToOperator("bitCount", EOpBitCount);
+ symbolTable.relateToOperator("findLSB", EOpFindLSB);
+ symbolTable.relateToOperator("findMSB", EOpFindMSB);
+
+ if (PureOperatorBuiltins) {
+ symbolTable.relateToOperator("imageSize", EOpImageQuerySize);
+ symbolTable.relateToOperator("imageSamples", EOpImageQuerySamples);
+ symbolTable.relateToOperator("imageLoad", EOpImageLoad);
+ symbolTable.relateToOperator("imageStore", EOpImageStore);
+ symbolTable.relateToOperator("imageAtomicAdd", EOpImageAtomicAdd);
+ symbolTable.relateToOperator("imageAtomicMin", EOpImageAtomicMin);
+ symbolTable.relateToOperator("imageAtomicMax", EOpImageAtomicMax);
+ symbolTable.relateToOperator("imageAtomicAnd", EOpImageAtomicAnd);
+ symbolTable.relateToOperator("imageAtomicOr", EOpImageAtomicOr);
+ symbolTable.relateToOperator("imageAtomicXor", EOpImageAtomicXor);
+ symbolTable.relateToOperator("imageAtomicExchange", EOpImageAtomicExchange);
+ symbolTable.relateToOperator("imageAtomicCompSwap", EOpImageAtomicCompSwap);
+ symbolTable.relateToOperator("imageAtomicLoad", EOpImageAtomicLoad);
+ symbolTable.relateToOperator("imageAtomicStore", EOpImageAtomicStore);
+
+ symbolTable.relateToOperator("subpassLoad", EOpSubpassLoad);
+ symbolTable.relateToOperator("subpassLoadMS", EOpSubpassLoadMS);
+
+ symbolTable.relateToOperator("textureSize", EOpTextureQuerySize);
+ symbolTable.relateToOperator("textureQueryLod", EOpTextureQueryLod);
+ symbolTable.relateToOperator("textureQueryLevels", EOpTextureQueryLevels);
+ symbolTable.relateToOperator("textureSamples", EOpTextureQuerySamples);
+ symbolTable.relateToOperator("texture", EOpTexture);
+ symbolTable.relateToOperator("textureProj", EOpTextureProj);
+ symbolTable.relateToOperator("textureLod", EOpTextureLod);
+ symbolTable.relateToOperator("textureOffset", EOpTextureOffset);
+ symbolTable.relateToOperator("texelFetch", EOpTextureFetch);
+ symbolTable.relateToOperator("texelFetchOffset", EOpTextureFetchOffset);
+ symbolTable.relateToOperator("textureProjOffset", EOpTextureProjOffset);
+ symbolTable.relateToOperator("textureLodOffset", EOpTextureLodOffset);
+ symbolTable.relateToOperator("textureProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("textureProjLodOffset", EOpTextureProjLodOffset);
+ symbolTable.relateToOperator("textureGrad", EOpTextureGrad);
+ symbolTable.relateToOperator("textureGradOffset", EOpTextureGradOffset);
+ symbolTable.relateToOperator("textureProjGrad", EOpTextureProjGrad);
+ symbolTable.relateToOperator("textureProjGradOffset", EOpTextureProjGradOffset);
+ symbolTable.relateToOperator("textureGather", EOpTextureGather);
+ symbolTable.relateToOperator("textureGatherOffset", EOpTextureGatherOffset);
+ symbolTable.relateToOperator("textureGatherOffsets", EOpTextureGatherOffsets);
+
+ symbolTable.relateToOperator("noise1", EOpNoise);
+ symbolTable.relateToOperator("noise2", EOpNoise);
+ symbolTable.relateToOperator("noise3", EOpNoise);
+ symbolTable.relateToOperator("noise4", EOpNoise);
+
+#ifdef NV_EXTENSIONS
+ symbolTable.relateToOperator("textureFootprintNV", EOpImageSampleFootprintNV);
+ symbolTable.relateToOperator("textureFootprintClampNV", EOpImageSampleFootprintClampNV);
+ symbolTable.relateToOperator("textureFootprintLodNV", EOpImageSampleFootprintLodNV);
+ symbolTable.relateToOperator("textureFootprintGradNV", EOpImageSampleFootprintGradNV);
+ symbolTable.relateToOperator("textureFootprintGradClampNV", EOpImageSampleFootprintGradClampNV);
+#endif
+
+ if (spvVersion.spv == 0 && (IncludeLegacy(version, profile, spvVersion) ||
+ (profile == EEsProfile && version == 100))) {
+ symbolTable.relateToOperator("ftransform", EOpFtransform);
+
+ symbolTable.relateToOperator("texture1D", EOpTexture);
+ symbolTable.relateToOperator("texture1DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture1DProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture1DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture1DLod", EOpTextureLod);
+ symbolTable.relateToOperator("texture1DProjLod", EOpTextureProjLod);
+
+ symbolTable.relateToOperator("texture2DRect", EOpTexture);
+ symbolTable.relateToOperator("texture2DRectProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture2DRectGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture2DRectProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("shadow2DRect", EOpTexture);
+ symbolTable.relateToOperator("shadow2DRectProj", EOpTextureProj);
+ symbolTable.relateToOperator("shadow2DRectGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("shadow2DRectProjGradARB", EOpTextureProjGrad);
+
+ symbolTable.relateToOperator("texture2D", EOpTexture);
+ symbolTable.relateToOperator("texture2DProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture2DGradEXT", EOpTextureGrad);
+ symbolTable.relateToOperator("texture2DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture2DProjGradEXT", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture2DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture2DLod", EOpTextureLod);
+ symbolTable.relateToOperator("texture2DLodEXT", EOpTextureLod);
+ symbolTable.relateToOperator("texture2DProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("texture2DProjLodEXT", EOpTextureProjLod);
+
+ symbolTable.relateToOperator("texture3D", EOpTexture);
+ symbolTable.relateToOperator("texture3DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("texture3DProj", EOpTextureProj);
+ symbolTable.relateToOperator("texture3DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("texture3DLod", EOpTextureLod);
+ symbolTable.relateToOperator("texture3DProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("textureCube", EOpTexture);
+ symbolTable.relateToOperator("textureCubeGradEXT", EOpTextureGrad);
+ symbolTable.relateToOperator("textureCubeGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("textureCubeLod", EOpTextureLod);
+ symbolTable.relateToOperator("textureCubeLodEXT", EOpTextureLod);
+ symbolTable.relateToOperator("shadow1D", EOpTexture);
+ symbolTable.relateToOperator("shadow1DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("shadow2D", EOpTexture);
+ symbolTable.relateToOperator("shadow2DGradARB", EOpTextureGrad);
+ symbolTable.relateToOperator("shadow1DProj", EOpTextureProj);
+ symbolTable.relateToOperator("shadow2DProj", EOpTextureProj);
+ symbolTable.relateToOperator("shadow1DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("shadow2DProjGradARB", EOpTextureProjGrad);
+ symbolTable.relateToOperator("shadow1DLod", EOpTextureLod);
+ symbolTable.relateToOperator("shadow2DLod", EOpTextureLod);
+ symbolTable.relateToOperator("shadow1DProjLod", EOpTextureProjLod);
+ symbolTable.relateToOperator("shadow2DProjLod", EOpTextureProjLod);
+ }
+
+ if (profile != EEsProfile) {
+ symbolTable.relateToOperator("sparseTextureARB", EOpSparseTexture);
+ symbolTable.relateToOperator("sparseTextureLodARB", EOpSparseTextureLod);
+ symbolTable.relateToOperator("sparseTextureOffsetARB", EOpSparseTextureOffset);
+ symbolTable.relateToOperator("sparseTexelFetchARB", EOpSparseTextureFetch);
+ symbolTable.relateToOperator("sparseTexelFetchOffsetARB", EOpSparseTextureFetchOffset);
+ symbolTable.relateToOperator("sparseTextureLodOffsetARB", EOpSparseTextureLodOffset);
+ symbolTable.relateToOperator("sparseTextureGradARB", EOpSparseTextureGrad);
+ symbolTable.relateToOperator("sparseTextureGradOffsetARB", EOpSparseTextureGradOffset);
+ symbolTable.relateToOperator("sparseTextureGatherARB", EOpSparseTextureGather);
+ symbolTable.relateToOperator("sparseTextureGatherOffsetARB", EOpSparseTextureGatherOffset);
+ symbolTable.relateToOperator("sparseTextureGatherOffsetsARB", EOpSparseTextureGatherOffsets);
+ symbolTable.relateToOperator("sparseImageLoadARB", EOpSparseImageLoad);
+ symbolTable.relateToOperator("sparseTexelsResidentARB", EOpSparseTexelsResident);
+
+ symbolTable.relateToOperator("sparseTextureClampARB", EOpSparseTextureClamp);
+ symbolTable.relateToOperator("sparseTextureOffsetClampARB", EOpSparseTextureOffsetClamp);
+ symbolTable.relateToOperator("sparseTextureGradClampARB", EOpSparseTextureGradClamp);
+ symbolTable.relateToOperator("sparseTextureGradOffsetClampARB", EOpSparseTextureGradOffsetClamp);
+ symbolTable.relateToOperator("textureClampARB", EOpTextureClamp);
+ symbolTable.relateToOperator("textureOffsetClampARB", EOpTextureOffsetClamp);
+ symbolTable.relateToOperator("textureGradClampARB", EOpTextureGradClamp);
+ symbolTable.relateToOperator("textureGradOffsetClampARB", EOpTextureGradOffsetClamp);
+
+ symbolTable.relateToOperator("ballotARB", EOpBallot);
+ symbolTable.relateToOperator("readInvocationARB", EOpReadInvocation);
+ symbolTable.relateToOperator("readFirstInvocationARB", EOpReadFirstInvocation);
+
+ if (version >= 430) {
+ symbolTable.relateToOperator("anyInvocationARB", EOpAnyInvocation);
+ symbolTable.relateToOperator("allInvocationsARB", EOpAllInvocations);
+ symbolTable.relateToOperator("allInvocationsEqualARB", EOpAllInvocationsEqual);
+ }
+ if (version >= 460) {
+ symbolTable.relateToOperator("anyInvocation", EOpAnyInvocation);
+ symbolTable.relateToOperator("allInvocations", EOpAllInvocations);
+ symbolTable.relateToOperator("allInvocationsEqual", EOpAllInvocationsEqual);
+ }
+#ifdef AMD_EXTENSIONS
+ symbolTable.relateToOperator("minInvocationsAMD", EOpMinInvocations);
+ symbolTable.relateToOperator("maxInvocationsAMD", EOpMaxInvocations);
+ symbolTable.relateToOperator("addInvocationsAMD", EOpAddInvocations);
+ symbolTable.relateToOperator("minInvocationsNonUniformAMD", EOpMinInvocationsNonUniform);
+ symbolTable.relateToOperator("maxInvocationsNonUniformAMD", EOpMaxInvocationsNonUniform);
+ symbolTable.relateToOperator("addInvocationsNonUniformAMD", EOpAddInvocationsNonUniform);
+ symbolTable.relateToOperator("minInvocationsInclusiveScanAMD", EOpMinInvocationsInclusiveScan);
+ symbolTable.relateToOperator("maxInvocationsInclusiveScanAMD", EOpMaxInvocationsInclusiveScan);
+ symbolTable.relateToOperator("addInvocationsInclusiveScanAMD", EOpAddInvocationsInclusiveScan);
+ symbolTable.relateToOperator("minInvocationsInclusiveScanNonUniformAMD", EOpMinInvocationsInclusiveScanNonUniform);
+ symbolTable.relateToOperator("maxInvocationsInclusiveScanNonUniformAMD", EOpMaxInvocationsInclusiveScanNonUniform);
+ symbolTable.relateToOperator("addInvocationsInclusiveScanNonUniformAMD", EOpAddInvocationsInclusiveScanNonUniform);
+ symbolTable.relateToOperator("minInvocationsExclusiveScanAMD", EOpMinInvocationsExclusiveScan);
+ symbolTable.relateToOperator("maxInvocationsExclusiveScanAMD", EOpMaxInvocationsExclusiveScan);
+ symbolTable.relateToOperator("addInvocationsExclusiveScanAMD", EOpAddInvocationsExclusiveScan);
+ symbolTable.relateToOperator("minInvocationsExclusiveScanNonUniformAMD", EOpMinInvocationsExclusiveScanNonUniform);
+ symbolTable.relateToOperator("maxInvocationsExclusiveScanNonUniformAMD", EOpMaxInvocationsExclusiveScanNonUniform);
+ symbolTable.relateToOperator("addInvocationsExclusiveScanNonUniformAMD", EOpAddInvocationsExclusiveScanNonUniform);
+ symbolTable.relateToOperator("swizzleInvocationsAMD", EOpSwizzleInvocations);
+ symbolTable.relateToOperator("swizzleInvocationsMaskedAMD", EOpSwizzleInvocationsMasked);
+ symbolTable.relateToOperator("writeInvocationAMD", EOpWriteInvocation);
+ symbolTable.relateToOperator("mbcntAMD", EOpMbcnt);
+
+ symbolTable.relateToOperator("min3", EOpMin3);
+ symbolTable.relateToOperator("max3", EOpMax3);
+ symbolTable.relateToOperator("mid3", EOpMid3);
+
+ symbolTable.relateToOperator("cubeFaceIndexAMD", EOpCubeFaceIndex);
+ symbolTable.relateToOperator("cubeFaceCoordAMD", EOpCubeFaceCoord);
+ symbolTable.relateToOperator("timeAMD", EOpTime);
+
+ symbolTable.relateToOperator("textureGatherLodAMD", EOpTextureGatherLod);
+ symbolTable.relateToOperator("textureGatherLodOffsetAMD", EOpTextureGatherLodOffset);
+ symbolTable.relateToOperator("textureGatherLodOffsetsAMD", EOpTextureGatherLodOffsets);
+ symbolTable.relateToOperator("sparseTextureGatherLodAMD", EOpSparseTextureGatherLod);
+ symbolTable.relateToOperator("sparseTextureGatherLodOffsetAMD", EOpSparseTextureGatherLodOffset);
+ symbolTable.relateToOperator("sparseTextureGatherLodOffsetsAMD", EOpSparseTextureGatherLodOffsets);
+
+ symbolTable.relateToOperator("imageLoadLodAMD", EOpImageLoadLod);
+ symbolTable.relateToOperator("imageStoreLodAMD", EOpImageStoreLod);
+ symbolTable.relateToOperator("sparseImageLoadLodAMD", EOpSparseImageLoadLod);
+
+ symbolTable.relateToOperator("fragmentMaskFetchAMD", EOpFragmentMaskFetch);
+ symbolTable.relateToOperator("fragmentFetchAMD", EOpFragmentFetch);
+#endif
+ }
+
+ // GL_KHR_shader_subgroup
+ if ((profile == EEsProfile && version >= 310) ||
+ (profile != EEsProfile && version >= 140)) {
+ symbolTable.relateToOperator("subgroupBarrier", EOpSubgroupBarrier);
+ symbolTable.relateToOperator("subgroupMemoryBarrier", EOpSubgroupMemoryBarrier);
+ symbolTable.relateToOperator("subgroupMemoryBarrierBuffer", EOpSubgroupMemoryBarrierBuffer);
+ symbolTable.relateToOperator("subgroupMemoryBarrierImage", EOpSubgroupMemoryBarrierImage);
+ symbolTable.relateToOperator("subgroupElect", EOpSubgroupElect);
+ symbolTable.relateToOperator("subgroupAll", EOpSubgroupAll);
+ symbolTable.relateToOperator("subgroupAny", EOpSubgroupAny);
+ symbolTable.relateToOperator("subgroupAllEqual", EOpSubgroupAllEqual);
+ symbolTable.relateToOperator("subgroupBroadcast", EOpSubgroupBroadcast);
+ symbolTable.relateToOperator("subgroupBroadcastFirst", EOpSubgroupBroadcastFirst);
+ symbolTable.relateToOperator("subgroupBallot", EOpSubgroupBallot);
+ symbolTable.relateToOperator("subgroupInverseBallot", EOpSubgroupInverseBallot);
+ symbolTable.relateToOperator("subgroupBallotBitExtract", EOpSubgroupBallotBitExtract);
+ symbolTable.relateToOperator("subgroupBallotBitCount", EOpSubgroupBallotBitCount);
+ symbolTable.relateToOperator("subgroupBallotInclusiveBitCount", EOpSubgroupBallotInclusiveBitCount);
+ symbolTable.relateToOperator("subgroupBallotExclusiveBitCount", EOpSubgroupBallotExclusiveBitCount);
+ symbolTable.relateToOperator("subgroupBallotFindLSB", EOpSubgroupBallotFindLSB);
+ symbolTable.relateToOperator("subgroupBallotFindMSB", EOpSubgroupBallotFindMSB);
+ symbolTable.relateToOperator("subgroupShuffle", EOpSubgroupShuffle);
+ symbolTable.relateToOperator("subgroupShuffleXor", EOpSubgroupShuffleXor);
+ symbolTable.relateToOperator("subgroupShuffleUp", EOpSubgroupShuffleUp);
+ symbolTable.relateToOperator("subgroupShuffleDown", EOpSubgroupShuffleDown);
+ symbolTable.relateToOperator("subgroupAdd", EOpSubgroupAdd);
+ symbolTable.relateToOperator("subgroupMul", EOpSubgroupMul);
+ symbolTable.relateToOperator("subgroupMin", EOpSubgroupMin);
+ symbolTable.relateToOperator("subgroupMax", EOpSubgroupMax);
+ symbolTable.relateToOperator("subgroupAnd", EOpSubgroupAnd);
+ symbolTable.relateToOperator("subgroupOr", EOpSubgroupOr);
+ symbolTable.relateToOperator("subgroupXor", EOpSubgroupXor);
+ symbolTable.relateToOperator("subgroupInclusiveAdd", EOpSubgroupInclusiveAdd);
+ symbolTable.relateToOperator("subgroupInclusiveMul", EOpSubgroupInclusiveMul);
+ symbolTable.relateToOperator("subgroupInclusiveMin", EOpSubgroupInclusiveMin);
+ symbolTable.relateToOperator("subgroupInclusiveMax", EOpSubgroupInclusiveMax);
+ symbolTable.relateToOperator("subgroupInclusiveAnd", EOpSubgroupInclusiveAnd);
+ symbolTable.relateToOperator("subgroupInclusiveOr", EOpSubgroupInclusiveOr);
+ symbolTable.relateToOperator("subgroupInclusiveXor", EOpSubgroupInclusiveXor);
+ symbolTable.relateToOperator("subgroupExclusiveAdd", EOpSubgroupExclusiveAdd);
+ symbolTable.relateToOperator("subgroupExclusiveMul", EOpSubgroupExclusiveMul);
+ symbolTable.relateToOperator("subgroupExclusiveMin", EOpSubgroupExclusiveMin);
+ symbolTable.relateToOperator("subgroupExclusiveMax", EOpSubgroupExclusiveMax);
+ symbolTable.relateToOperator("subgroupExclusiveAnd", EOpSubgroupExclusiveAnd);
+ symbolTable.relateToOperator("subgroupExclusiveOr", EOpSubgroupExclusiveOr);
+ symbolTable.relateToOperator("subgroupExclusiveXor", EOpSubgroupExclusiveXor);
+ symbolTable.relateToOperator("subgroupClusteredAdd", EOpSubgroupClusteredAdd);
+ symbolTable.relateToOperator("subgroupClusteredMul", EOpSubgroupClusteredMul);
+ symbolTable.relateToOperator("subgroupClusteredMin", EOpSubgroupClusteredMin);
+ symbolTable.relateToOperator("subgroupClusteredMax", EOpSubgroupClusteredMax);
+ symbolTable.relateToOperator("subgroupClusteredAnd", EOpSubgroupClusteredAnd);
+ symbolTable.relateToOperator("subgroupClusteredOr", EOpSubgroupClusteredOr);
+ symbolTable.relateToOperator("subgroupClusteredXor", EOpSubgroupClusteredXor);
+ symbolTable.relateToOperator("subgroupQuadBroadcast", EOpSubgroupQuadBroadcast);
+ symbolTable.relateToOperator("subgroupQuadSwapHorizontal", EOpSubgroupQuadSwapHorizontal);
+ symbolTable.relateToOperator("subgroupQuadSwapVertical", EOpSubgroupQuadSwapVertical);
+ symbolTable.relateToOperator("subgroupQuadSwapDiagonal", EOpSubgroupQuadSwapDiagonal);
+
+#ifdef NV_EXTENSIONS
+ symbolTable.relateToOperator("subgroupPartitionNV", EOpSubgroupPartition);
+ symbolTable.relateToOperator("subgroupPartitionedAddNV", EOpSubgroupPartitionedAdd);
+ symbolTable.relateToOperator("subgroupPartitionedMulNV", EOpSubgroupPartitionedMul);
+ symbolTable.relateToOperator("subgroupPartitionedMinNV", EOpSubgroupPartitionedMin);
+ symbolTable.relateToOperator("subgroupPartitionedMaxNV", EOpSubgroupPartitionedMax);
+ symbolTable.relateToOperator("subgroupPartitionedAndNV", EOpSubgroupPartitionedAnd);
+ symbolTable.relateToOperator("subgroupPartitionedOrNV", EOpSubgroupPartitionedOr);
+ symbolTable.relateToOperator("subgroupPartitionedXorNV", EOpSubgroupPartitionedXor);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveAddNV", EOpSubgroupPartitionedInclusiveAdd);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveMulNV", EOpSubgroupPartitionedInclusiveMul);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveMinNV", EOpSubgroupPartitionedInclusiveMin);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveMaxNV", EOpSubgroupPartitionedInclusiveMax);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveAndNV", EOpSubgroupPartitionedInclusiveAnd);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveOrNV", EOpSubgroupPartitionedInclusiveOr);
+ symbolTable.relateToOperator("subgroupPartitionedInclusiveXorNV", EOpSubgroupPartitionedInclusiveXor);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveAddNV", EOpSubgroupPartitionedExclusiveAdd);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveMulNV", EOpSubgroupPartitionedExclusiveMul);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveMinNV", EOpSubgroupPartitionedExclusiveMin);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveMaxNV", EOpSubgroupPartitionedExclusiveMax);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveAndNV", EOpSubgroupPartitionedExclusiveAnd);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveOrNV", EOpSubgroupPartitionedExclusiveOr);
+ symbolTable.relateToOperator("subgroupPartitionedExclusiveXorNV", EOpSubgroupPartitionedExclusiveXor);
+#endif
+ }
+
+ if (profile == EEsProfile) {
+ symbolTable.relateToOperator("shadow2DEXT", EOpTexture);
+ symbolTable.relateToOperator("shadow2DProjEXT", EOpTextureProj);
+ }
+ }
+
+ switch(language) {
+ case EShLangVertex:
+ break;
+
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ break;
+
+ case EShLangGeometry:
+ symbolTable.relateToOperator("EmitStreamVertex", EOpEmitStreamVertex);
+ symbolTable.relateToOperator("EndStreamPrimitive", EOpEndStreamPrimitive);
+ symbolTable.relateToOperator("EmitVertex", EOpEmitVertex);
+ symbolTable.relateToOperator("EndPrimitive", EOpEndPrimitive);
+ break;
+
+ case EShLangFragment:
+ symbolTable.relateToOperator("dFdx", EOpDPdx);
+ symbolTable.relateToOperator("dFdy", EOpDPdy);
+ symbolTable.relateToOperator("fwidth", EOpFwidth);
+ if (profile != EEsProfile && version >= 400) {
+ symbolTable.relateToOperator("dFdxFine", EOpDPdxFine);
+ symbolTable.relateToOperator("dFdyFine", EOpDPdyFine);
+ symbolTable.relateToOperator("fwidthFine", EOpFwidthFine);
+ symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse);
+ symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse);
+ symbolTable.relateToOperator("fwidthCoarse", EOpFwidthCoarse);
+ }
+ symbolTable.relateToOperator("interpolateAtCentroid", EOpInterpolateAtCentroid);
+ symbolTable.relateToOperator("interpolateAtSample", EOpInterpolateAtSample);
+ symbolTable.relateToOperator("interpolateAtOffset", EOpInterpolateAtOffset);
+
+#ifdef AMD_EXTENSIONS
+ if (profile != EEsProfile)
+ symbolTable.relateToOperator("interpolateAtVertexAMD", EOpInterpolateAtVertex);
+#endif
+ break;
+
+ case EShLangCompute:
+ symbolTable.relateToOperator("memoryBarrierShared", EOpMemoryBarrierShared);
+ symbolTable.relateToOperator("groupMemoryBarrier", EOpGroupMemoryBarrier);
+ symbolTable.relateToOperator("subgroupMemoryBarrierShared", EOpSubgroupMemoryBarrierShared);
+#ifdef NV_EXTENSIONS
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320)) {
+ symbolTable.relateToOperator("dFdx", EOpDPdx);
+ symbolTable.relateToOperator("dFdy", EOpDPdy);
+ symbolTable.relateToOperator("fwidth", EOpFwidth);
+ symbolTable.relateToOperator("dFdxFine", EOpDPdxFine);
+ symbolTable.relateToOperator("dFdyFine", EOpDPdyFine);
+ symbolTable.relateToOperator("fwidthFine", EOpFwidthFine);
+ symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse);
+ symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse);
+ symbolTable.relateToOperator("fwidthCoarse",EOpFwidthCoarse);
+ }
+#endif
+ symbolTable.relateToOperator("coopMatLoadNV", EOpCooperativeMatrixLoad);
+ symbolTable.relateToOperator("coopMatStoreNV", EOpCooperativeMatrixStore);
+ symbolTable.relateToOperator("coopMatMulAddNV", EOpCooperativeMatrixMulAdd);
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("traceNV", EOpTraceNV);
+ symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV);
+ }
+ break;
+ case EShLangIntersectNV:
+ if (profile != EEsProfile && version >= 460)
+ symbolTable.relateToOperator("reportIntersectionNV", EOpReportIntersectionNV);
+ break;
+ case EShLangAnyHitNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersectionNV);
+ symbolTable.relateToOperator("terminateRayNV", EOpTerminateRayNV);
+ }
+ break;
+ case EShLangCallableNV:
+ if (profile != EEsProfile && version >= 460) {
+ symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV);
+ }
+ break;
+ case EShLangMeshNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ symbolTable.relateToOperator("writePackedPrimitiveIndices4x8NV", EOpWritePackedPrimitiveIndices4x8NV);
+ }
+ // fall through
+ case EShLangTaskNV:
+ if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) {
+ symbolTable.relateToOperator("memoryBarrierShared", EOpMemoryBarrierShared);
+ symbolTable.relateToOperator("groupMemoryBarrier", EOpGroupMemoryBarrier);
+ }
+ break;
+#endif
+
+ default:
+ assert(false && "Language not supported");
+ }
+}
+
+//
+// Add context-dependent (resource-specific) built-ins not handled by the above. These
+// would be ones that need to be programmatically added because they cannot
+// be added by simple text strings. For these, also
+// 1) Map built-in functions to operators, for those that will turn into an operation node
+// instead of remaining a function call.
+// 2) Tag extension-related symbols added to their base version with their extensions, so
+// that if an early version has the extension turned off, there is an error reported on use.
+//
+void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources)
+{
+ if (profile != EEsProfile && version >= 430 && version < 440) {
+ symbolTable.setVariableExtensions("gl_MaxTransformFeedbackBuffers", 1, &E_GL_ARB_enhanced_layouts);
+ symbolTable.setVariableExtensions("gl_MaxTransformFeedbackInterleavedComponents", 1, &E_GL_ARB_enhanced_layouts);
+ }
+ if (profile != EEsProfile && version >= 130 && version < 420) {
+ symbolTable.setVariableExtensions("gl_MinProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack);
+ symbolTable.setVariableExtensions("gl_MaxProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack);
+ }
+ if (profile != EEsProfile && version >= 150 && version < 410)
+ symbolTable.setVariableExtensions("gl_MaxViewports", 1, &E_GL_ARB_viewport_array);
+
+ switch(language) {
+ case EShLangFragment:
+ // Set up gl_FragData based on current array size.
+ if (version == 100 || IncludeLegacy(version, profile, spvVersion) || (! ForwardCompatibility && profile != EEsProfile && version < 420)) {
+ TPrecisionQualifier pq = profile == EEsProfile ? EpqMedium : EpqNone;
+ TType fragData(EbtFloat, EvqFragColor, pq, 4);
+ TArraySizes* arraySizes = new TArraySizes;
+ arraySizes->addInnerSize(resources.maxDrawBuffers);
+ fragData.transferArraySizes(arraySizes);
+ symbolTable.insert(*new TVariable(NewPoolTString("gl_FragData"), fragData));
+ SpecialQualifier("gl_FragData", EvqFragColor, EbvFragData, symbolTable);
+ }
+ break;
+
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ // Because of the context-dependent array size (gl_MaxPatchVertices),
+ // these variables were added later than the others and need to be mapped now.
+
+ // standard members
+ BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable);
+ BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable);
+ BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable);
+ BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable);
+
+ // compatibility members
+ BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable);
+ BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable);
+ BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable);
+
+ // extension requirements
+ if (profile == EEsProfile) {
+ symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Initialize.h b/src/3rdparty/glslang/glslang/MachineIndependent/Initialize.h
new file mode 100644
index 0000000..b5de324
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Initialize.h
@@ -0,0 +1,110 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013-2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _INITIALIZE_INCLUDED_
+#define _INITIALIZE_INCLUDED_
+
+#include "../Include/ResourceLimits.h"
+#include "../Include/Common.h"
+#include "../Include/ShHandle.h"
+#include "SymbolTable.h"
+#include "Versions.h"
+
+namespace glslang {
+
+//
+// This is made to hold parseable strings for almost all the built-in
+// functions and variables for one specific combination of version
+// and profile. (Some still need to be added programmatically.)
+// This is a base class for language-specific derivations, which
+// can be used for language-independent built-ins.
+//
+// The strings are organized by
+// commonBuiltins: intersection of all stages' built-ins, processed just once
+// stageBuiltins[]: anything a stage needs that's not in commonBuiltins
+//
+class TBuiltInParseables {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ TBuiltInParseables();
+ virtual ~TBuiltInParseables();
+ virtual void initialize(int version, EProfile, const SpvVersion& spvVersion) = 0;
+ virtual void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage) = 0;
+ virtual const TString& getCommonString() const { return commonBuiltins; }
+ virtual const TString& getStageString(EShLanguage language) const { return stageBuiltins[language]; }
+
+ virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) = 0;
+ virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) = 0;
+
+protected:
+ TString commonBuiltins;
+ TString stageBuiltins[EShLangCount];
+};
+
+//
+// This is a GLSL-specific derivation of TBuiltInParseables. To present a stable
+// interface and match other similar code, it is called TBuiltIns, rather
+// than TBuiltInParseablesGlsl.
+//
+class TBuiltIns : public TBuiltInParseables {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ TBuiltIns();
+ virtual ~TBuiltIns();
+ void initialize(int version, EProfile, const SpvVersion& spvVersion);
+ void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage);
+
+ void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable);
+ void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
+
+protected:
+ void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion);
+ void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile);
+ void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+ void addImageFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+ void addSamplingFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+ void addGatherFunctions(TSampler, const TString& typeName, int version, EProfile profile);
+
+ // Helpers for making textual representations of the permutations
+ // of texturing/imaging functions.
+ const char* postfixes[5];
+ const char* prefixes[EbtNumTypes];
+ int dimMap[EsdNumDims];
+};
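+
+// Illustrative sketch (not part of the original header): typical front-end
+// use, assuming a populated TSymbolTable named 'symbolTable':
+//
+//     TBuiltIns builtIns;
+//     builtIns.initialize(version, profile, spvVersion);
+//     // ...parse getCommonString() and getStageString(language) into the
+//     // symbol table, then map the built-in names to operators:
+//     builtIns.identifyBuiltIns(version, profile, spvVersion, language, symbolTable);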
+
+} // end namespace glslang
+
+#endif // _INITIALIZE_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp
new file mode 100644
index 0000000..f46010b
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/IntermTraverse.cpp
@@ -0,0 +1,302 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (c) 2002-2010 The ANGLE Project Authors.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/intermediate.h"
+
+namespace glslang {
+
+//
+// Traverse the intermediate representation tree, and
+// call a node-type-specific function for each node.
+// This is done recursively through the member function traverse().
+// Node types can be skipped if their function to call is null,
+// but their subtree will still be traversed.
+// Nodes with children can have their whole subtree skipped
+// if preVisit is turned on and the type-specific function
+// returns false.
+//
+// preVisit, postVisit, and rightToLeft control what order
+// nodes are visited in.
+//
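+// As an illustrative sketch (not part of the original file), a minimal
+// traverser that counts symbol nodes could look like this, using the
+// TIntermTraverser interface declared in ../Include/intermediate.h:
+//
+//     class TSymbolCounter : public TIntermTraverser {
+//     public:
+//         TSymbolCounter() : count(0) { }
+//         virtual void visitSymbol(TIntermSymbol*) { ++count; }
+//         int count;
+//     };
+//
+//     TSymbolCounter counter;
+//     root->traverse(&counter);  // 'root' is any TIntermNode*
+//     // counter.count now holds the number of symbol nodes visited
+//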
+
+//
+// Traversal functions for terminals are straightforward.
+//
+void TIntermMethod::traverse(TIntermTraverser*)
+{
+    // The tree should always have all methods resolved into non-methods, so there is nothing to do here.
+}
+
+void TIntermSymbol::traverse(TIntermTraverser *it)
+{
+ it->visitSymbol(this);
+}
+
+void TIntermConstantUnion::traverse(TIntermTraverser *it)
+{
+ it->visitConstantUnion(this);
+}
+
+//
+// Traverse a binary node.
+//
+void TIntermBinary::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ //
+ // visit the node before children if pre-visiting.
+ //
+ if (it->preVisit)
+ visit = it->visitBinary(EvPreVisit, this);
+
+ //
+ // Visit the children, in the right order.
+ //
+ if (visit) {
+ it->incrementDepth(this);
+
+ if (it->rightToLeft) {
+ if (right)
+ right->traverse(it);
+
+ if (it->inVisit)
+ visit = it->visitBinary(EvInVisit, this);
+
+ if (visit && left)
+ left->traverse(it);
+ } else {
+ if (left)
+ left->traverse(it);
+
+ if (it->inVisit)
+ visit = it->visitBinary(EvInVisit, this);
+
+ if (visit && right)
+ right->traverse(it);
+ }
+
+ it->decrementDepth();
+ }
+
+ //
+ // Visit the node after the children, if requested and the traversal
+ // hasn't been canceled yet.
+ //
+ if (visit && it->postVisit)
+ it->visitBinary(EvPostVisit, this);
+}
+
+//
+// Traverse a unary node. The same comments as for the binary node apply here.
+//
+void TIntermUnary::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitUnary(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+ operand->traverse(it);
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitUnary(EvPostVisit, this);
+}
+
+//
+// Traverse an aggregate node. The same comments as for the binary node apply here.
+//
+void TIntermAggregate::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitAggregate(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+
+ if (it->rightToLeft) {
+ for (TIntermSequence::reverse_iterator sit = sequence.rbegin(); sit != sequence.rend(); sit++) {
+ (*sit)->traverse(it);
+
+ if (visit && it->inVisit) {
+ if (*sit != sequence.front())
+ visit = it->visitAggregate(EvInVisit, this);
+ }
+ }
+ } else {
+ for (TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++) {
+ (*sit)->traverse(it);
+
+ if (visit && it->inVisit) {
+ if (*sit != sequence.back())
+ visit = it->visitAggregate(EvInVisit, this);
+ }
+ }
+ }
+
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitAggregate(EvPostVisit, this);
+}
+
+//
+// Traverse a selection node. The same comments as for the binary node apply here.
+//
+void TIntermSelection::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitSelection(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+ if (it->rightToLeft) {
+ if (falseBlock)
+ falseBlock->traverse(it);
+ if (trueBlock)
+ trueBlock->traverse(it);
+ condition->traverse(it);
+ } else {
+ condition->traverse(it);
+ if (trueBlock)
+ trueBlock->traverse(it);
+ if (falseBlock)
+ falseBlock->traverse(it);
+ }
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitSelection(EvPostVisit, this);
+}
+
+//
+// Traverse a loop node. The same comments as for the binary node apply here.
+//
+void TIntermLoop::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitLoop(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+
+ if (it->rightToLeft) {
+ if (terminal)
+ terminal->traverse(it);
+
+ if (body)
+ body->traverse(it);
+
+ if (test)
+ test->traverse(it);
+ } else {
+ if (test)
+ test->traverse(it);
+
+ if (body)
+ body->traverse(it);
+
+ if (terminal)
+ terminal->traverse(it);
+ }
+
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitLoop(EvPostVisit, this);
+}
+
+//
+// Traverse a branch node. The same comments as for the binary node apply here.
+//
+void TIntermBranch::traverse(TIntermTraverser *it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitBranch(EvPreVisit, this);
+
+ if (visit && expression) {
+ it->incrementDepth(this);
+ expression->traverse(it);
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitBranch(EvPostVisit, this);
+}
+
+//
+// Traverse a switch node.
+//
+void TIntermSwitch::traverse(TIntermTraverser* it)
+{
+ bool visit = true;
+
+ if (it->preVisit)
+ visit = it->visitSwitch(EvPreVisit, this);
+
+ if (visit) {
+ it->incrementDepth(this);
+ if (it->rightToLeft) {
+ body->traverse(it);
+ condition->traverse(it);
+ } else {
+ condition->traverse(it);
+ body->traverse(it);
+ }
+ it->decrementDepth();
+ }
+
+ if (visit && it->postVisit)
+ it->visitSwitch(EvPostVisit, this);
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Intermediate.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/Intermediate.cpp
new file mode 100644
index 0000000..5e9c784
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Intermediate.cpp
@@ -0,0 +1,3967 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Build the intermediate representation.
+//
+
+#include "localintermediate.h"
+#include "RemoveTree.h"
+#include "SymbolTable.h"
+#include "propagateNoContraction.h"
+
+#include <cfloat>
+#include <utility>
+#include <tuple>
+
+namespace glslang {
+
+////////////////////////////////////////////////////////////////////////////
+//
+// The first set of functions helps build the intermediate representation.
+// These functions are not member functions of the nodes.
+// They are called from parser productions.
+//
+/////////////////////////////////////////////////////////////////////////////
+
+//
+// Add a terminal node for an identifier in an expression.
+//
+// Returns the added node.
+//
+
+TIntermSymbol* TIntermediate::addSymbol(int id, const TString& name, const TType& type, const TConstUnionArray& constArray,
+ TIntermTyped* constSubtree, const TSourceLoc& loc)
+{
+ TIntermSymbol* node = new TIntermSymbol(id, name, type);
+ node->setLoc(loc);
+ node->setConstArray(constArray);
+ node->setConstSubtree(constSubtree);
+
+ return node;
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol)
+{
+ return addSymbol(intermSymbol.getId(),
+ intermSymbol.getName(),
+ intermSymbol.getType(),
+ intermSymbol.getConstArray(),
+ intermSymbol.getConstSubtree(),
+ intermSymbol.getLoc());
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable)
+{
+ glslang::TSourceLoc loc; // just a null location
+ loc.init();
+
+ return addSymbol(variable, loc);
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc)
+{
+ return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc);
+}
+
+TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc)
+{
+ TConstUnionArray unionArray; // just a null constant
+
+ return addSymbol(0, "", type, unionArray, nullptr, loc);
+}
+
+//
+// Connect two nodes with a new parent that does a binary operation on the nodes.
+//
+// Returns the added node.
+//
+// Returns nullptr if the working conversions and promotions could not be found.
+//
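+// Illustrative sketch (hypothetical names, not part of the original file):
+// a parser production for 'a + b' might call, given a TIntermediate&
+// named 'intermediate':
+//
+//     TIntermTyped* sum = intermediate.addBinaryMath(EOpAdd, a, b, loc);
+//     if (sum == nullptr)
+//         ; // no viable conversion or promotion; report a compile error
+//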
+TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
+{
+ // No operations work on blocks
+ if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
+ return nullptr;
+
+ // Try converting the children's base types to compatible types.
+ auto children = addConversion(op, left, right);
+ left = std::get<0>(children);
+ right = std::get<1>(children);
+
+ if (left == nullptr || right == nullptr)
+ return nullptr;
+
+ // Convert the children's type shape to be compatible.
+ addBiShapeConversion(op, left, right);
+ if (left == nullptr || right == nullptr)
+ return nullptr;
+
+ //
+ // Need a new node holding things together. Make
+ // one and promote it to the right type.
+ //
+ TIntermBinary* node = addBinaryNode(op, left, right, loc);
+ if (! promote(node))
+ return nullptr;
+
+ node->updatePrecision();
+
+ //
+ // If they are both (non-specialization) constants, they must be folded.
+ // (Unless it's the sequence (comma) operator, but that's handled in addComma().)
+ //
+ TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion();
+ TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion();
+ if (leftTempConstant && rightTempConstant) {
+ TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant);
+ if (folded)
+ return folded;
+ }
+
+    // If spec-constantness can propagate and the operation is an allowed
+ // specialization-constant operation, make a spec-constant.
+ if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node))
+ node->getWritableType().getQualifier().makeSpecConstant();
+
+    // If nonuniform must propagate, mark the result nonuniform.
+ if ((node->getLeft()->getQualifier().nonUniform || node->getRight()->getQualifier().nonUniform) &&
+ isNonuniformPropagating(node->getOp()))
+ node->getWritableType().getQualifier().nonUniform = true;
+
+ return node;
+}
+
+//
+// Low level: add binary node (no promotions or other argument modifications)
+//
+TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) const
+{
+ // build the node
+ TIntermBinary* node = new TIntermBinary(op);
+ if (loc.line == 0)
+ loc = left->getLoc();
+ node->setLoc(loc);
+ node->setLeft(left);
+ node->setRight(right);
+
+ return node;
+}
+
+//
+// like non-type form, but sets node's type.
+//
+TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc, const TType& type) const
+{
+ TIntermBinary* node = addBinaryNode(op, left, right, loc);
+ node->setType(type);
+ return node;
+}
+
+//
+// Low level: add unary node (no promotions or other argument modifications)
+//
+TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc) const
+{
+ TIntermUnary* node = new TIntermUnary(op);
+ if (loc.line == 0)
+ loc = child->getLoc();
+ node->setLoc(loc);
+ node->setOperand(child);
+
+ return node;
+}
+
+//
+// like non-type form, but sets node's type.
+//
+TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc, const TType& type) const
+{
+ TIntermUnary* node = addUnaryNode(op, child, loc);
+ node->setType(type);
+ return node;
+}
+
+//
+// Connect two nodes through an assignment.
+//
+// Returns the added node.
+//
+// Returns nullptr if the 'right' type could not be converted to match the 'left' type,
+// or if the resulting operation could not be properly promoted.
+//
+TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
+{
+ // No block assignment
+ if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
+ return nullptr;
+
+ //
+ // Like adding binary math, except the conversion can only go
+ // from right to left.
+ //
+
+    // convert base types; a nullptr return means it was not possible
+ right = addConversion(op, left->getType(), right);
+ if (right == nullptr)
+ return nullptr;
+
+ // convert shape
+ right = addUniShapeConversion(op, left->getType(), right);
+
+ // build the node
+ TIntermBinary* node = addBinaryNode(op, left, right, loc);
+
+ if (! promote(node))
+ return nullptr;
+
+ node->updatePrecision();
+
+ return node;
+}
+
+//
+// Connect two nodes through an index operator, where the left node is the base
+// of an array or struct, and the right node is a direct or indirect offset.
+//
+// Returns the added node.
+// The caller should set the type of the returned node.
+//
+TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc loc)
+{
+ // caller should set the type
+ return addBinaryNode(op, base, index, loc);
+}
+
+//
+// Add one node as the parent of another that it operates on.
+//
+// Returns the added node.
+//
+TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSourceLoc loc)
+{
+    if (child == nullptr)
+ return nullptr;
+
+ if (child->getType().getBasicType() == EbtBlock)
+ return nullptr;
+
+ switch (op) {
+ case EOpLogicalNot:
+ if (source == EShSourceHlsl) {
+ break; // HLSL can promote logical not
+ }
+
+ if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) {
+ return nullptr;
+ }
+ break;
+
+ case EOpPostIncrement:
+ case EOpPreIncrement:
+ case EOpPostDecrement:
+ case EOpPreDecrement:
+ case EOpNegative:
+ if (child->getType().getBasicType() == EbtStruct || child->getType().isArray())
+ return nullptr;
+ default: break; // some compilers want this
+ }
+
+ //
+ // Do we need to promote the operand?
+ //
+ TBasicType newType = EbtVoid;
+ switch (op) {
+ case EOpConstructInt8: newType = EbtInt8; break;
+ case EOpConstructUint8: newType = EbtUint8; break;
+ case EOpConstructInt16: newType = EbtInt16; break;
+ case EOpConstructUint16: newType = EbtUint16; break;
+ case EOpConstructInt: newType = EbtInt; break;
+ case EOpConstructUint: newType = EbtUint; break;
+ case EOpConstructInt64: newType = EbtInt64; break;
+ case EOpConstructUint64: newType = EbtUint64; break;
+ case EOpConstructBool: newType = EbtBool; break;
+ case EOpConstructFloat: newType = EbtFloat; break;
+ case EOpConstructDouble: newType = EbtDouble; break;
+ case EOpConstructFloat16: newType = EbtFloat16; break;
+ default: break; // some compilers want this
+ }
+
+ if (newType != EbtVoid) {
+ child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(),
+ child->getMatrixCols(),
+ child->getMatrixRows(),
+ child->isVector()),
+ child);
+ if (child == nullptr)
+ return nullptr;
+ }
+
+ //
+    // For constructors, we are now done; it was all in the conversion.
+ // TODO: but, did this bypass constant folding?
+ //
+ switch (op) {
+ case EOpConstructInt8:
+ case EOpConstructUint8:
+ case EOpConstructInt16:
+ case EOpConstructUint16:
+ case EOpConstructInt:
+ case EOpConstructUint:
+ case EOpConstructInt64:
+ case EOpConstructUint64:
+ case EOpConstructBool:
+ case EOpConstructFloat:
+ case EOpConstructDouble:
+ case EOpConstructFloat16:
+ return child;
+ default: break; // some compilers want this
+ }
+
+ //
+ // Make a new node for the operator.
+ //
+ TIntermUnary* node = addUnaryNode(op, child, loc);
+
+ if (! promote(node))
+ return nullptr;
+
+ node->updatePrecision();
+
+ // If it's a (non-specialization) constant, it must be folded.
+ if (node->getOperand()->getAsConstantUnion())
+ return node->getOperand()->getAsConstantUnion()->fold(op, node->getType());
+
+ // If it's a specialization constant, the result is too, provided the
+ // operation is allowed for specialization constants.
+ if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node))
+ node->getWritableType().getQualifier().makeSpecConstant();
+
+ // If nonuniform must be propagated, make the result nonuniform.
+ if (node->getOperand()->getQualifier().nonUniform && isNonuniformPropagating(node->getOp()))
+ node->getWritableType().getQualifier().nonUniform = true;
+
+ return node;
+}
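+
+// Illustrative sketch (not part of the upstream source): the constant-folding
+// tail of addUnaryMath() means negating a literal produces another constant
+// union rather than a unary node. 'intermediate' and 'loc' are assumed:
+//
+//     TIntermTyped* three = intermediate.addConstantUnion(3, loc, true);
+//     TIntermTyped* neg = intermediate.addUnaryMath(EOpNegative, three, loc);
+//     // neg->getAsConstantUnion() is non-null and holds -3; no EOpNegative
+//     // node remains in the tree.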
+
+TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary,
+ TIntermNode* childNode, const TType& returnType)
+{
+ if (unary) {
+ //
+ // Treat it like a unary operator.
+ // addUnaryMath() should get the type correct on its own,
+ // including constness (which would differ from the prototype).
+ //
+ TIntermTyped* child = childNode->getAsTyped();
+ if (child == nullptr)
+ return nullptr;
+
+ if (child->getAsConstantUnion()) {
+ TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType);
+ if (folded)
+ return folded;
+ }
+
+ return addUnaryNode(op, child, child->getLoc(), returnType);
+ } else {
+ // setAggregateOperator() calls fold() for constant folding
+ TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc);
+
+ return node;
+ }
+}
+
+//
+// This is the safe way to change the operator on an aggregate, as it
+// does lots of error checking and fixing, especially for establishing
+// a function call's operation on its set of parameters. Sequences
+// of instructions are also aggregates, but they just directly set
+// their operator to EOpSequence.
+//
+// Returns an aggregate node, which could be the one passed in if
+// it was already an aggregate.
+//
+TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type, TSourceLoc loc)
+{
+ TIntermAggregate* aggNode;
+
+ //
+ // Make sure we have an aggregate. If not turn it into one.
+ //
+ if (node != nullptr) {
+ aggNode = node->getAsAggregate();
+ if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
+ //
+ // Make an aggregate containing this node.
+ //
+ aggNode = new TIntermAggregate();
+ aggNode->getSequence().push_back(node);
+ if (loc.line == 0)
+ loc = node->getLoc();
+ }
+ } else
+ aggNode = new TIntermAggregate();
+
+ //
+ // Set the operator.
+ //
+ aggNode->setOperator(op);
+ if (loc.line != 0)
+ aggNode->setLoc(loc);
+
+ aggNode->setType(type);
+
+ return fold(aggNode);
+}
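+
+// Illustrative sketch (not part of the upstream source): wrapping a single
+// argument in an aggregate that carries a built-in's operator. 'intermediate',
+// 'argNode', 'returnType', and 'loc' are assumed:
+//
+//     TIntermTyped* call =
+//         intermediate.setAggregateOperator(argNode, EOpLength, returnType, loc);
+//     // 'call' is a TIntermAggregate with op EOpLength, or whatever fold()
+//     // reduced it to when the argument was constant.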
+
+bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const
+{
+ //
+ // Does the base type even allow the operation?
+ //
+ switch (node->getBasicType()) {
+ case EbtVoid:
+ return false;
+ case EbtAtomicUint:
+ case EbtSampler:
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV:
+#endif
+ // opaque types can be passed to functions
+ if (op == EOpFunction)
+ break;
+
+ // HLSL can assign samplers directly (no constructor)
+ if (source == EShSourceHlsl && node->getBasicType() == EbtSampler)
+ break;
+
+ // samplers can get assigned via a sampler constructor
+ // (well, not yet, but code in the rest of this function is ready for it)
+ if (node->getBasicType() == EbtSampler && op == EOpAssign &&
+ node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
+ break;
+
+ // otherwise, opaque types can't even be operated on, let alone converted
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+// This is the 'mechanism' here: it performs whatever conversion it is told to.
+// It is about basic type, not about shape.
+// The policy comes from the shader or the calling code.
+TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const
+{
+ //
+ // Add a new node for the conversion.
+ //
+ TIntermUnary* newNode = nullptr;
+
+ TOperator newOp = EOpNull;
+
+ switch (convertTo) {
+ case EbtDouble:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToDouble; break;
+ case EbtUint8: newOp = EOpConvUint8ToDouble; break;
+ case EbtInt16: newOp = EOpConvInt16ToDouble; break;
+ case EbtUint16: newOp = EOpConvUint16ToDouble; break;
+ case EbtInt: newOp = EOpConvIntToDouble; break;
+ case EbtUint: newOp = EOpConvUintToDouble; break;
+ case EbtBool: newOp = EOpConvBoolToDouble; break;
+ case EbtFloat: newOp = EOpConvFloatToDouble; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToDouble; break;
+ case EbtInt64: newOp = EOpConvInt64ToDouble; break;
+ case EbtUint64: newOp = EOpConvUint64ToDouble; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtFloat:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToFloat; break;
+ case EbtUint8: newOp = EOpConvUint8ToFloat; break;
+ case EbtInt16: newOp = EOpConvInt16ToFloat; break;
+ case EbtUint16: newOp = EOpConvUint16ToFloat; break;
+ case EbtInt: newOp = EOpConvIntToFloat; break;
+ case EbtUint: newOp = EOpConvUintToFloat; break;
+ case EbtBool: newOp = EOpConvBoolToFloat; break;
+ case EbtDouble: newOp = EOpConvDoubleToFloat; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToFloat; break;
+ case EbtInt64: newOp = EOpConvInt64ToFloat; break;
+ case EbtUint64: newOp = EOpConvUint64ToFloat; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtFloat16:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToFloat16; break;
+ case EbtUint8: newOp = EOpConvUint8ToFloat16; break;
+ case EbtInt16: newOp = EOpConvInt16ToFloat16; break;
+ case EbtUint16: newOp = EOpConvUint16ToFloat16; break;
+ case EbtInt: newOp = EOpConvIntToFloat16; break;
+ case EbtUint: newOp = EOpConvUintToFloat16; break;
+ case EbtBool: newOp = EOpConvBoolToFloat16; break;
+ case EbtFloat: newOp = EOpConvFloatToFloat16; break;
+ case EbtDouble: newOp = EOpConvDoubleToFloat16; break;
+ case EbtInt64: newOp = EOpConvInt64ToFloat16; break;
+ case EbtUint64: newOp = EOpConvUint64ToFloat16; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtBool:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToBool; break;
+ case EbtUint8: newOp = EOpConvUint8ToBool; break;
+ case EbtInt16: newOp = EOpConvInt16ToBool; break;
+ case EbtUint16: newOp = EOpConvUint16ToBool; break;
+ case EbtInt: newOp = EOpConvIntToBool; break;
+ case EbtUint: newOp = EOpConvUintToBool; break;
+ case EbtFloat: newOp = EOpConvFloatToBool; break;
+ case EbtDouble: newOp = EOpConvDoubleToBool; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToBool; break;
+ case EbtInt64: newOp = EOpConvInt64ToBool; break;
+ case EbtUint64: newOp = EOpConvUint64ToBool; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtInt8:
+ switch (node->getBasicType()) {
+ case EbtUint8: newOp = EOpConvUint8ToInt8; break;
+ case EbtInt16: newOp = EOpConvInt16ToInt8; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt8; break;
+ case EbtInt: newOp = EOpConvIntToInt8; break;
+ case EbtUint: newOp = EOpConvUintToInt8; break;
+ case EbtInt64: newOp = EOpConvInt64ToInt8; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt8; break;
+ case EbtBool: newOp = EOpConvBoolToInt8; break;
+ case EbtFloat: newOp = EOpConvFloatToInt8; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt8; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt8; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint8:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint8; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint8; break;
+ case EbtUint16: newOp = EOpConvUint16ToUint8; break;
+ case EbtInt: newOp = EOpConvIntToUint8; break;
+ case EbtUint: newOp = EOpConvUintToUint8; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint8; break;
+ case EbtUint64: newOp = EOpConvUint64ToUint8; break;
+ case EbtBool: newOp = EOpConvBoolToUint8; break;
+ case EbtFloat: newOp = EOpConvFloatToUint8; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint8; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint8; break;
+ default:
+ return nullptr;
+ }
+ break;
+
+ case EbtInt16:
+ switch (node->getBasicType()) {
+ case EbtUint8: newOp = EOpConvUint8ToInt16; break;
+ case EbtInt8: newOp = EOpConvInt8ToInt16; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt16; break;
+ case EbtInt: newOp = EOpConvIntToInt16; break;
+ case EbtUint: newOp = EOpConvUintToInt16; break;
+ case EbtInt64: newOp = EOpConvInt64ToInt16; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt16; break;
+ case EbtBool: newOp = EOpConvBoolToInt16; break;
+ case EbtFloat: newOp = EOpConvFloatToInt16; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt16; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt16; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint16:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint16; break;
+ case EbtUint8: newOp = EOpConvUint8ToUint16; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint16; break;
+ case EbtInt: newOp = EOpConvIntToUint16; break;
+ case EbtUint: newOp = EOpConvUintToUint16; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint16; break;
+ case EbtUint64: newOp = EOpConvUint64ToUint16; break;
+ case EbtBool: newOp = EOpConvBoolToUint16; break;
+ case EbtFloat: newOp = EOpConvFloatToUint16; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint16; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint16; break;
+ default:
+ return nullptr;
+ }
+ break;
+
+ case EbtInt:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToInt; break;
+ case EbtUint8: newOp = EOpConvUint8ToInt; break;
+ case EbtInt16: newOp = EOpConvInt16ToInt; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt; break;
+ case EbtUint: newOp = EOpConvUintToInt; break;
+ case EbtBool: newOp = EOpConvBoolToInt; break;
+ case EbtFloat: newOp = EOpConvFloatToInt; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt; break;
+ case EbtInt64: newOp = EOpConvInt64ToInt; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint; break;
+ case EbtUint8: newOp = EOpConvUint8ToUint; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint; break;
+ case EbtUint16: newOp = EOpConvUint16ToUint; break;
+ case EbtInt: newOp = EOpConvIntToUint; break;
+ case EbtBool: newOp = EOpConvBoolToUint; break;
+ case EbtFloat: newOp = EOpConvFloatToUint; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint; break;
+ case EbtUint64: newOp = EOpConvUint64ToUint; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtInt64:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToInt64; break;
+ case EbtUint8: newOp = EOpConvUint8ToInt64; break;
+ case EbtInt16: newOp = EOpConvInt16ToInt64; break;
+ case EbtUint16: newOp = EOpConvUint16ToInt64; break;
+ case EbtInt: newOp = EOpConvIntToInt64; break;
+ case EbtUint: newOp = EOpConvUintToInt64; break;
+ case EbtBool: newOp = EOpConvBoolToInt64; break;
+ case EbtFloat: newOp = EOpConvFloatToInt64; break;
+ case EbtDouble: newOp = EOpConvDoubleToInt64; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToInt64; break;
+ case EbtUint64: newOp = EOpConvUint64ToInt64; break;
+ default:
+ return nullptr;
+ }
+ break;
+ case EbtUint64:
+ switch (node->getBasicType()) {
+ case EbtInt8: newOp = EOpConvInt8ToUint64; break;
+ case EbtUint8: newOp = EOpConvUint8ToUint64; break;
+ case EbtInt16: newOp = EOpConvInt16ToUint64; break;
+ case EbtUint16: newOp = EOpConvUint16ToUint64; break;
+ case EbtInt: newOp = EOpConvIntToUint64; break;
+ case EbtUint: newOp = EOpConvUintToUint64; break;
+ case EbtBool: newOp = EOpConvBoolToUint64; break;
+ case EbtFloat: newOp = EOpConvFloatToUint64; break;
+ case EbtDouble: newOp = EOpConvDoubleToUint64; break;
+ case EbtFloat16: newOp = EOpConvFloat16ToUint64; break;
+ case EbtInt64: newOp = EOpConvInt64ToUint64; break;
+ default:
+ return nullptr;
+ }
+ break;
+ default:
+ return nullptr;
+ }
+
+ TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows());
+ newNode = addUnaryNode(newOp, node, node->getLoc(), newType);
+
+ if (node->getAsConstantUnion()) {
+ TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType);
+ if (folded)
+ return folded;
+ }
+
+ // Propagate specialization-constant-ness, if allowed
+ if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode))
+ newNode->getWritableType().getQualifier().makeSpecConstant();
+
+ return newNode;
+}
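+
+// Illustrative sketch (not part of the upstream source): the table above maps
+// an int-to-float request onto EOpConvIntToFloat. 'intermediate' and 'loc'
+// are assumed:
+//
+//     TIntermTyped* i = intermediate.addConstantUnion(7, loc, true);
+//     TIntermTyped* f = intermediate.createConversion(EbtFloat, i);
+//     // Because the operand is a constant union, the conversion folds and
+//     // 'f' is a float constant holding 7.0 rather than a unary node.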
+
+TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const
+{
+ return createConversion(convertTo, node);
+}
+
+// For converting a pair of operands to a binary operation to compatible
+// types with each other, relative to the operation in 'op'.
+// This does not cover assignment operations, which are asymmetric in that the
+// left type is not changeable.
+// See addConversion(op, type, node) for assignments and unary operation
+// conversions.
+//
+// Generally, this is focused on basic type conversion, not shape conversion.
+// See addShapeConversion() for shape conversions.
+//
+// Returns the converted pair of nodes.
+// Returns <nullptr, nullptr> when there is no conversion.
+std::tuple<TIntermTyped*, TIntermTyped*>
+TIntermediate::addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
+{
+ if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1))
+ return std::make_tuple(nullptr, nullptr);
+
+ if (node0->getType() != node1->getType()) {
+ // If differing structure, then no conversions.
+ if (node0->isStruct() || node1->isStruct())
+ return std::make_tuple(nullptr, nullptr);
+
+ // If differing arrays, then no conversions.
+ if (node0->getType().isArray() || node1->getType().isArray())
+ return std::make_tuple(nullptr, nullptr);
+
+ // No implicit conversions for operations involving cooperative matrices
+ if (node0->getType().isCoopMat() || node1->getType().isCoopMat())
+ return std::make_tuple(node0, node1);
+ }
+
+ auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes);
+
+ switch (op) {
+ //
+ // List all the binary ops that can implicitly convert one operand to the other's type;
+ // This implements the 'policy' for implicit type conversion.
+ //
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ case EOpEqual:
+ case EOpNotEqual:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpMul:
+ case EOpDiv:
+ case EOpMod:
+
+ case EOpVectorTimesScalar:
+ case EOpVectorTimesMatrix:
+ case EOpMatrixTimesVector:
+ case EOpMatrixTimesScalar:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+
+ case EOpSequence: // used by ?:
+
+ if (node0->getBasicType() == node1->getBasicType())
+ return std::make_tuple(node0, node1);
+
+ promoteTo = getConversionDestinatonType(node0->getBasicType(), node1->getBasicType(), op);
+ if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes)
+ return std::make_tuple(nullptr, nullptr);
+
+ break;
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ if (source == EShSourceHlsl)
+ promoteTo = std::make_tuple(EbtBool, EbtBool);
+ else
+ return std::make_tuple(node0, node1);
+ break;
+
+ // There are no conversions needed for GLSL; the shift amount just needs to be an
+ // integer type, as does the base.
+ // HLSL can promote bools to ints to make this work.
+ case EOpLeftShift:
+ case EOpRightShift:
+ if (source == EShSourceHlsl) {
+ TBasicType node0BasicType = node0->getBasicType();
+ if (node0BasicType == EbtBool)
+ node0BasicType = EbtInt;
+ if (node1->getBasicType() == EbtBool)
+ promoteTo = std::make_tuple(node0BasicType, EbtInt);
+ else
+ promoteTo = std::make_tuple(node0BasicType, node1->getBasicType());
+ } else {
+ if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType()))
+ return std::make_tuple(node0, node1);
+ else
+ return std::make_tuple(nullptr, nullptr);
+ }
+ break;
+
+ default:
+ if (node0->getType() == node1->getType())
+ return std::make_tuple(node0, node1);
+
+ return std::make_tuple(nullptr, nullptr);
+ }
+
+ TIntermTyped* newNode0;
+ TIntermTyped* newNode1;
+
+ if (std::get<0>(promoteTo) != node0->getType().getBasicType()) {
+ if (node0->getAsConstantUnion())
+ newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion());
+ else
+ newNode0 = createConversion(std::get<0>(promoteTo), node0);
+ } else
+ newNode0 = node0;
+
+ if (std::get<1>(promoteTo) != node1->getType().getBasicType()) {
+ if (node1->getAsConstantUnion())
+ newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion());
+ else
+ newNode1 = createConversion(std::get<1>(promoteTo), node1);
+ } else
+ newNode1 = node1;
+
+ return std::make_tuple(newNode0, newNode1);
+}
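+
+// Illustrative sketch (not part of the upstream source): mixed int/uint
+// arithmetic under a desktop profile of version 400+ (so int -> uint
+// promotion is allowed). 'intermediate', 'intNode', and 'uintNode' are
+// assumed:
+//
+//     auto pair = intermediate.addConversion(EOpAdd, intNode, uintNode);
+//     // Both std::get<0>(pair) and std::get<1>(pair) are EbtUint typed,
+//     // per getConversionDestinatonType(); on failure the tuple is
+//     // <nullptr, nullptr>.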
+
+//
+// Convert the node's type to the given type, as allowed by the operation involved: 'op'.
+// For implicit conversions, 'op' is not the requested conversion, it is the explicit
+// operation requiring the implicit conversion.
+//
+// Binary operation conversions should be handled by addConversion(op, node, node), not here.
+//
+// Returns a node representing the conversion, which could be the same
+// node passed in if no conversion was needed.
+//
+// Generally, this is focused on basic type conversion, not shape conversion.
+// See addShapeConversion() for shape conversions.
+//
+// Return nullptr if a conversion can't be done.
+//
+TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node)
+{
+ if (!isConversionAllowed(op, node))
+ return nullptr;
+
+ // Otherwise, if types are identical, no problem
+ if (type == node->getType())
+ return node;
+
+ // If one's a structure, then no conversions.
+ if (type.isStruct() || node->isStruct())
+ return nullptr;
+
+ // If one's an array, then no conversions.
+ if (type.isArray() || node->getType().isArray())
+ return nullptr;
+
+ // Note: callers are responsible for other aspects of shape,
+ // like vector and matrix sizes.
+
+ TBasicType promoteTo;
+ // GL_EXT_shader_16bit_storage can't do OpConstantComposite with
+ // 16-bit types, so disable promotion for those types.
+ bool canPromoteConstant = true;
+
+ switch (op) {
+ //
+ // Explicit conversions (unary operations)
+ //
+ case EOpConstructBool:
+ promoteTo = EbtBool;
+ break;
+ case EOpConstructFloat:
+ promoteTo = EbtFloat;
+ break;
+ case EOpConstructDouble:
+ promoteTo = EbtDouble;
+ break;
+ case EOpConstructFloat16:
+ promoteTo = EbtFloat16;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
+ break;
+ case EOpConstructInt8:
+ promoteTo = EbtInt8;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
+ break;
+ case EOpConstructUint8:
+ promoteTo = EbtUint8;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
+ break;
+ case EOpConstructInt16:
+ promoteTo = EbtInt16;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
+ break;
+ case EOpConstructUint16:
+ promoteTo = EbtUint16;
+ canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
+ break;
+ case EOpConstructInt:
+ promoteTo = EbtInt;
+ break;
+ case EOpConstructUint:
+ promoteTo = EbtUint;
+ break;
+ case EOpConstructInt64:
+ promoteTo = EbtInt64;
+ break;
+ case EOpConstructUint64:
+ promoteTo = EbtUint64;
+ break;
+
+ case EOpLogicalNot:
+
+ case EOpFunctionCall:
+
+ case EOpReturn:
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpVectorTimesScalarAssign:
+ case EOpMatrixTimesScalarAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+
+ case EOpAtan:
+ case EOpClamp:
+ case EOpCross:
+ case EOpDistance:
+ case EOpDot:
+ case EOpDst:
+ case EOpFaceForward:
+ case EOpFma:
+ case EOpFrexp:
+ case EOpLdexp:
+ case EOpMix:
+ case EOpLit:
+ case EOpMax:
+ case EOpMin:
+ case EOpModf:
+ case EOpPow:
+ case EOpReflect:
+ case EOpRefract:
+ case EOpSmoothStep:
+ case EOpStep:
+
+ case EOpSequence:
+ case EOpConstructStruct:
+ case EOpConstructCooperativeMatrix:
+
+ if (type.getBasicType() == EbtReference || node->getType().getBasicType() == EbtReference) {
+ // types must match to assign a reference
+ if (type == node->getType())
+ return node;
+ else
+ return nullptr;
+ }
+
+ if (type.getBasicType() == node->getType().getBasicType())
+ return node;
+
+ if (canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op))
+ promoteTo = type.getBasicType();
+ else
+ return nullptr;
+ break;
+
+ // For GLSL, there are no conversions needed; the shift amount just needs to be an
+ // integer type, as do the base/result.
+ // HLSL can convert the shift from a bool to an int.
+ case EOpLeftShiftAssign:
+ case EOpRightShiftAssign:
+ {
+ if (source == EShSourceHlsl && node->getType().getBasicType() == EbtBool)
+ promoteTo = type.getBasicType();
+ else {
+ if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType()))
+ return node;
+ else
+ return nullptr;
+ }
+ break;
+ }
+
+ default:
+ // default is to require a match; all exceptions should have case statements above
+
+ if (type.getBasicType() == node->getType().getBasicType())
+ return node;
+ else
+ return nullptr;
+ }
+
+ if (canPromoteConstant && node->getAsConstantUnion())
+ return promoteConstantUnion(promoteTo, node->getAsConstantUnion());
+
+ //
+ // Add a new node for the conversion.
+ //
+ TIntermTyped* newNode = createConversion(promoteTo, node);
+
+ return newNode;
+}
+
+// Convert the node's shape to match the given type's shape, as allowed by the
+// operation involved: 'op'. This is for situations where there is only one
+// direction to consider for the shape conversion.
+//
+// This implements the policy; it calls addShapeConversion() for the mechanism.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+// Return 'node' if no conversion was done. Promotion handles final shape
+// checking.
+//
+TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node)
+{
+ // some source languages don't do this
+ switch (source) {
+ case EShSourceHlsl:
+ break;
+ case EShSourceGlsl:
+ default:
+ return node;
+ }
+
+ // some operations don't do this
+ switch (op) {
+ case EOpFunctionCall:
+ case EOpReturn:
+ break;
+
+ case EOpMulAssign:
+ // want to support vector *= scalar native ops in AST and lower, not smear, similarly for
+ // matrix *= scalar, etc.
+
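+ // fall through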
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+ if (node->getVectorSize() == 1)
+ return node;
+ break;
+
+ case EOpAssign:
+ break;
+
+ case EOpMix:
+ break;
+
+ default:
+ return node;
+ }
+
+ return addShapeConversion(type, node);
+}
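+
+// Illustrative sketch (not part of the upstream source): when the intermediate
+// was built from HLSL (source == EShSourceHlsl), a scalar being returned can
+// be smeared to the declared vector return type. 'intermediate' and a float
+// scalar 'scalarNode' are assumed:
+//
+//     TType float4Type(EbtFloat, EvqTemporary, 4);
+//     TIntermTyped* smeared =
+//         intermediate.addUniShapeConversion(EOpReturn, float4Type, scalarNode);
+//     // For GLSL input this returns 'scalarNode' unchanged.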
+
+// Convert the nodes' shapes to be compatible for the operation 'op'.
+//
+// This implements the policy; it calls addShapeConversion() for the mechanism.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode)
+{
+ // some source languages don't do this
+ switch (source) {
+ case EShSourceHlsl:
+ break;
+ case EShSourceGlsl:
+ default:
+ return;
+ }
+
+ // some operations don't do this
+ // 'break' will mean attempt bidirectional conversion
+ switch (op) {
+ case EOpMulAssign:
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+ // switch to unidirectional conversion (the lhs can't change)
+ rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode);
+ return;
+
+ case EOpMul:
+ // matrix multiply does not change shapes
+ if (lhsNode->isMatrix() && rhsNode->isMatrix())
+ return;
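+ // fall through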
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ // want to support vector * scalar native ops in AST and lower, not smear, similarly for
+ // matrix * vector, etc.
+ if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1)
+ return;
+ break;
+
+ case EOpRightShift:
+ case EOpLeftShift:
+ // can natively support the right operand being a scalar and the left a vector,
+ // but not the reverse
+ if (rhsNode->getVectorSize() == 1)
+ return;
+ break;
+
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+
+ case EOpEqual:
+ case EOpNotEqual:
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+
+ case EOpMix:
+ break;
+
+ default:
+ return;
+ }
+
+ // Do bidirectional conversions
+ if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) {
+ if (lhsNode->getType().isScalarOrVec1())
+ lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+ else
+ rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+ }
+ lhsNode = addShapeConversion(rhsNode->getType(), lhsNode);
+ rhsNode = addShapeConversion(lhsNode->getType(), rhsNode);
+}
+
+// Convert the node's shape to match the given type's shape, as allowed by the
+// operation involved: 'op'.
+//
+// Generally, the AST represents allowed GLSL shapes, so this isn't needed
+// for GLSL. Bad shapes are caught in conversion or promotion.
+//
+// Return 'node' if no conversion was done. Promotion handles final shape
+// checking.
+//
+TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node)
+{
+ // no conversion needed
+ if (node->getType() == type)
+ return node;
+
+ // structures and arrays don't change shape, either to or from
+ if (node->getType().isStruct() || node->getType().isArray() ||
+ type.isStruct() || type.isArray())
+ return node;
+
+ // The new node that handles the conversion
+ TOperator constructorOp = mapTypeToConstructorOp(type);
+
+ if (source == EShSourceHlsl) {
+ // HLSL rules for scalar, vector and matrix conversions:
+ // 1) scalar can become anything, initializing every component with its value
+ // 2) vector and matrix can become scalar; the first element is used (warning: truncation)
+ // 3) matrix can become a matrix with fewer rows and/or columns (warning: truncation)
+ // 4) vector can become a smaller vector (warning: truncation)
+ // 5a) vector 4 can become 2x2 matrix (special case) (same packing layout; it's a reinterpret)
+ // 5b) 2x2 matrix can become vector 4 (special case) (same packing layout; it's a reinterpret)
+
+ const TType &sourceType = node->getType();
+
+ // rule 1 for scalar to matrix is special
+ if (sourceType.isScalarOrVec1() && type.isMatrix()) {
+
+ // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its
+ // own devices, the constructor from a scalar would populate the diagonal. This forces replication
+ // to every matrix element.
+
+ // Note that if the node is complex (e.g., a function call), we don't want to duplicate it here
+ // repeatedly, so we copy it to a temp, then use the temp.
+ const int matSize = type.computeNumComponents();
+ TIntermAggregate* rhsAggregate = new TIntermAggregate();
+
+ const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr);
+
+ if (!isSimple) {
+ assert(0); // TODO: use node replicator service when available.
+ }
+
+ for (int x = 0; x < matSize; ++x)
+ rhsAggregate->getSequence().push_back(node);
+
+ return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc());
+ }
+
+ // rules 1 and 2
+ if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar()))
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+
+ // rules 3 and 5b
+ if (sourceType.isMatrix()) {
+ // rule 3
+ if (type.isMatrix()) {
+ if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) &&
+ sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows())
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ // rule 5b
+ } else if (type.isVector()) {
+ if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2)
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ }
+ }
+
+ // rules 4 and 5a
+ if (sourceType.isVector()) {
+ // rule 4
+ if (type.isVector())
+ {
+ if (sourceType.getVectorSize() > type.getVectorSize())
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ // rule 5a
+ } else if (type.isMatrix()) {
+ if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2)
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+ }
+ }
+ }
+
+ // scalar -> vector or vec1 -> vector or
+ // vector -> scalar or
+ // bigger vector -> smaller vector
+ if ((node->getType().isScalarOrVec1() && type.isVector()) ||
+ (node->getType().isVector() && type.isScalar()) ||
+ (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize()))
+ return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc());
+
+ return node;
+}
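+
+// Illustrative sketch (not part of the upstream source): HLSL rule 1 with a
+// matrix target. Assuming HLSL input, an 'intermediate' instance, and a float
+// scalar symbol node 'scalarNode':
+//
+//     TType mat2Type(EbtFloat, EvqTemporary, 0, 2, 2);
+//     TIntermTyped* m = intermediate.addShapeConversion(mat2Type, scalarNode);
+//     // 'm' is an EOpConstructMat2x2 aggregate listing 'scalarNode' four
+//     // times, forcing replication instead of a diagonal construction.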
+
+bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const
+{
+ // integral promotions
+ if (to == EbtInt) {
+ switch(from) {
+ case EbtInt8:
+ case EbtInt16:
+ case EbtUint8:
+ case EbtUint16:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const
+{
+ // floating-point promotions
+ if (to == EbtDouble) {
+ switch(from) {
+ case EbtFloat16:
+ case EbtFloat:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const
+{
+ switch (from) {
+ case EbtInt8:
+ switch (to) {
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtUint8:
+ switch (to) {
+ case EbtInt16:
+ case EbtUint16:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt16:
+ switch(to) {
+ case EbtUint16:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtUint16:
+ switch(to) {
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt:
+ switch(to) {
+ case EbtUint:
+ return version >= 400 || (source == EShSourceHlsl);
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtUint:
+ switch(to) {
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt64:
+ if (to == EbtUint64) {
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const
+{
+ if (to == EbtFloat && from == EbtFloat16) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const
+{
+ switch (from) {
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ switch (to) {
+ case EbtFloat16:
+ case EbtFloat:
+ case EbtDouble:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt:
+ case EbtUint:
+ switch(to) {
+ case EbtFloat:
+ case EbtDouble:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case EbtInt64:
+ case EbtUint64:
+ if (to == EbtDouble) {
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return false;
+}
+
+//
+// See if the 'from' type is allowed to be implicitly converted to the
+// 'to' type. This is not about vector/array/struct, only about basic type.
+//
+bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const
+{
+ if (profile == EEsProfile || version == 110)
+ return false;
+
+ if (from == to)
+ return true;
+
+ // TODO: Move more policies into language-specific handlers.
+ // Some languages allow more general (or potentially, more specific) conversions under some conditions.
+ if (source == EShSourceHlsl) {
+ const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool);
+ const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool);
+
+ if (fromConvertable && toConvertable) {
+ switch (op) {
+ case EOpAndAssign: // assignments can perform arbitrary conversions
+ case EOpInclusiveOrAssign: // ...
+ case EOpExclusiveOrAssign: // ...
+ case EOpAssign: // ...
+ case EOpAddAssign: // ...
+ case EOpSubAssign: // ...
+ case EOpMulAssign: // ...
+ case EOpVectorTimesScalarAssign: // ...
+ case EOpMatrixTimesScalarAssign: // ...
+ case EOpDivAssign: // ...
+ case EOpModAssign: // ...
+ case EOpReturn: // function returns can also perform arbitrary conversions
+ case EOpFunctionCall: // conversion of a calling parameter
+ case EOpLogicalNot:
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ case EOpConstructStruct:
+ return true;
+ default:
+ break;
+ }
+ }
+ }
+
+ bool explicitTypesEnabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int32) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int64) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
+ extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float64);
+
+ if (explicitTypesEnabled) {
+ // integral promotions
+ if (isIntegralPromotion(from, to)) {
+ return true;
+ }
+
+ // floating-point promotions
+ if (isFPPromotion(from, to)) {
+ return true;
+ }
+
+ // integral conversions
+ if (isIntegralConversion(from, to)) {
+ return true;
+ }
+
+ // floating-point conversions
+ if (isFPConversion(from, to)) {
+ return true;
+ }
+
+ // floating-integral conversions
+ if (isFPIntegralConversion(from, to)) {
+ return true;
+ }
+
+ // hlsl supported conversions
+ if (source == EShSourceHlsl) {
+ if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat))
+ return true;
+ }
+ } else {
+ switch (to) {
+ case EbtDouble:
+ switch (from) {
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtFloat:
+ case EbtDouble:
+ return true;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+ case EbtFloat16:
+ return extensionRequested(E_GL_AMD_gpu_shader_half_float);
+#endif
+ default:
+ return false;
+ }
+ case EbtFloat:
+ switch (from) {
+ case EbtInt:
+ case EbtUint:
+ case EbtFloat:
+ return true;
+ case EbtBool:
+ return (source == EShSourceHlsl);
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ case EbtFloat16:
+ return
+#ifdef AMD_EXTENSIONS
+ extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
+#endif
+ (source == EShSourceHlsl);
+ default:
+ return false;
+ }
+ case EbtUint:
+ switch (from) {
+ case EbtInt:
+ return version >= 400 || (source == EShSourceHlsl);
+ case EbtUint:
+ return true;
+ case EbtBool:
+ return (source == EShSourceHlsl);
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtInt:
+ switch (from) {
+ case EbtInt:
+ return true;
+ case EbtBool:
+ return (source == EShSourceHlsl);
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtUint64:
+ switch (from) {
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ return true;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtInt64:
+ switch (from) {
+ case EbtInt:
+ case EbtInt64:
+ return true;
+#ifdef AMD_EXTENSIONS
+ case EbtInt16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+#endif
+ default:
+ return false;
+ }
+ case EbtFloat16:
+#ifdef AMD_EXTENSIONS
+ switch (from) {
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+ case EbtFloat16:
+ return extensionRequested(E_GL_AMD_gpu_shader_half_float);
+ default:
+ break;
+ }
+#endif
+ return false;
+ case EbtUint16:
+#ifdef AMD_EXTENSIONS
+ switch (from) {
+ case EbtInt16:
+ case EbtUint16:
+ return extensionRequested(E_GL_AMD_gpu_shader_int16);
+ default:
+ break;
+ }
+#endif
+ return false;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
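+
+// Illustrative sketch (not part of the upstream source): sample queries
+// against the policy above, assuming core desktop GLSL of version 400+ with
+// no extensions requested:
+//
+//     intermediate.canImplicitlyPromote(EbtInt, EbtFloat, EOpAdd); // true
+//     intermediate.canImplicitlyPromote(EbtInt, EbtUint, EOpAdd);  // true (400+)
+//     intermediate.canImplicitlyPromote(EbtFloat, EbtInt, EOpAdd); // false: no demotion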
+
+static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType) {
+ switch(sintType) {
+ case EbtInt8:
+ switch(uintType) {
+ case EbtUint8:
+ case EbtUint16:
+ case EbtUint:
+ case EbtUint64:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ case EbtInt16:
+ switch(uintType) {
+ case EbtUint8:
+ return true;
+ case EbtUint16:
+ case EbtUint:
+ case EbtUint64:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ case EbtInt:
+ switch(uintType) {
+ case EbtUint8:
+ case EbtUint16:
+ return true;
+ case EbtUint:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ case EbtInt64:
+ switch(uintType) {
+ case EbtUint8:
+ case EbtUint16:
+ case EbtUint:
+ return true;
+ case EbtUint64:
+ return false;
+ default:
+ assert(false);
+ return false;
+ }
+ break;
+ default:
+ assert(false);
+ return false;
+ }
+}
+
+static TBasicType getCorrespondingUnsignedType(TBasicType type) {
+ switch(type) {
+ case EbtInt8:
+ return EbtUint8;
+ case EbtInt16:
+ return EbtUint16;
+ case EbtInt:
+ return EbtUint;
+ case EbtInt64:
+ return EbtUint64;
+ default:
+ assert(false);
+ return EbtNumTypes;
+ }
+}
+
+// Implements the following rules
+// - If either operand has type float64_t or derived from float64_t,
+// the other shall be converted to float64_t or derived type.
+// - Otherwise, if either operand has type float32_t or derived from
+// float32_t, the other shall be converted to float32_t or derived type.
+// - Otherwise, if either operand has type float16_t or derived from
+// float16_t, the other shall be converted to float16_t or derived type.
+// - Otherwise, if both operands have integer types the following rules
+// shall be applied to the operands:
+// - If both operands have the same type, no further conversion
+// is needed.
+// - Otherwise, if both operands have signed integer types or both
+// have unsigned integer types, the operand with the type of lesser
+// integer conversion rank shall be converted to the type of the
+// operand with greater rank.
+// - Otherwise, if the operand that has unsigned integer type has rank
+// greater than or equal to the rank of the type of the other
+// operand, the operand with signed integer type shall be converted
+// to the type of the operand with unsigned integer type.
+// - Otherwise, if the type of the operand with signed integer type can
+// represent all of the values of the type of the operand with
+// unsigned integer type, the operand with unsigned integer type
+// shall be converted to the type of the operand with signed
+// integer type.
+// - Otherwise, both operands shall be converted to the unsigned
+// integer type corresponding to the type of the operand with signed
+// integer type.
+
+std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const
+{
+ TBasicType res0 = EbtNumTypes;
+ TBasicType res1 = EbtNumTypes;
+
+ if (profile == EEsProfile || version == 110)
+ return std::make_tuple(res0, res1);
+
+ if (source == EShSourceHlsl) {
+ if (canImplicitlyPromote(type1, type0, op)) {
+ res0 = type0;
+ res1 = type0;
+ } else if (canImplicitlyPromote(type0, type1, op)) {
+ res0 = type1;
+ res1 = type1;
+ }
+ return std::make_tuple(res0, res1);
+ }
+
+ if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) ||
+ (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) {
+ res0 = EbtDouble;
+ res1 = EbtDouble;
+ } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) ||
+ (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) {
+ res0 = EbtFloat;
+ res1 = EbtFloat;
+ } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) ||
+ (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) {
+ res0 = EbtFloat16;
+ res1 = EbtFloat16;
+ } else if (isTypeInt(type0) && isTypeInt(type1) &&
+ (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) {
+ if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) ||
+ (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) {
+ if (getTypeRank(type0) < getTypeRank(type1)) {
+ res0 = type1;
+ res1 = type1;
+ } else {
+ res0 = type0;
+ res1 = type0;
+ }
+ } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) {
+ res0 = type0;
+ res1 = type0;
+ } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) {
+ res0 = type1;
+ res1 = type1;
+ } else if (isTypeSignedInt(type0)) {
+ if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) {
+ res0 = type0;
+ res1 = type0;
+ } else {
+ res0 = getCorrespondingUnsignedType(type0);
+ res1 = getCorrespondingUnsignedType(type0);
+ }
+ } else if (isTypeSignedInt(type1)) {
+ if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) {
+ res0 = type1;
+ res1 = type1;
+ } else {
+ res0 = getCorrespondingUnsignedType(type1);
+ res1 = getCorrespondingUnsignedType(type1);
+ }
+ }
+ }
+
+ return std::make_tuple(res0, res1);
+}
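+
+// Illustrative sketch (not part of the upstream source): the rules above
+// applied to 'int + uint' (equal rank, one operand unsigned), assuming a
+// desktop profile of version 400+ so the int -> uint promotion is allowed:
+//
+//     auto dst = intermediate.getConversionDestinatonType(EbtInt, EbtUint, EOpAdd);
+//     // dst is (EbtUint, EbtUint): the signed operand converts to the
+//     // corresponding unsigned type of equal rank.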
+
+//
+// Given a type, find what operation would fully construct it.
+//
+TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
+{
+ TOperator op = EOpNull;
+
+ if (type.getQualifier().nonUniform)
+ return EOpConstructNonuniform;
+
+ if (type.isCoopMat())
+ return EOpConstructCooperativeMatrix;
+
+ switch (type.getBasicType()) {
+ case EbtStruct:
+ op = EOpConstructStruct;
+ break;
+ case EbtSampler:
+ if (type.getSampler().combined)
+ op = EOpConstructTextureSampler;
+ break;
+ case EbtFloat:
+ if (type.isMatrix()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructMat2x2; break;
+ case 3: op = EOpConstructMat2x3; break;
+ case 4: op = EOpConstructMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructMat3x2; break;
+ case 3: op = EOpConstructMat3x3; break;
+ case 4: op = EOpConstructMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructMat4x2; break;
+ case 3: op = EOpConstructMat4x3; break;
+ case 4: op = EOpConstructMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ default: break; // some compilers want this
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructFloat; break;
+ case 2: op = EOpConstructVec2; break;
+ case 3: op = EOpConstructVec3; break;
+ case 4: op = EOpConstructVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtDouble:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructDMat2x2; break;
+ case 3: op = EOpConstructDMat2x3; break;
+ case 4: op = EOpConstructDMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructDMat3x2; break;
+ case 3: op = EOpConstructDMat3x3; break;
+ case 4: op = EOpConstructDMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructDMat4x2; break;
+ case 3: op = EOpConstructDMat4x3; break;
+ case 4: op = EOpConstructDMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructDouble; break;
+ case 2: op = EOpConstructDVec2; break;
+ case 3: op = EOpConstructDVec3; break;
+ case 4: op = EOpConstructDVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtFloat16:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructF16Mat2x2; break;
+ case 3: op = EOpConstructF16Mat2x3; break;
+ case 4: op = EOpConstructF16Mat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructF16Mat3x2; break;
+ case 3: op = EOpConstructF16Mat3x3; break;
+ case 4: op = EOpConstructF16Mat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructF16Mat4x2; break;
+ case 3: op = EOpConstructF16Mat4x3; break;
+ case 4: op = EOpConstructF16Mat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ }
+ else {
+ switch (type.getVectorSize()) {
+ case 1: op = EOpConstructFloat16; break;
+ case 2: op = EOpConstructF16Vec2; break;
+ case 3: op = EOpConstructF16Vec3; break;
+ case 4: op = EOpConstructF16Vec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtInt8:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt8; break;
+ case 2: op = EOpConstructI8Vec2; break;
+ case 3: op = EOpConstructI8Vec3; break;
+ case 4: op = EOpConstructI8Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtUint8:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint8; break;
+ case 2: op = EOpConstructU8Vec2; break;
+ case 3: op = EOpConstructU8Vec3; break;
+ case 4: op = EOpConstructU8Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtInt16:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt16; break;
+ case 2: op = EOpConstructI16Vec2; break;
+ case 3: op = EOpConstructI16Vec3; break;
+ case 4: op = EOpConstructI16Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtUint16:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint16; break;
+ case 2: op = EOpConstructU16Vec2; break;
+ case 3: op = EOpConstructU16Vec3; break;
+ case 4: op = EOpConstructU16Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtInt:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructIMat2x2; break;
+ case 3: op = EOpConstructIMat2x3; break;
+ case 4: op = EOpConstructIMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructIMat3x2; break;
+ case 3: op = EOpConstructIMat3x3; break;
+ case 4: op = EOpConstructIMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructIMat4x2; break;
+ case 3: op = EOpConstructIMat4x3; break;
+ case 4: op = EOpConstructIMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt; break;
+ case 2: op = EOpConstructIVec2; break;
+ case 3: op = EOpConstructIVec3; break;
+ case 4: op = EOpConstructIVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtUint:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructUMat2x2; break;
+ case 3: op = EOpConstructUMat2x3; break;
+ case 4: op = EOpConstructUMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructUMat3x2; break;
+ case 3: op = EOpConstructUMat3x3; break;
+ case 4: op = EOpConstructUMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructUMat4x2; break;
+ case 3: op = EOpConstructUMat4x3; break;
+ case 4: op = EOpConstructUMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint; break;
+ case 2: op = EOpConstructUVec2; break;
+ case 3: op = EOpConstructUVec3; break;
+ case 4: op = EOpConstructUVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtInt64:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructInt64; break;
+ case 2: op = EOpConstructI64Vec2; break;
+ case 3: op = EOpConstructI64Vec3; break;
+ case 4: op = EOpConstructI64Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtUint64:
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructUint64; break;
+ case 2: op = EOpConstructU64Vec2; break;
+ case 3: op = EOpConstructU64Vec3; break;
+ case 4: op = EOpConstructU64Vec4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case EbtBool:
+ if (type.getMatrixCols()) {
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructBMat2x2; break;
+ case 3: op = EOpConstructBMat2x3; break;
+ case 4: op = EOpConstructBMat2x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructBMat3x2; break;
+ case 3: op = EOpConstructBMat3x3; break;
+ case 4: op = EOpConstructBMat3x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: op = EOpConstructBMat4x2; break;
+ case 3: op = EOpConstructBMat4x3; break;
+ case 4: op = EOpConstructBMat4x4; break;
+ default: break; // some compilers want this
+ }
+ break;
+ }
+ } else {
+ switch(type.getVectorSize()) {
+ case 1: op = EOpConstructBool; break;
+ case 2: op = EOpConstructBVec2; break;
+ case 3: op = EOpConstructBVec3; break;
+ case 4: op = EOpConstructBVec4; break;
+ default: break; // some compilers want this
+ }
+ }
+ break;
+ case EbtReference:
+ op = EOpConstructReference;
+ break;
+ default:
+ break;
+ }
+
+ return op;
+}
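+
+// Illustrative sketch (not part of the upstream source): mapping some types
+// to their constructor operators; 'intermediate' is assumed:
+//
+//     TType vec3Type(EbtFloat, EvqTemporary, 3);
+//     TType mat4Type(EbtFloat, EvqTemporary, 0, 4, 4);
+//     intermediate.mapTypeToConstructorOp(vec3Type); // EOpConstructVec3
+//     intermediate.mapTypeToConstructorOp(mat4Type); // EOpConstructMat4x4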
+
+//
+// Safe way to combine two nodes into an aggregate. Works with null pointers,
+// a node that's not an aggregate yet, etc.
+//
+// Returns the resulting aggregate, unless nullptr was passed in for
+// both existing nodes.
+//
+TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right)
+{
+ if (left == nullptr && right == nullptr)
+ return nullptr;
+
+ TIntermAggregate* aggNode = nullptr;
+ if (left != nullptr)
+ aggNode = left->getAsAggregate();
+ if (aggNode == nullptr || aggNode->getOp() != EOpNull) {
+ aggNode = new TIntermAggregate;
+ if (left != nullptr)
+ aggNode->getSequence().push_back(left);
+ }
+
+ if (right != nullptr)
+ aggNode->getSequence().push_back(right);
+
+ return aggNode;
+}
+
+TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc)
+{
+ TIntermAggregate* aggNode = growAggregate(left, right);
+ if (aggNode)
+ aggNode->setLoc(loc);
+
+ return aggNode;
+}
+
+//
+// Turn an existing node into an aggregate.
+//
+// Returns an aggregate, unless nullptr was passed in for the existing node.
+//
+TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node)
+{
+ if (node == nullptr)
+ return nullptr;
+
+ TIntermAggregate* aggNode = new TIntermAggregate;
+ aggNode->getSequence().push_back(node);
+ aggNode->setLoc(node->getLoc());
+
+ return aggNode;
+}
+
+TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc)
+{
+ if (node == nullptr)
+ return nullptr;
+
+ TIntermAggregate* aggNode = new TIntermAggregate;
+ aggNode->getSequence().push_back(node);
+ aggNode->setLoc(loc);
+
+ return aggNode;
+}
+
+//
+// Make an aggregate with an empty sequence.
+//
+TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc)
+{
+ TIntermAggregate* aggNode = new TIntermAggregate;
+ aggNode->setLoc(loc);
+
+ return aggNode;
+}
+
+//
+// For "if" test nodes. There are three children: a condition,
+// a true path, and a false path. The two paths are in the
+// nodePair.
+//
+// Returns the selection node created.
+//
+TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc)
+{
+ //
+ // Don't prune the false path for compile-time constants; it's needed
+ // for static access analysis.
+ //
+
+ TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2);
+ node->setLoc(loc);
+
+ return node;
+}
+
+TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
+{
+ // However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators
+ // ... are not included in the operators that can create a constant expression.
+ //
+ // if (left->getType().getQualifier().storage == EvqConst &&
+ // right->getType().getQualifier().storage == EvqConst) {
+
+ // return right;
+ //}
+
+ TIntermTyped *commaAggregate = growAggregate(left, right, loc);
+ commaAggregate->getAsAggregate()->setOperator(EOpComma);
+ commaAggregate->setType(right->getType());
+ commaAggregate->getWritableType().getQualifier().makeTemporary();
+
+ return commaAggregate;
+}
+
+TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc)
+{
+ TIntermMethod* method = new TIntermMethod(object, type, *name);
+ method->setLoc(loc);
+
+ return method;
+}
+
+//
+// For "?:" test nodes. There are three children: a condition,
+// a true path, and a false path. The two paths are specified
+// as separate parameters. For vector 'cond', the true and false
+// are not paths, but vectors to mix.
+//
+// Specialization constant operations include
+// - The ternary operator ( ? : )
+//
+// Returns the selection node created, or nullptr if one could not be.
+//
+TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock,
+ const TSourceLoc& loc)
+{
+ // If it's void, go to the if-then-else selection()
+ if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) {
+ TIntermNodePair pair = { trueBlock, falseBlock };
+ TIntermSelection* selection = addSelection(cond, pair, loc);
+ if (getSource() == EShSourceHlsl)
+ selection->setNoShortCircuit();
+
+ return selection;
+ }
+
+ //
+ // Get compatible types.
+ //
+ auto children = addConversion(EOpSequence, trueBlock, falseBlock);
+ trueBlock = std::get<0>(children);
+ falseBlock = std::get<1>(children);
+
+ if (trueBlock == nullptr || falseBlock == nullptr)
+ return nullptr;
+
+ // Handle a vector condition as a mix
+ if (!cond->getType().isScalarOrVec1()) {
+ TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary,
+ cond->getType().getVectorSize());
+ // smear true/false operands as needed
+ trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock);
+ falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock);
+
+ // After conversion, types have to match.
+ if (falseBlock->getType() != trueBlock->getType())
+ return nullptr;
+
+ // make the mix operation
+ TIntermAggregate* mix = makeAggregate(loc);
+ mix = growAggregate(mix, falseBlock);
+ mix = growAggregate(mix, trueBlock);
+ mix = growAggregate(mix, cond);
+ mix->setType(targetVectorType);
+ mix->setOp(EOpMix);
+
+ return mix;
+ }
+
+ // Now have a scalar condition...
+
+ // Convert true and false expressions to matching types
+ addBiShapeConversion(EOpMix, trueBlock, falseBlock);
+
+ // After conversion, types have to match.
+ if (falseBlock->getType() != trueBlock->getType())
+ return nullptr;
+
+ // Eliminate the selection when the condition is a scalar and all operands are constant.
+ if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) {
+ if (cond->getAsConstantUnion()->getConstArray()[0].getBConst())
+ return trueBlock;
+ else
+ return falseBlock;
+ }
+
+ //
+ // Make a selection node.
+ //
+ TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType());
+ node->setLoc(loc);
+ node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision);
+
+ if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) ||
+ (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() &&
+ falseBlock->getQualifier().isConstant()))
+ node->getQualifier().makeSpecConstant();
+ else
+ node->getQualifier().makeTemporary();
+
+ if (getSource() == EShSourceHlsl)
+ node->setNoShortCircuit();
+
+ return node;
+}
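+
+// Illustrative sketch (assumed input): a scalar condition "c ? t : f" becomes
+// a TIntermSelection, while a vector condition such as
+//
+//     bvec2 c;  c ? vec2(1.0) : vec2(0.0)
+//
+// is lowered to the equivalent of mix(vec2(0.0), vec2(1.0), c), smearing
+// scalar true/false operands up to the condition's vector size first.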
+
+//
+// Constant terminal nodes. Each has a union that contains bool, float, or int constants
+//
+// Returns the constant union node created.
+//
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const
+{
+ TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t);
+ node->getQualifier().storage = EvqConst;
+ node->setLoc(loc);
+ if (literal)
+ node->setLiteral();
+
+ return node;
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setI8Const(i8);
+
+ return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setUConst(u8);
+
+ return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setI16Const(i16);
+
+ return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setU16Const(u16);
+
+ return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setIConst(i);
+
+ return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setUConst(u);
+
+ return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setI64Const(i64);
+
+ return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setU64Const(u64);
+
+ return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setBConst(b);
+
+ return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const
+{
+ assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16);
+
+ TConstUnionArray unionArray(1);
+ unionArray[0].setDConst(d);
+
+ return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const
+{
+ TConstUnionArray unionArray(1);
+ unionArray[0].setSConst(s);
+
+ return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal);
+}
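+
+// A minimal usage sketch of the overloads above (illustrative; assumes a
+// TIntermediate instance 'ir' and a source location 'loc'):
+//
+//     TIntermConstantUnion* three = ir.addConstantUnion(3, loc, false);
+//     TIntermConstantUnion* half  = ir.addConstantUnion(0.5, EbtFloat, loc, true);
+//
+// Both nodes carry EvqConst storage; the second is also marked as a literal
+// that appeared directly in the source text.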
+
+// Put vector swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc)
+{
+ TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc);
+ sequence.push_back(constIntNode);
+}
+
+// Put matrix swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc)
+{
+ TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc);
+ sequence.push_back(constIntNode);
+ constIntNode = addConstantUnion(selector.coord2, loc);
+ sequence.push_back(constIntNode);
+}
+
+// Make an aggregate node that has a sequence of all selectors.
+template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc);
+template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc);
+template<typename selectorType>
+TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc)
+{
+ TIntermAggregate* node = new TIntermAggregate(EOpSequence);
+
+ node->setLoc(loc);
+ TIntermSequence &sequenceVector = node->getSequence();
+
+ for (int i = 0; i < selector.size(); i++)
+ pushSelector(sequenceVector, selector[i], loc);
+
+ return node;
+}
+
+//
+// Follow the left branches down to the root of an l-value
+// expression (just "." and []).
+//
+// Return the base of the l-value (where following indexing quits working).
+// Return nullptr if the chain of dereferences cannot be followed.
+//
+// 'swizzleOkay' says whether or not it is okay to consider a swizzle
+// a valid part of the dereference chain.
+//
+const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay)
+{
+ do {
+ const TIntermBinary* binary = node->getAsBinaryNode();
+ if (binary == nullptr)
+ return node;
+ TOperator op = binary->getOp();
+ if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle)
+ return nullptr;
+ if (! swizzleOkay) {
+ if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle)
+ return nullptr;
+ if ((op == EOpIndexDirect || op == EOpIndexIndirect) &&
+ (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) &&
+ ! binary->getLeft()->getType().isArray())
+ return nullptr;
+ }
+ node = node->getAsBinaryNode()->getLeft();
+ } while (true);
+}
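+
+// For illustration: with "vec4 a[4];", the l-value "a[3].xy" is walked as
+// EOpVectorSwizzle -> EOpIndexDirect -> symbol, so the base returned is 'a'
+// when swizzleOkay is true, and nullptr once the swizzle is met when it is
+// false.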
+
+//
+// Create while and do-while loop nodes.
+//
+TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst,
+ const TSourceLoc& loc)
+{
+ TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst);
+ node->setLoc(loc);
+
+ return node;
+}
+
+//
+// Create a for-loop sequence.
+//
+TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test,
+ TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node)
+{
+ node = new TIntermLoop(body, test, terminal, testFirst);
+ node->setLoc(loc);
+
+ // make a sequence of the initializer and statement, but try to reuse the
+ // aggregate already created for whatever is in the initializer, if there is one
+ TIntermAggregate* loopSequence = (initializer == nullptr ||
+ initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc)
+ : initializer->getAsAggregate();
+ if (loopSequence != nullptr && loopSequence->getOp() == EOpSequence)
+ loopSequence->setOp(EOpNull);
+ loopSequence = growAggregate(loopSequence, node);
+ loopSequence->setOperator(EOpSequence);
+
+ return loopSequence;
+}
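+
+// The resulting shape, for illustration: "for (int i = 0; i < n; ++i) body"
+// becomes a sequence aggregate holding the initializer followed by the loop:
+//
+//     EOpSequence
+//       +-- int i = 0       (initializer)
+//       +-- TIntermLoop     (test "i < n", terminal "++i", and the body)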
+
+//
+// Add branches.
+//
+TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc)
+{
+ return addBranch(branchOp, nullptr, loc);
+}
+
+TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc)
+{
+ TIntermBranch* node = new TIntermBranch(branchOp, expression);
+ node->setLoc(loc);
+
+ return node;
+}
+
+//
+// This is to be executed after the final root is put on top by the parsing
+// process.
+//
+bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/)
+{
+ if (root == nullptr)
+ return true;
+
+ // Finish off the top-level sequence
+ TIntermAggregate* aggRoot = root->getAsAggregate();
+ if (aggRoot && aggRoot->getOp() == EOpNull)
+ aggRoot->setOperator(EOpSequence);
+
+    // Propagate the 'noContraction' label backward from 'precise' variables.
+ glslang::PropagateNoContraction(*this);
+
+ switch (textureSamplerTransformMode) {
+ case EShTexSampTransKeep:
+ break;
+ case EShTexSampTransUpgradeTextureRemoveSampler:
+ performTextureUpgradeAndSamplerRemovalTransformation(root);
+ break;
+ }
+
+ return true;
+}
+
+void TIntermediate::addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage language, TSymbolTable& symbolTable)
+{
+ // Add top-level nodes for declarations that must be checked cross
+ // compilation unit by a linker, yet might not have been referenced
+ // by the AST.
+ //
+ // Almost entirely, translation of symbols is driven by what's present
+ // in the AST traversal, not by translating the symbol table.
+ //
+ // However, there are some special cases:
+ // - From the specification: "Special built-in inputs gl_VertexID and
+ // gl_InstanceID are also considered active vertex attributes."
+ // - Linker-based type mismatch error reporting needs to see all
+ // uniforms/ins/outs variables and blocks.
+ // - ftransform() can make gl_Vertex and gl_ModelViewProjectionMatrix active.
+ //
+
+ // if (ftransformUsed) {
+ // TODO: 1.1 lowering functionality: track ftransform() usage
+ // addSymbolLinkageNode(root, symbolTable, "gl_Vertex");
+ // addSymbolLinkageNode(root, symbolTable, "gl_ModelViewProjectionMatrix");
+ //}
+
+ if (language == EShLangVertex) {
+ // the names won't be found in the symbol table unless the versions are right,
+ // so version logic does not need to be repeated here
+ addSymbolLinkageNode(linkage, symbolTable, "gl_VertexID");
+ addSymbolLinkageNode(linkage, symbolTable, "gl_InstanceID");
+ }
+
+ // Add a child to the root node for the linker objects
+ linkage->setOperator(EOpLinkerObjects);
+ treeRoot = growAggregate(treeRoot, linkage);
+}
+
+//
+// Add the given name or symbol to the list of nodes at the end of the tree used
+// for link-time checking and external linkage.
+//
+
+void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable& symbolTable, const TString& name)
+{
+ TSymbol* symbol = symbolTable.find(name);
+ if (symbol)
+ addSymbolLinkageNode(linkage, *symbol->getAsVariable());
+}
+
+void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol& symbol)
+{
+ const TVariable* variable = symbol.getAsVariable();
+ if (! variable) {
+ // This must be a member of an anonymous block, and we need to add the whole block
+ const TAnonMember* anon = symbol.getAsAnonMember();
+ variable = &anon->getAnonContainer();
+ }
+ TIntermSymbol* node = addSymbol(*variable);
+ linkage = growAggregate(linkage, node);
+}
+
+//
+// Add a caller->callee relationship to the call graph.
+// Assumes the strings are unique per signature.
+//
+void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& caller, const TString& callee)
+{
+    // Duplicates are okay, but it is faster not to keep them, and they come grouped by caller,
+    // as long as new ones are pushed on the same end we check for duplicates.
+ for (TGraph::const_iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->caller != caller)
+ break;
+ if (call->callee == callee)
+ return;
+ }
+
+ callGraph.push_front(TCall(caller, callee));
+}
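+
+// For illustration: after "void main() { foo(); bar(); foo(); }" the graph
+// holds (main, foo) and (main, bar) once each; the second call to foo() is
+// caught by the duplicate scan over the front of the list.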
+
+//
+// This deletes the tree.
+//
+void TIntermediate::removeTree()
+{
+ if (treeRoot)
+ RemoveAllTreeNodes(treeRoot);
+}
+
+//
+// Implement the part of KHR_vulkan_glsl that lists the set of operations
+// that can result in a specialization constant operation.
+//
+// "5.x Specialization Constant Operations"
+//
+// Only some operations discussed in this section may be applied to a
+// specialization constant and still yield a result that is a
+// specialization constant. The operations allowed are listed below.
+// When a specialization constant is operated on with one of these
+// operators and with another constant or specialization constant, the
+// result is implicitly a specialization constant.
+//
+// - int(), uint(), and bool() constructors for type conversions
+// from any of the following types to any of the following types:
+// * int
+// * uint
+// * bool
+// - vector versions of the above conversion constructors
+// - allowed implicit conversions of the above
+// - swizzles (e.g., foo.yx)
+// - The following when applied to integer or unsigned integer types:
+// * unary negative ( - )
+// * binary operations ( + , - , * , / , % )
+// * shift ( <<, >> )
+// * bitwise operations ( & , | , ^ )
+// - The following when applied to integer or unsigned integer scalar types:
+// * comparison ( == , != , > , >= , < , <= )
+// - The following when applied to the Boolean scalar type:
+// * not ( ! )
+// * logical operations ( && , || , ^^ )
+// * comparison ( == , != )"
+//
+// This function just handles binary and unary nodes. Construction
+// rules are handled in construction paths that are not covered by the unary
+// and binary paths, while required conversions will still show up here
+// as unary converters created from a construction operator.
+//
+bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const
+{
+ // The operations resulting in floating point are quite limited
+ // (However, some floating-point operations result in bool, like ">",
+ // so are handled later.)
+ if (node.getType().isFloatingDomain()) {
+ switch (node.getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+ case EOpConvFloatToDouble:
+ case EOpConvDoubleToFloat:
+ case EOpConvFloat16ToFloat:
+ case EOpConvFloatToFloat16:
+ case EOpConvFloat16ToDouble:
+ case EOpConvDoubleToFloat16:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Check for floating-point arguments
+ if (const TIntermBinary* bin = node.getAsBinaryNode())
+ if (bin->getLeft() ->getType().isFloatingDomain() ||
+ bin->getRight()->getType().isFloatingDomain())
+ return false;
+
+ // So, for now, we can assume everything left is non-floating-point...
+
+ // Now check for integer/bool-based operations
+ switch (node.getOp()) {
+
+ // dereference/swizzle
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+
+ // (u)int* -> bool
+ case EOpConvInt8ToBool:
+ case EOpConvInt16ToBool:
+ case EOpConvIntToBool:
+ case EOpConvInt64ToBool:
+ case EOpConvUint8ToBool:
+ case EOpConvUint16ToBool:
+ case EOpConvUintToBool:
+ case EOpConvUint64ToBool:
+
+ // bool -> (u)int*
+ case EOpConvBoolToInt8:
+ case EOpConvBoolToInt16:
+ case EOpConvBoolToInt:
+ case EOpConvBoolToInt64:
+ case EOpConvBoolToUint8:
+ case EOpConvBoolToUint16:
+ case EOpConvBoolToUint:
+ case EOpConvBoolToUint64:
+
+ // int8_t -> (u)int*
+ case EOpConvInt8ToInt16:
+ case EOpConvInt8ToInt:
+ case EOpConvInt8ToInt64:
+ case EOpConvInt8ToUint8:
+ case EOpConvInt8ToUint16:
+ case EOpConvInt8ToUint:
+ case EOpConvInt8ToUint64:
+
+ // int16_t -> (u)int*
+ case EOpConvInt16ToInt8:
+ case EOpConvInt16ToInt:
+ case EOpConvInt16ToInt64:
+ case EOpConvInt16ToUint8:
+ case EOpConvInt16ToUint16:
+ case EOpConvInt16ToUint:
+ case EOpConvInt16ToUint64:
+
+ // int32_t -> (u)int*
+ case EOpConvIntToInt8:
+ case EOpConvIntToInt16:
+ case EOpConvIntToInt64:
+ case EOpConvIntToUint8:
+ case EOpConvIntToUint16:
+ case EOpConvIntToUint:
+ case EOpConvIntToUint64:
+
+ // int64_t -> (u)int*
+ case EOpConvInt64ToInt8:
+ case EOpConvInt64ToInt16:
+ case EOpConvInt64ToInt:
+ case EOpConvInt64ToUint8:
+ case EOpConvInt64ToUint16:
+ case EOpConvInt64ToUint:
+ case EOpConvInt64ToUint64:
+
+ // uint8_t -> (u)int*
+ case EOpConvUint8ToInt8:
+ case EOpConvUint8ToInt16:
+ case EOpConvUint8ToInt:
+ case EOpConvUint8ToInt64:
+ case EOpConvUint8ToUint16:
+ case EOpConvUint8ToUint:
+ case EOpConvUint8ToUint64:
+
+ // uint16_t -> (u)int*
+ case EOpConvUint16ToInt8:
+ case EOpConvUint16ToInt16:
+ case EOpConvUint16ToInt:
+ case EOpConvUint16ToInt64:
+ case EOpConvUint16ToUint8:
+ case EOpConvUint16ToUint:
+ case EOpConvUint16ToUint64:
+
+ // uint32_t -> (u)int*
+ case EOpConvUintToInt8:
+ case EOpConvUintToInt16:
+ case EOpConvUintToInt:
+ case EOpConvUintToInt64:
+ case EOpConvUintToUint8:
+ case EOpConvUintToUint16:
+ case EOpConvUintToUint64:
+
+ // uint64_t -> (u)int*
+ case EOpConvUint64ToInt8:
+ case EOpConvUint64ToInt16:
+ case EOpConvUint64ToInt:
+ case EOpConvUint64ToInt64:
+ case EOpConvUint64ToUint8:
+ case EOpConvUint64ToUint16:
+ case EOpConvUint64ToUint:
+
+ // unary operations
+ case EOpNegative:
+ case EOpLogicalNot:
+ case EOpBitwiseNot:
+
+ // binary operations
+ case EOpAdd:
+ case EOpSub:
+ case EOpMul:
+ case EOpVectorTimesScalar:
+ case EOpDiv:
+ case EOpMod:
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ case EOpLogicalAnd:
+ case EOpEqual:
+ case EOpNotEqual:
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ return true;
+ default:
+ return false;
+ }
+}
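+
+// For illustration (KHR_vulkan_glsl assumed): given
+//
+//     layout(constant_id = 0) const int N = 8;
+//
+// an expression like "N * 2 + 1" remains a specialization constant, since
+// EOpMul and EOpAdd are accepted above, while "float(N) * 2.0" does not,
+// because floating-point arithmetic fails the floating-domain check.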
+
+// Is the operation one that must propagate nonuniform?
+bool TIntermediate::isNonuniformPropagating(TOperator op) const
+{
+ // "* All Operators in Section 5.1 (Operators), except for assignment,
+ // arithmetic assignment, and sequence
+ // * Component selection in Section 5.5
+ // * Matrix components in Section 5.6
+ // * Structure and Array Operations in Section 5.7, except for the length
+ // method."
+ switch (op) {
+ case EOpPostIncrement:
+ case EOpPostDecrement:
+ case EOpPreIncrement:
+ case EOpPreDecrement:
+
+ case EOpNegative:
+ case EOpLogicalNot:
+ case EOpVectorLogicalNot:
+ case EOpBitwiseNot:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpMul:
+ case EOpDiv:
+ case EOpMod:
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpEqual:
+ case EOpNotEqual:
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ case EOpVectorTimesScalar:
+ case EOpVectorTimesMatrix:
+ case EOpMatrixTimesVector:
+ case EOpMatrixTimesScalar:
+
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ case EOpLogicalAnd:
+
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
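+
+// For illustration (GL_EXT_nonuniform_qualifier assumed): in an index
+// expression like "textures[nonuniformEXT(i) + 1]", EOpAdd is in the list
+// above, so the nonuniform decoration must propagate through the addition to
+// the whole indexing expression.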
+
+////////////////////////////////////////////////////////////////
+//
+// Member functions of the nodes used for building the tree.
+//
+////////////////////////////////////////////////////////////////
+
+//
+// Say whether or not an operation node changes the value of a variable.
+//
+// Returns true if state is modified.
+//
+bool TIntermOperator::modifiesState() const
+{
+ switch (op) {
+ case EOpPostIncrement:
+ case EOpPostDecrement:
+ case EOpPreIncrement:
+ case EOpPreDecrement:
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpVectorTimesMatrixAssign:
+ case EOpVectorTimesScalarAssign:
+ case EOpMatrixTimesScalarAssign:
+ case EOpMatrixTimesMatrixAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpLeftShiftAssign:
+ case EOpRightShiftAssign:
+ return true;
+ default:
+ return false;
+ }
+}
+
+//
+// returns true if the operator is for one of the constructors
+//
+bool TIntermOperator::isConstructor() const
+{
+ return op > EOpConstructGuardStart && op < EOpConstructGuardEnd;
+}
+
+//
+// Make sure the type of an operator is appropriate for its
+// combination of operation and operand type. This will invoke
+// promoteUnary, promoteBinary, etc., as needed.
+//
+// Returns false if nothing makes sense.
+//
+bool TIntermediate::promote(TIntermOperator* node)
+{
+ if (node == nullptr)
+ return false;
+
+ if (node->getAsUnaryNode())
+ return promoteUnary(*node->getAsUnaryNode());
+
+ if (node->getAsBinaryNode())
+ return promoteBinary(*node->getAsBinaryNode());
+
+ if (node->getAsAggregate())
+ return promoteAggregate(*node->getAsAggregate());
+
+ return false;
+}
+
+//
+// See TIntermediate::promote
+//
+bool TIntermediate::promoteUnary(TIntermUnary& node)
+{
+ const TOperator op = node.getOp();
+ TIntermTyped* operand = node.getOperand();
+
+ switch (op) {
+ case EOpLogicalNot:
+ // Convert operand to a boolean type
+ if (operand->getBasicType() != EbtBool) {
+ // Add constructor to boolean type. If that fails, we can't do it, so return false.
+ TIntermTyped* converted = addConversion(op, TType(EbtBool), operand);
+ if (converted == nullptr)
+ return false;
+
+ // Use the result of converting the node to a bool.
+ node.setOperand(operand = converted); // also updates stack variable
+ }
+ break;
+ case EOpBitwiseNot:
+ if (!isTypeInt(operand->getBasicType()))
+ return false;
+ break;
+ case EOpNegative:
+ case EOpPostIncrement:
+ case EOpPostDecrement:
+ case EOpPreIncrement:
+ case EOpPreDecrement:
+ if (!isTypeInt(operand->getBasicType()) &&
+ operand->getBasicType() != EbtFloat &&
+ operand->getBasicType() != EbtFloat16 &&
+ operand->getBasicType() != EbtDouble)
+
+ return false;
+ break;
+
+ default:
+ if (operand->getBasicType() != EbtFloat)
+
+ return false;
+ }
+
+ node.setType(operand->getType());
+ node.getWritableType().getQualifier().makeTemporary();
+
+ return true;
+}
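+
+// A small example of the EOpLogicalNot path above (illustrative; HLSL-style
+// input assumed, since GLSL already requires a bool operand): for "!x" with
+// an int x, addConversion() first wraps x in an int-to-bool converter, and
+// only then is the node typed bool with a temporary qualifier.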
+
+void TIntermUnary::updatePrecision()
+{
+ if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
+ if (operand->getQualifier().precision > getQualifier().precision)
+ getQualifier().precision = operand->getQualifier().precision;
+ }
+}
+
+//
+// See TIntermediate::promote
+//
+bool TIntermediate::promoteBinary(TIntermBinary& node)
+{
+ TOperator op = node.getOp();
+ TIntermTyped* left = node.getLeft();
+ TIntermTyped* right = node.getRight();
+
+ // Arrays and structures have to be exact matches.
+ if ((left->isArray() || right->isArray() || left->getBasicType() == EbtStruct || right->getBasicType() == EbtStruct)
+ && left->getType() != right->getType())
+ return false;
+
+ // Base assumption: just make the type the same as the left
+ // operand. Only deviations from this will be coded.
+ node.setType(left->getType());
+ node.getWritableType().getQualifier().clear();
+
+    // Composite and opaque types don't have pending operator changes, e.g.,
+    // arrays, structures, and samplers. Just establish final type and correctness.
+ if (left->isArray() || left->getBasicType() == EbtStruct || left->getBasicType() == EbtSampler) {
+ switch (op) {
+ case EOpEqual:
+ case EOpNotEqual:
+ if (left->getBasicType() == EbtSampler) {
+ // can't compare samplers
+ return false;
+ } else {
+ // Promote to conditional
+ node.setType(TType(EbtBool));
+ }
+
+ return true;
+
+ case EOpAssign:
+ // Keep type from above
+
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ //
+ // We now have only scalars, vectors, and matrices to worry about.
+ //
+
+ // HLSL implicitly promotes bool -> int for numeric operations.
+ // (Implicit conversions to make the operands match each other's types were already done.)
+ if (getSource() == EShSourceHlsl &&
+ (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) {
+ switch (op) {
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+
+ case EOpRightShift:
+ case EOpLeftShift:
+
+ case EOpMod:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpMul:
+ if (left->getBasicType() == EbtBool)
+ left = createConversion(EbtInt, left);
+ if (right->getBasicType() == EbtBool)
+ right = createConversion(EbtInt, right);
+ if (left == nullptr || right == nullptr)
+ return false;
+ node.setLeft(left);
+ node.setRight(right);
+
+            // Update the original base assumption on result type.
+ node.setType(left->getType());
+ node.getWritableType().getQualifier().clear();
+
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that)
+ switch (op) {
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ // Relational comparisons need numeric types and will promote to scalar Boolean.
+ if (left->getBasicType() == EbtBool)
+ return false;
+
+ node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));
+ break;
+
+ case EOpEqual:
+ case EOpNotEqual:
+ if (getSource() == EShSourceHlsl) {
+ const int resultWidth = std::max(left->getVectorSize(), right->getVectorSize());
+
+ // In HLSL, == or != on vectors means component-wise comparison.
+ if (resultWidth > 1) {
+ op = (op == EOpEqual) ? EOpVectorEqual : EOpVectorNotEqual;
+ node.setOp(op);
+ }
+
+ node.setType(TType(EbtBool, EvqTemporary, resultWidth));
+ } else {
+ // All the above comparisons result in a bool (but not the vector compares)
+ node.setType(TType(EbtBool));
+ }
+ break;
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ // logical ops operate only on Booleans or vectors of Booleans.
+ if (left->getBasicType() != EbtBool || left->isMatrix())
+ return false;
+
+ if (getSource() == EShSourceGlsl) {
+ // logical ops operate only on scalar Booleans and will promote to scalar Boolean.
+ if (left->isVector())
+ return false;
+ }
+
+ node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize()));
+ break;
+
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+
+ case EOpMod:
+ case EOpModAssign:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ if (getSource() == EShSourceHlsl)
+ break;
+
+ // Check for integer-only operands.
+ if (!isTypeInt(left->getBasicType()) && !isTypeInt(right->getBasicType()))
+ return false;
+ if (left->isMatrix() || right->isMatrix())
+ return false;
+
+ break;
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpMul:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpDivAssign:
+ // check for non-Boolean operands
+ if (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)
+ return false;
+
+ default:
+ break;
+ }
+
+ // Compare left and right, and finish with the cases where the operand types must match
+ switch (op) {
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+
+ case EOpEqual:
+ case EOpNotEqual:
+ case EOpVectorEqual:
+ case EOpVectorNotEqual:
+
+ case EOpLogicalAnd:
+ case EOpLogicalOr:
+ case EOpLogicalXor:
+ return left->getType() == right->getType();
+
+ case EOpMod:
+ case EOpModAssign:
+
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ // Quick out in case the types do match
+ if (left->getType() == right->getType())
+ return true;
+
+ // Fall through
+
+ case EOpMul:
+ case EOpMulAssign:
+ // At least the basic type has to match
+ if (left->getBasicType() != right->getBasicType())
+ return false;
+
+ default:
+ break;
+ }
+
+ if (left->getType().isCoopMat() || right->getType().isCoopMat()) {
+ if (left->getType().isCoopMat() && right->getType().isCoopMat() &&
+ *left->getType().getTypeParameters() != *right->getType().getTypeParameters()) {
+ return false;
+ }
+ switch (op) {
+ case EOpMul:
+ case EOpMulAssign:
+ if (left->getType().isCoopMat() && right->getType().isCoopMat()) {
+ return false;
+ }
+ if (op == EOpMulAssign && right->getType().isCoopMat()) {
+ return false;
+ }
+ node.setOp(op == EOpMulAssign ? EOpMatrixTimesScalarAssign : EOpMatrixTimesScalar);
+ if (right->getType().isCoopMat()) {
+ node.setType(right->getType());
+ }
+ return true;
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpAssign:
+ // These require both to be cooperative matrices
+ if (!left->getType().isCoopMat() || !right->getType().isCoopMat()) {
+ return false;
+ }
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+
+ // Finish handling the case, for all ops, where both operands are scalars.
+ if (left->isScalar() && right->isScalar())
+ return true;
+
+ // Finish handling the case, for all ops, where there are two vectors of different sizes
+ if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1)
+ return false;
+
+ //
+ // We now have a mix of scalars, vectors, or matrices, for non-relational operations.
+ //
+
+ // Can these two operands be combined, what is the resulting type?
+ TBasicType basicType = left->getBasicType();
+ switch (op) {
+ case EOpMul:
+ if (!left->isMatrix() && right->isMatrix()) {
+ if (left->isVector()) {
+ if (left->getVectorSize() != right->getMatrixRows())
+ return false;
+ node.setOp(op = EOpVectorTimesMatrix);
+ node.setType(TType(basicType, EvqTemporary, right->getMatrixCols()));
+ } else {
+ node.setOp(op = EOpMatrixTimesScalar);
+ node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), right->getMatrixRows()));
+ }
+ } else if (left->isMatrix() && !right->isMatrix()) {
+ if (right->isVector()) {
+ if (left->getMatrixCols() != right->getVectorSize())
+ return false;
+ node.setOp(op = EOpMatrixTimesVector);
+ node.setType(TType(basicType, EvqTemporary, left->getMatrixRows()));
+ } else {
+ node.setOp(op = EOpMatrixTimesScalar);
+ }
+ } else if (left->isMatrix() && right->isMatrix()) {
+ if (left->getMatrixCols() != right->getMatrixRows())
+ return false;
+ node.setOp(op = EOpMatrixTimesMatrix);
+ node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), left->getMatrixRows()));
+ } else if (! left->isMatrix() && ! right->isMatrix()) {
+ if (left->isVector() && right->isVector()) {
+ ; // leave as component product
+ } else if (left->isVector() || right->isVector()) {
+ node.setOp(op = EOpVectorTimesScalar);
+ if (right->isVector())
+ node.setType(TType(basicType, EvqTemporary, right->getVectorSize()));
+ }
+ } else {
+ return false;
+ }
+ break;
+ case EOpMulAssign:
+ if (! left->isMatrix() && right->isMatrix()) {
+ if (left->isVector()) {
+ if (left->getVectorSize() != right->getMatrixRows() || left->getVectorSize() != right->getMatrixCols())
+ return false;
+ node.setOp(op = EOpVectorTimesMatrixAssign);
+ } else {
+ return false;
+ }
+ } else if (left->isMatrix() && !right->isMatrix()) {
+ if (right->isVector()) {
+ return false;
+ } else {
+ node.setOp(op = EOpMatrixTimesScalarAssign);
+ }
+ } else if (left->isMatrix() && right->isMatrix()) {
+ if (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixCols() != right->getMatrixRows())
+ return false;
+ node.setOp(op = EOpMatrixTimesMatrixAssign);
+ } else if (!left->isMatrix() && !right->isMatrix()) {
+ if (left->isVector() && right->isVector()) {
+ // leave as component product
+ } else if (left->isVector() || right->isVector()) {
+ if (! left->isVector())
+ return false;
+ node.setOp(op = EOpVectorTimesScalarAssign);
+ }
+ } else {
+ return false;
+ }
+ break;
+
+ case EOpRightShift:
+ case EOpLeftShift:
+ case EOpRightShiftAssign:
+ case EOpLeftShiftAssign:
+ if (right->isVector() && (! left->isVector() || right->getVectorSize() != left->getVectorSize()))
+ return false;
+ break;
+
+ case EOpAssign:
+ if (left->getVectorSize() != right->getVectorSize() || left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows())
+ return false;
+ // fall through
+
+ case EOpAdd:
+ case EOpSub:
+ case EOpDiv:
+ case EOpMod:
+ case EOpAnd:
+ case EOpInclusiveOr:
+ case EOpExclusiveOr:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+
+ if ((left->isMatrix() && right->isVector()) ||
+ (left->isVector() && right->isMatrix()) ||
+ left->getBasicType() != right->getBasicType())
+ return false;
+ if (left->isMatrix() && right->isMatrix() && (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows()))
+ return false;
+ if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize())
+ return false;
+ if (right->isVector() || right->isMatrix()) {
+ node.getWritableType().shallowCopy(right->getType());
+ node.getWritableType().getQualifier().makeTemporary();
+ }
+ break;
+
+ default:
+ return false;
+ }
+
+ //
+ // One more check for assignment.
+ //
+ switch (op) {
+ // The resulting type has to match the left operand.
+ case EOpAssign:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ case EOpMulAssign:
+ case EOpDivAssign:
+ case EOpModAssign:
+ case EOpAndAssign:
+ case EOpInclusiveOrAssign:
+ case EOpExclusiveOrAssign:
+ case EOpLeftShiftAssign:
+ case EOpRightShiftAssign:
+ if (node.getType() != left->getType())
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
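+
+// Worked example of the EOpMul promotion above (illustrative): for mat3 M and
+// vec3 v, "M * v" is rewritten to EOpMatrixTimesVector and typed vec3 (M's
+// rows); "v * M" becomes EOpVectorTimesMatrix typed from M's columns; and
+// "2.0 * v" becomes EOpVectorTimesScalar, keeping the vector size.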
+
+//
+// See TIntermediate::promote
+//
+bool TIntermediate::promoteAggregate(TIntermAggregate& node)
+{
+ TOperator op = node.getOp();
+ TIntermSequence& args = node.getSequence();
+ const int numArgs = static_cast<int>(args.size());
+
+    // Presently, only HLSL does intrinsic promotions.
+ if (getSource() != EShSourceHlsl)
+ return true;
+
+ // set of opcodes that can be promoted in this manner.
+ switch (op) {
+ case EOpAtan:
+ case EOpClamp:
+ case EOpCross:
+ case EOpDistance:
+ case EOpDot:
+ case EOpDst:
+ case EOpFaceForward:
+ // case EOpFindMSB: TODO:
+ // case EOpFindLSB: TODO:
+ case EOpFma:
+ case EOpMod:
+ case EOpFrexp:
+ case EOpLdexp:
+ case EOpMix:
+ case EOpLit:
+ case EOpMax:
+ case EOpMin:
+ case EOpModf:
+ // case EOpGenMul: TODO:
+ case EOpPow:
+ case EOpReflect:
+ case EOpRefract:
+ // case EOpSinCos: TODO:
+ case EOpSmoothStep:
+ case EOpStep:
+ break;
+ default:
+ return true;
+ }
+
+ // TODO: array and struct behavior
+
+ // Try converting all nodes to the given node's type
+ TIntermSequence convertedArgs(numArgs, nullptr);
+
+ // Try to convert all types to the nonConvArg type.
+ for (int nonConvArg = 0; nonConvArg < numArgs; ++nonConvArg) {
+ // Try converting all args to this arg's type
+ for (int convArg = 0; convArg < numArgs; ++convArg) {
+ convertedArgs[convArg] = addConversion(op, args[nonConvArg]->getAsTyped()->getType(),
+ args[convArg]->getAsTyped());
+ }
+
+ // If we successfully converted all the args, use the result.
+ if (std::all_of(convertedArgs.begin(), convertedArgs.end(),
+ [](const TIntermNode* node) { return node != nullptr; })) {
+
+ std::swap(args, convertedArgs);
+ return true;
+ }
+ }
+
+ return false;
+}
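+
+// For illustration (HLSL source assumed): given mixed argument types such as
+// max(i, f), the loop above finds the first argument type to which every
+// argument implicitly converts, rewrites the whole argument list in that
+// type, and returns true; if no common type exists it returns false.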
+
+void TIntermBinary::updatePrecision()
+{
+ if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
+ getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision);
+ if (getQualifier().precision != EpqNone) {
+ left->propagatePrecision(getQualifier().precision);
+ right->propagatePrecision(getQualifier().precision);
+ }
+ }
+}
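+
+// For illustration (ESSL precision qualifiers assumed): in "mediump float a;
+// highp float b;", the sum "a + b" takes the higher precision, highp, and
+// propagatePrecision() below then pushes highp down into operand subtrees
+// that had no precision of their own.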
+
+void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision)
+{
+ if (getQualifier().precision != EpqNone || (getBasicType() != EbtInt && getBasicType() != EbtUint && getBasicType() != EbtFloat && getBasicType() != EbtFloat16))
+ return;
+
+ getQualifier().precision = newPrecision;
+
+ TIntermBinary* binaryNode = getAsBinaryNode();
+ if (binaryNode) {
+ binaryNode->getLeft()->propagatePrecision(newPrecision);
+ binaryNode->getRight()->propagatePrecision(newPrecision);
+
+ return;
+ }
+
+ TIntermUnary* unaryNode = getAsUnaryNode();
+ if (unaryNode) {
+ unaryNode->getOperand()->propagatePrecision(newPrecision);
+
+ return;
+ }
+
+ TIntermAggregate* aggregateNode = getAsAggregate();
+ if (aggregateNode) {
+ TIntermSequence operands = aggregateNode->getSequence();
+ for (unsigned int i = 0; i < operands.size(); ++i) {
+ TIntermTyped* typedNode = operands[i]->getAsTyped();
+ if (! typedNode)
+ break;
+ typedNode->propagatePrecision(newPrecision);
+ }
+
+ return;
+ }
+
+ TIntermSelection* selectionNode = getAsSelectionNode();
+ if (selectionNode) {
+ TIntermTyped* typedNode = selectionNode->getTrueBlock()->getAsTyped();
+ if (typedNode) {
+ typedNode->propagatePrecision(newPrecision);
+ typedNode = selectionNode->getFalseBlock()->getAsTyped();
+ if (typedNode)
+ typedNode->propagatePrecision(newPrecision);
+ }
+
+ return;
+ }
+}
+
+TIntermTyped* TIntermediate::promoteConstantUnion(TBasicType promoteTo, TIntermConstantUnion* node) const
+{
+ const TConstUnionArray& rightUnionArray = node->getConstArray();
+ int size = node->getType().computeNumComponents();
+
+ TConstUnionArray leftUnionArray(size);
+
+ for (int i=0; i < size; i++) {
+ switch (promoteTo) {
+ case EbtFloat:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtDouble:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtFloat16:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setDConst(static_cast<double>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtInt:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtUint:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setIConst(static_cast<int>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtUint:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setUConst(static_cast<unsigned int>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtBool:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getIConst() != 0);
+ break;
+ case EbtUint:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getUConst() != 0);
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getI64Const() != 0);
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getU64Const() != 0);
+ break;
+ case EbtBool:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setBConst(rightUnionArray[i].getDConst() != 0.0);
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtInt64:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtUint64:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getU64Const()));
+ break;
+ case EbtBool:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setI64Const(static_cast<long long>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ case EbtUint64:
+ switch (node->getType().getBasicType()) {
+ case EbtInt:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getIConst()));
+ break;
+ case EbtUint:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getUConst()));
+ break;
+ case EbtInt64:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getI64Const()));
+ break;
+ case EbtUint64:
+ leftUnionArray[i] = rightUnionArray[i];
+ break;
+ case EbtBool:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getBConst()));
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ leftUnionArray[i].setU64Const(static_cast<unsigned long long>(rightUnionArray[i].getDConst()));
+ break;
+ default:
+ return node;
+ }
+ break;
+ default:
+ return node;
+ }
+ }
+
+ const TType& t = node->getType();
+
+ return addConstantUnion(leftUnionArray, TType(promoteTo, t.getQualifier().storage, t.getVectorSize(), t.getMatrixCols(), t.getMatrixRows()),
+ node->getLoc());
+}
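+
+// For illustration: folding the conversion in "float(3)" calls
+// promoteConstantUnion(EbtFloat, node) on the int constant; the component is
+// copied through setDConst(), and a new EbtFloat constant-union node of the
+// same shape (scalar here) is returned.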
+
+void TIntermAggregate::setPragmaTable(const TPragmaTable& pTable)
+{
+ assert(pragmaTable == nullptr);
+ pragmaTable = new TPragmaTable;
+ *pragmaTable = pTable;
+}
+
+// If either node is a specialization constant, while the other is
+// a constant (or specialization constant), the result is still
+// a specialization constant.
+bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TIntermTyped& node2)
+{
+ return (node1.getType().getQualifier().isSpecConstant() && node2.getType().getQualifier().isConstant()) ||
+ (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant());
+}
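+
+// For illustration: with "layout(constant_id = 1) const bool B = false;",
+// in "B && true" one operand is a specialization constant and the other a
+// front-end constant, so the result is still a specialization constant.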
+
+struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser {
+ void visitSymbol(TIntermSymbol* symbol) override {
+ if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) {
+ symbol->getWritableType().getSampler().combined = true;
+ }
+ }
+ bool visitAggregate(TVisit, TIntermAggregate* ag) override {
+ using namespace std;
+ TIntermSequence& seq = ag->getSequence();
+ TQualifierList& qual = ag->getQualifierList();
+
+ // qual and seq are indexed using the same indices, so we have to modify both in lock-step
+ assert(seq.size() == qual.size() || qual.empty());
+
+ size_t write = 0;
+ for (size_t i = 0; i < seq.size(); ++i) {
+ TIntermSymbol* symbol = seq[i]->getAsSymbolNode();
+ if (symbol && symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler()) {
+ // remove pure sampler variables
+ continue;
+ }
+
+ TIntermNode* result = seq[i];
+
+ // replace constructors with sampler/textures
+ TIntermAggregate *constructor = seq[i]->getAsAggregate();
+ if (constructor && constructor->getOp() == EOpConstructTextureSampler) {
+ if (!constructor->getSequence().empty())
+ result = constructor->getSequence()[0];
+ }
+
+ // write new node & qualifier
+ seq[write] = result;
+ if (!qual.empty())
+ qual[write] = qual[i];
+ write++;
+ }
+
+ seq.resize(write);
+ if (!qual.empty())
+ qual.resize(write);
+
+ return true;
+ }
+};
+
+void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root)
+{
+ TextureUpgradeAndSamplerRemovalTransform transform;
+ root->traverse(&transform);
+}
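+
+// For illustration (HLSL-style separate objects assumed): for a pair like
+// "Texture2D t; SamplerState s;", the transform above marks t as a combined
+// texture+sampler and drops the pure sampler s from linker-object sequences,
+// matching what a combined-image-sampler target expects.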
+
+const char* TIntermediate::getResourceName(TResourceType res)
+{
+ switch (res) {
+ case EResSampler: return "shift-sampler-binding";
+ case EResTexture: return "shift-texture-binding";
+ case EResImage: return "shift-image-binding";
+ case EResUbo: return "shift-UBO-binding";
+ case EResSsbo: return "shift-ssbo-binding";
+ case EResUav: return "shift-uav-binding";
+ default:
+ assert(0); // internal error: should only be called with valid resource types.
+ return nullptr;
+ }
+}
+
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/LiveTraverser.h b/src/3rdparty/glslang/glslang/MachineIndependent/LiveTraverser.h
new file mode 100644
index 0000000..7333bc9
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/LiveTraverser.h
@@ -0,0 +1,156 @@
+//
+// Copyright (C) 2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#pragma once
+
+#include "../Include/Common.h"
+#include "reflection.h"
+#include "localintermediate.h"
+
+#include "gl_types.h"
+
+#include <list>
+#include <unordered_set>
+
+namespace glslang {
+
+//
+// The traverser: mostly pass through, except
+// - processing function-call nodes to push live functions onto the stack of functions to process
+// - processing selection nodes to trim semantically dead code
+//
+// This is in the glslang namespace directly so it can be a friend of TReflection.
+// This can be derived from to implement reflection database traversers or
+// binding mappers: anything that wants to traverse the live subset of the tree.
+//
+
+class TLiveTraverser : public TIntermTraverser {
+public:
+ TLiveTraverser(const TIntermediate& i, bool traverseAll = false,
+ bool preVisit = true, bool inVisit = false, bool postVisit = false) :
+ TIntermTraverser(preVisit, inVisit, postVisit),
+ intermediate(i), traverseAll(traverseAll)
+ { }
+
+ //
+ // Given a function name, find its subroot in the tree, and push it onto the stack of
+ // functions left to process.
+ //
+ void pushFunction(const TString& name)
+ {
+ TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
+ for (unsigned int f = 0; f < globals.size(); ++f) {
+ TIntermAggregate* candidate = globals[f]->getAsAggregate();
+ if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
+ functions.push_back(candidate);
+ break;
+ }
+ }
+ }
+
+ typedef std::list<TIntermAggregate*> TFunctionStack;
+ TFunctionStack functions;
+
+protected:
+ // To catch which function calls are not dead, and hence which functions must be visited.
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node)
+ {
+ if (!traverseAll)
+ if (node->getOp() == EOpFunctionCall)
+ addFunctionCall(node);
+
+ return true; // traverse this subtree
+ }
+
+ // To prune semantically dead paths.
+ virtual bool visitSelection(TVisit /* visit */, TIntermSelection* node)
+ {
+ if (traverseAll)
+ return true; // traverse all code
+
+ TIntermConstantUnion* constant = node->getCondition()->getAsConstantUnion();
+ if (constant) {
+ // cull the path that is dead
+ if (constant->getConstArray()[0].getBConst() == true && node->getTrueBlock())
+ node->getTrueBlock()->traverse(this);
+ if (constant->getConstArray()[0].getBConst() == false && node->getFalseBlock())
+ node->getFalseBlock()->traverse(this);
+
+ return false; // don't traverse any more, we did it all above
+ } else
+ return true; // traverse the whole subtree
+ }
+
+ // Track live functions as well as uniforms, so that we don't visit dead functions
+ // and only visit each function once.
+ void addFunctionCall(TIntermAggregate* call)
+ {
+        // just use the map to ensure we process each function at most once
+ if (liveFunctions.find(call->getName()) == liveFunctions.end()) {
+ liveFunctions.insert(call->getName());
+ pushFunction(call->getName());
+ }
+ }
+
+ const TIntermediate& intermediate;
+ typedef std::unordered_set<TString> TLiveFunctions;
+ TLiveFunctions liveFunctions;
+ bool traverseAll;
+
+private:
+ // prevent copy & copy construct
+ TLiveTraverser(TLiveTraverser&);
+ TLiveTraverser& operator=(TLiveTraverser&);
+};
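+
+// A minimal usage sketch (illustrative only, not part of this header): a
+// derived traverser that walks just the live functions, starting from an
+// assumed entry-point name.
+//
+//     class TLiveFunctionWalker : public TLiveTraverser {
+//     public:
+//         TLiveFunctionWalker(const TIntermediate& i) : TLiveTraverser(i) { }
+//         void walk(const TString& entryName)
+//         {
+//             pushFunction(entryName);       // seed with the entry point
+//             while (! functions.empty()) {
+//                 TIntermNode* function = functions.back();
+//                 functions.pop_back();
+//                 function->traverse(this);  // queues further live calls
+//             }
+//         }
+//     };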
+
+} // namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp
new file mode 100644
index 0000000..c9ddaea
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/ParseContextBase.cpp
@@ -0,0 +1,632 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// Implement the TParseContextBase class.
+
+#include <cstdarg>
+
+#include "ParseHelper.h"
+
+extern int yyparse(glslang::TParseContext*);
+
+namespace glslang {
+
+//
+// Used to output syntax, parsing, and semantic errors.
+//
+
+void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReason,
+ const char* szToken,
+ const char* szExtraInfoFormat,
+ TPrefixType prefix, va_list args)
+{
+ const int maxSize = MaxTokenLength + 200;
+ char szExtraInfo[maxSize];
+
+ safe_vsprintf(szExtraInfo, maxSize, szExtraInfoFormat, args);
+
+ infoSink.info.prefix(prefix);
+ infoSink.info.location(loc);
+ infoSink.info << "'" << szToken << "' : " << szReason << " " << szExtraInfo << "\n";
+
+ if (prefix == EPrefixError) {
+ ++numErrors;
+ }
+}
+
+void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ if (messages & EShMsgOnlyPreprocessor)
+ return;
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
+ va_end(args);
+
+ if ((messages & EShMsgCascadingErrors) == 0)
+ currentScanner->setEndOfInput();
+}
+
+void C_DECL TParseContextBase::warn(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ if (suppressWarnings())
+ return;
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
+ va_end(args);
+}
+
+void C_DECL TParseContextBase::ppError(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
+ va_end(args);
+
+ if ((messages & EShMsgCascadingErrors) == 0)
+ currentScanner->setEndOfInput();
+}
+
+void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...)
+{
+ va_list args;
+ va_start(args, szExtraInfoFormat);
+ outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
+ va_end(args);
+}
+
+//
+// Both test, and if necessary spit out an error, to see if the node is really
+// an l-value that can be operated on this way.
+//
+// Returns true if there was an error.
+//
+bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ TIntermBinary* binaryNode = node->getAsBinaryNode();
+
+ if (binaryNode) {
+ switch(binaryNode->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect: // fall through
+ case EOpIndexDirectStruct: // fall through
+ case EOpVectorSwizzle:
+ case EOpMatrixSwizzle:
+ return lValueErrorCheck(loc, op, binaryNode->getLeft());
+ default:
+ break;
+ }
+ error(loc, " l-value required", op, "", "");
+
+ return true;
+ }
+
+ const char* symbol = nullptr;
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (symNode != nullptr)
+ symbol = symNode->getName().c_str();
+
+ const char* message = nullptr;
+ switch (node->getQualifier().storage) {
+ case EvqConst: message = "can't modify a const"; break;
+ case EvqConstReadOnly: message = "can't modify a const"; break;
+ case EvqUniform: message = "can't modify a uniform"; break;
+ case EvqBuffer:
+ if (node->getQualifier().readonly)
+ message = "can't modify a readonly buffer";
+#ifdef NV_EXTENSIONS
+ if (node->getQualifier().layoutShaderRecordNV)
+ message = "can't modify a shaderrecordnv qualified buffer";
+#endif
+ break;
+#ifdef NV_EXTENSIONS
+ case EvqHitAttrNV:
+ if (language != EShLangIntersectNV)
+ message = "cannot modify hitAttributeNV in this stage";
+ break;
+#endif
+
+ default:
+ //
+ // Type that can't be written to?
+ //
+ switch (node->getBasicType()) {
+ case EbtSampler:
+ message = "can't modify a sampler";
+ break;
+ case EbtAtomicUint:
+ message = "can't modify an atomic_uint";
+ break;
+ case EbtVoid:
+ message = "can't modify void";
+ break;
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV:
+ message = "can't modify accelerationStructureNV";
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+
+ if (message == nullptr && binaryNode == nullptr && symNode == nullptr) {
+ error(loc, " l-value required", op, "", "");
+
+ return true;
+ }
+
+ //
+ // Everything else is okay, no error.
+ //
+ if (message == nullptr)
+ return false;
+
+ //
+ // If we get here, we have an error and a message.
+ //
+ if (symNode)
+ error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
+ else
+ error(loc, " l-value required", op, "(%s)", message);
+
+ return true;
+}
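+
+// A minimal GLSL sketch (illustrative only) of what the check above rejects:
+//
+//   uniform float u;
+//   void f() { u = 1.0; }   // error: l-value required (can't modify a uniform)
+//
+// Swizzled or indexed writes recurse into the left operand, so whether
+// 'v.x = 1.0' is accepted depends on the qualifiers of 'v' itself.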
+
+// Test for and give an error if the node can't be read from.
+void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ if (! node)
+ return;
+
+ TIntermBinary* binaryNode = node->getAsBinaryNode();
+ if (binaryNode) {
+ switch(binaryNode->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ case EOpVectorSwizzle:
+ case EOpMatrixSwizzle:
+            rValueErrorCheck(loc, op, binaryNode->getLeft());
+            break;
+ default:
+ break;
+ }
+
+ return;
+ }
+
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (symNode && symNode->getQualifier().writeonly)
+ error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
+}
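+
+// Illustrative GLSL sketch of what the symbol check above is meant to catch:
+//
+//   writeonly uniform image2D img;
+//   // using 'img' where a read is required reports:
+//   // can't read from writeonly object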
+
+// Add 'symbol' to the list of deferred linkage symbols, which
+// are later processed in finish(), at which point the symbol
+// must still be valid.
+// It is okay if the symbol's type will be subsequently edited;
+// the modifications will be tracked.
+// Order is preserved, to avoid creating novel forward references.
+void TParseContextBase::trackLinkage(TSymbol& symbol)
+{
+ if (!parsingBuiltins)
+ linkageSymbols.push_back(&symbol);
+}
+
+// Ensure the index is in bounds; if it is not, give an error
+// and clamp it so processing can continue.
+void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
+{
+ if (index < 0) {
+ error(loc, "", "[", "index out of range '%d'", index);
+ index = 0;
+ } else if (type.isArray()) {
+ if (type.isSizedArray() && index >= type.getOuterArraySize()) {
+ error(loc, "", "[", "array index out of range '%d'", index);
+ index = type.getOuterArraySize() - 1;
+ }
+ } else if (type.isVector()) {
+ if (index >= type.getVectorSize()) {
+ error(loc, "", "[", "vector index out of range '%d'", index);
+ index = type.getVectorSize() - 1;
+ }
+ } else if (type.isMatrix()) {
+ if (index >= type.getMatrixCols()) {
+ error(loc, "", "[", "matrix index out of range '%d'", index);
+ index = type.getMatrixCols() - 1;
+ }
+ }
+}
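+
+// Illustrative example of the clamping above (hypothetical shader text):
+//
+//   vec3 v;
+//   float f = v[5];   // error: vector index out of range '5'; index clamped to 2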
+
+// Make a shared symbol have a non-shared version that can be edited by the current
+// compile, such that editing its type will not change the shared version and will
+// affect all nodes already sharing it (non-shallow type),
+// or have them adopt its full type after it is edited (shallow type).
+void TParseContextBase::makeEditable(TSymbol*& symbol)
+{
+ // copyUp() does a deep copy of the type.
+ symbol = symbolTable.copyUp(symbol);
+
+ // Save it (deferred, so it can be edited first) in the AST for linker use.
+ if (symbol)
+ trackLinkage(*symbol);
+}
+
+// Return a writable version of the variable 'name'.
+//
+// Return nullptr if 'name' is not found. This should mean
+// something is seriously wrong (e.g., the compiler asking itself for a
+// built-in that doesn't exist).
+TVariable* TParseContextBase::getEditableVariable(const char* name)
+{
+ bool builtIn;
+ TSymbol* symbol = symbolTable.find(name, &builtIn);
+
+ assert(symbol != nullptr);
+ if (symbol == nullptr)
+ return nullptr;
+
+ if (builtIn)
+ makeEditable(symbol);
+
+ return symbol->getAsVariable();
+}
+
+// Select the best matching function for 'call' from 'candidateList'.
+//
+// Assumptions
+//
+// There is no exact match, so a selection algorithm needs to run. That is, the
+// language-specific handler should check for exact match first, to
+// decide what to do, before calling this selector.
+//
+// Input
+//
+// * list of candidate signatures to select from
+// * the call
+// * a predicate function convertible(from, to) that says whether or not type
+// 'from' can implicitly convert to type 'to' (it includes the case of what
+// the calling language would consider a matching type with no conversion
+// needed)
+// * a predicate function better(from, to1, to2) that says whether or not a
+//   conversion from 'from' to 'to2' is considered better than a conversion
+//   from 'from' to 'to1' (both in and out directions need testing, as declared
+//   by the formal parameter)
+//
+// Output
+//
+// * best matching candidate (or none, if no viable candidates found)
+// * whether there was a tie for the best match (ambiguous overload selection,
+// caller's choice for how to report)
+//
+const TFunction* TParseContextBase::selectFunction(
+ const TVector<const TFunction*> candidateList,
+ const TFunction& call,
+ std::function<bool(const TType& from, const TType& to, TOperator op, int arg)> convertible,
+ std::function<bool(const TType& from, const TType& to1, const TType& to2)> better,
+ /* output */ bool& tie)
+{
+//
+// Operation
+//
+// 1. Prune the input list of candidates down to a list of viable candidates,
+// where each viable candidate has
+//
+// * at least as many parameters as there are calling arguments, with any
+// remaining parameters being optional or having default values
+// * each parameter is true under convertible(A, B), where A is the calling
+// type for in and B is the formal type, and in addition, for out B is the
+// calling type and A is the formal type
+//
+// 2. If there are no viable candidates, return with no match.
+//
+// 3. If there is only one viable candidate, it is the best match.
+//
+// 4. If there are multiple viable candidates, select the first viable candidate
+// as the incumbent. Compare the incumbent to the next viable candidate, and if
+// that candidate is better (bullets below), make it the incumbent. Repeat, with
+// a linear walk through the viable candidate list. The final incumbent will be
+// returned as the best match. A viable candidate is better than the incumbent if
+//
+// * it has a function argument with a better(...) conversion than the incumbent,
+// for all directions needed by in and out
+// * the incumbent has no argument with a better(...) conversion than the
+// candidate, for either in or out (as needed)
+//
+// 5. Check for ambiguity by comparing the best match against all other viable
+// candidates. If any other viable candidate has a function argument with a
+// better(...) conversion than the best candidate (for either in or out
+// directions), return that there was a tie for best.
+//
+
+ tie = false;
+
+ // 1. prune to viable...
+ TVector<const TFunction*> viableCandidates;
+ for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+ const TFunction& candidate = *(*it);
+
+ // to even be a potential match, number of arguments must be >= the number of
+        // fixed (non-default) parameters, and <= the total (including parameters with defaults).
+ if (call.getParamCount() < candidate.getFixedParamCount() ||
+ call.getParamCount() > candidate.getParamCount())
+ continue;
+
+ // see if arguments are convertible
+ bool viable = true;
+
+ // The call can have fewer parameters than the candidate, if some have defaults.
+ const int paramCount = std::min(call.getParamCount(), candidate.getParamCount());
+ for (int param = 0; param < paramCount; ++param) {
+ if (candidate[param].type->getQualifier().isParamInput()) {
+ if (! convertible(*call[param].type, *candidate[param].type, candidate.getBuiltInOp(), param)) {
+ viable = false;
+ break;
+ }
+ }
+ if (candidate[param].type->getQualifier().isParamOutput()) {
+ if (! convertible(*candidate[param].type, *call[param].type, candidate.getBuiltInOp(), param)) {
+ viable = false;
+ break;
+ }
+ }
+ }
+
+ if (viable)
+ viableCandidates.push_back(&candidate);
+ }
+
+ // 2. none viable...
+ if (viableCandidates.size() == 0)
+ return nullptr;
+
+ // 3. only one viable...
+ if (viableCandidates.size() == 1)
+ return viableCandidates.front();
+
+ // 4. find best...
+ const auto betterParam = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool {
+ // is call -> can2 better than call -> can1 for any parameter
+ bool hasBetterParam = false;
+ for (int param = 0; param < call.getParamCount(); ++param) {
+ if (better(*call[param].type, *can1[param].type, *can2[param].type)) {
+ hasBetterParam = true;
+ break;
+ }
+ }
+ return hasBetterParam;
+ };
+
+ const auto equivalentParams = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool {
+ // is call -> can2 equivalent to call -> can1 for all the call parameters?
+ for (int param = 0; param < call.getParamCount(); ++param) {
+ if (better(*call[param].type, *can1[param].type, *can2[param].type) ||
+ better(*call[param].type, *can2[param].type, *can1[param].type))
+ return false;
+ }
+ return true;
+ };
+
+ const TFunction* incumbent = viableCandidates.front();
+ for (auto it = viableCandidates.begin() + 1; it != viableCandidates.end(); ++it) {
+ const TFunction& candidate = *(*it);
+ if (betterParam(*incumbent, candidate) && ! betterParam(candidate, *incumbent))
+ incumbent = &candidate;
+ }
+
+ // 5. ambiguity...
+ for (auto it = viableCandidates.begin(); it != viableCandidates.end(); ++it) {
+ if (incumbent == *it)
+ continue;
+ const TFunction& candidate = *(*it);
+
+        // In the case of default parameters, two candidates may share an identical
+        // initial parameter set, which is also ambiguous
+ if (betterParam(*incumbent, candidate) || equivalentParams(*incumbent, candidate))
+ tie = true;
+ }
+
+ return incumbent;
+}
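+
+// A worked sketch of the selection, over a hypothetical overload set
+// (illustrative only; names and conversions are assumptions):
+//
+//   float f(float x);
+//   double f(double x);
+//   ... f(1) ...          // int argument, no exact match
+//
+// Both candidates are viable when convertible(int, float) and
+// convertible(int, double) hold. Step 4 keeps f(float) as the incumbent
+// if better(int, double, float) reports int -> float beating int -> double;
+// if neither conversion beats the other for any argument, step 5 flags a tie.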
+
+//
+// Look at a '.' field selector string and change it into numerical selectors
+// for a vector or scalar.
+//
+// Always return some form of swizzle, so the result is always usable.
+//
+void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TString& compString, int vecSize,
+ TSwizzleSelectors<TVectorSelector>& selector)
+{
+ // Too long?
+ if (compString.size() > MaxSwizzleSelectors)
+ error(loc, "vector swizzle too long", compString.c_str(), "");
+
+ // Use this to test that all swizzle characters are from the same swizzle-namespace-set
+ enum {
+ exyzw,
+ ergba,
+ estpq,
+ } fieldSet[MaxSwizzleSelectors];
+
+ // Decode the swizzle string.
+ int size = std::min(MaxSwizzleSelectors, (int)compString.size());
+ for (int i = 0; i < size; ++i) {
+ switch (compString[i]) {
+ case 'x':
+ selector.push_back(0);
+ fieldSet[i] = exyzw;
+ break;
+ case 'r':
+ selector.push_back(0);
+ fieldSet[i] = ergba;
+ break;
+ case 's':
+ selector.push_back(0);
+ fieldSet[i] = estpq;
+ break;
+
+ case 'y':
+ selector.push_back(1);
+ fieldSet[i] = exyzw;
+ break;
+ case 'g':
+ selector.push_back(1);
+ fieldSet[i] = ergba;
+ break;
+ case 't':
+ selector.push_back(1);
+ fieldSet[i] = estpq;
+ break;
+
+ case 'z':
+ selector.push_back(2);
+ fieldSet[i] = exyzw;
+ break;
+ case 'b':
+ selector.push_back(2);
+ fieldSet[i] = ergba;
+ break;
+ case 'p':
+ selector.push_back(2);
+ fieldSet[i] = estpq;
+ break;
+
+ case 'w':
+ selector.push_back(3);
+ fieldSet[i] = exyzw;
+ break;
+ case 'a':
+ selector.push_back(3);
+ fieldSet[i] = ergba;
+ break;
+ case 'q':
+ selector.push_back(3);
+ fieldSet[i] = estpq;
+ break;
+
+ default:
+ error(loc, "unknown swizzle selection", compString.c_str(), "");
+ break;
+ }
+ }
+
+ // Additional error checking.
+ for (int i = 0; i < selector.size(); ++i) {
+ if (selector[i] >= vecSize) {
+ error(loc, "vector swizzle selection out of range", compString.c_str(), "");
+ selector.resize(i);
+ break;
+ }
+
+ if (i > 0 && fieldSet[i] != fieldSet[i-1]) {
+ error(loc, "vector swizzle selectors not from the same set", compString.c_str(), "");
+ selector.resize(i);
+ break;
+ }
+ }
+
+ // Ensure it is valid.
+ if (selector.size() == 0)
+ selector.push_back(0);
+}
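+
+// Illustrative GLSL for the rules enforced above:
+//
+//   vec4 v;
+//   v.xyz    // ok: selectors {0,1,2}, all from the xyzw set
+//   v.rgba   // ok: the same components via the rgba set
+//   v.xg     // error: selectors not from the same set
+//   vec2 u;
+//   u.z      // error: swizzle selection out of range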
+
+//
+// Make the passed-in variable information become a member of the
+// global uniform block. If the block doesn't exist yet, make it.
+//
+void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList)
+{
+ // Make the global block, if not yet made.
+ if (globalUniformBlock == nullptr) {
+ TQualifier blockQualifier;
+ blockQualifier.clear();
+ blockQualifier.storage = EvqUniform;
+ TType blockType(new TTypeList, *NewPoolTString(getGlobalUniformBlockName()), blockQualifier);
+ setUniformBlockDefaults(blockType);
+ globalUniformBlock = new TVariable(NewPoolTString(""), blockType, true);
+ firstNewMember = 0;
+ }
+
+ // Update with binding and set
+ globalUniformBlock->getWritableType().getQualifier().layoutBinding = globalUniformBinding;
+ globalUniformBlock->getWritableType().getQualifier().layoutSet = globalUniformSet;
+
+ // Add the requested member as a member to the global block.
+ TType* type = new TType;
+ type->shallowCopy(memberType);
+ type->setFieldName(memberName);
+ if (typeList)
+ type->setStruct(typeList);
+ TTypeLoc typeLoc = {type, loc};
+ globalUniformBlock->getType().getWritableStruct()->push_back(typeLoc);
+
+ // Insert into the symbol table.
+ if (firstNewMember == 0) {
+ // This is the first request; we need a normal symbol table insert
+ if (symbolTable.insert(*globalUniformBlock))
+ trackLinkage(*globalUniformBlock);
+ else
+ error(loc, "failed to insert the global constant buffer", "uniform", "");
+ } else {
+ // This is a follow-on request; we need to amend the first insert
+ symbolTable.amend(*globalUniformBlock, firstNewMember);
+ }
+
+ ++firstNewMember;
+}
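+
+// Sketch of the intended effect (illustrative): a front end that gathers
+// loose uniforms (the HLSL path, for example) calls this once per member,
+//
+//   float4 a;   // member 0: the first call inserts the block
+//   float4 b;   // member 1: a follow-on call amends that insert
+//
+// so both end up as fields of one implicitly declared uniform block, with
+// binding and set taken from globalUniformBinding/globalUniformSet.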
+
+void TParseContextBase::finish()
+{
+ if (parsingBuiltins)
+ return;
+
+ // Transfer the linkage symbols to AST nodes, preserving order.
+ TIntermAggregate* linkage = new TIntermAggregate;
+ for (auto i = linkageSymbols.begin(); i != linkageSymbols.end(); ++i)
+ intermediate.addSymbolLinkageNode(linkage, **i);
+ intermediate.addSymbolLinkageNodes(linkage, getLanguage(), symbolTable);
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp
new file mode 100644
index 0000000..e1e2ee9
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.cpp
@@ -0,0 +1,7997 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2015 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "ParseHelper.h"
+#include "Scan.h"
+
+#include "../OSDependent/osinclude.h"
+#include <algorithm>
+
+#include "preprocessor/PpContext.h"
+
+extern int yyparse(glslang::TParseContext*);
+
+namespace glslang {
+
+TParseContext::TParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins,
+ int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
+ TInfoSink& infoSink, bool forwardCompatible, EShMessages messages,
+ const TString* entryPoint) :
+ TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language,
+ infoSink, forwardCompatible, messages, entryPoint),
+ inMain(false),
+ blockName(nullptr),
+ limits(resources.limits),
+ atomicUintOffsets(nullptr), anyIndexLimits(false)
+{
+ // decide whether precision qualifiers should be ignored or respected
+ if (profile == EEsProfile || spvVersion.vulkan > 0) {
+ precisionManager.respectPrecisionQualifiers();
+ if (! parsingBuiltins && language == EShLangFragment && profile != EEsProfile && spvVersion.vulkan > 0)
+ precisionManager.warnAboutDefaults();
+ }
+
+ setPrecisionDefaults();
+
+ globalUniformDefaults.clear();
+ globalUniformDefaults.layoutMatrix = ElmColumnMajor;
+ globalUniformDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd140 : ElpShared;
+
+ globalBufferDefaults.clear();
+ globalBufferDefaults.layoutMatrix = ElmColumnMajor;
+ globalBufferDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd430 : ElpShared;
+
+ // use storage buffer on SPIR-V 1.3 and up
+ if (spvVersion.spv >= EShTargetSpv_1_3)
+ intermediate.setUseStorageBuffer();
+
+ globalInputDefaults.clear();
+ globalOutputDefaults.clear();
+
+ // "Shaders in the transform
+ // feedback capturing mode have an initial global default of
+ // layout(xfb_buffer = 0) out;"
+ if (language == EShLangVertex ||
+ language == EShLangTessControl ||
+ language == EShLangTessEvaluation ||
+ language == EShLangGeometry)
+ globalOutputDefaults.layoutXfbBuffer = 0;
+
+ if (language == EShLangGeometry)
+ globalOutputDefaults.layoutStream = 0;
+
+ if (entryPoint != nullptr && entryPoint->size() > 0 && *entryPoint != "main")
+ infoSink.info.message(EPrefixError, "Source entry point must be \"main\"");
+}
+
+TParseContext::~TParseContext()
+{
+ delete [] atomicUintOffsets;
+}
+
+// Set up all default precisions as needed by the current environment.
+// Intended just as a TParseContext constructor helper.
+void TParseContext::setPrecisionDefaults()
+{
+ // Set all precision defaults to EpqNone, which is correct for all types
+ // when not obeying precision qualifiers, and correct for types that don't
+ // have defaults (thus getting an error on use) when obeying precision
+ // qualifiers.
+
+ for (int type = 0; type < EbtNumTypes; ++type)
+ defaultPrecision[type] = EpqNone;
+
+ for (int type = 0; type < maxSamplerIndex; ++type)
+ defaultSamplerPrecision[type] = EpqNone;
+
+ // replace with real precision defaults for those that have them
+ if (obeyPrecisionQualifiers()) {
+ if (profile == EEsProfile) {
+ // Most don't have defaults, a few default to lowp.
+ TSampler sampler;
+ sampler.set(EbtFloat, Esd2D);
+ defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
+ sampler.set(EbtFloat, EsdCube);
+ defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
+ sampler.set(EbtFloat, Esd2D);
+ sampler.external = true;
+ defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow;
+ }
+
+ // If we are parsing built-in computational variables/functions, it is meaningful to record
+ // whether the built-in has no precision qualifier, as that ambiguity
+ // is used to resolve the precision from the supplied arguments/operands instead.
+ // So, we don't actually want to replace EpqNone with a default precision for built-ins.
+ if (! parsingBuiltins) {
+ if (profile == EEsProfile && language == EShLangFragment) {
+ defaultPrecision[EbtInt] = EpqMedium;
+ defaultPrecision[EbtUint] = EpqMedium;
+ } else {
+ defaultPrecision[EbtInt] = EpqHigh;
+ defaultPrecision[EbtUint] = EpqHigh;
+ defaultPrecision[EbtFloat] = EpqHigh;
+ }
+
+ if (profile != EEsProfile) {
+ // Non-ES profile
+ // All sampler precisions default to highp.
+ for (int type = 0; type < maxSamplerIndex; ++type)
+ defaultSamplerPrecision[type] = EpqHigh;
+ }
+ }
+
+ defaultPrecision[EbtSampler] = EpqLow;
+ defaultPrecision[EbtAtomicUint] = EpqHigh;
+ }
+}
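+
+// Net effect for an ES fragment shader (illustrative): int/uint default to
+// mediump while float stays at EpqNone, so
+//
+//   int i;                    // ok: mediump by default
+//   float f;                  // error on use: no default precision for float
+//   precision highp float;    // an explicit default makes 'f' usable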
+
+void TParseContext::setLimits(const TBuiltInResource& r)
+{
+ resources = r;
+
+ anyIndexLimits = ! limits.generalAttributeMatrixVectorIndexing ||
+ ! limits.generalConstantMatrixVectorIndexing ||
+ ! limits.generalSamplerIndexing ||
+ ! limits.generalUniformIndexing ||
+ ! limits.generalVariableIndexing ||
+ ! limits.generalVaryingIndexing;
+
+ intermediate.setLimits(resources);
+
+ // "Each binding point tracks its own current default offset for
+ // inheritance of subsequent variables using the same binding. The initial state of compilation is that all
+ // binding points have an offset of 0."
+ atomicUintOffsets = new int[resources.maxAtomicCounterBindings];
+ for (int b = 0; b < resources.maxAtomicCounterBindings; ++b)
+ atomicUintOffsets[b] = 0;
+}
+
+//
+// Parse an array of strings using yyparse, going through the
+// preprocessor to tokenize the shader strings, then through
+// the GLSL scanner.
+//
+// Returns true for successful acceptance of the shader, false if any errors.
+//
+bool TParseContext::parseShaderStrings(TPpContext& ppContext, TInputScanner& input, bool versionWillBeError)
+{
+ currentScanner = &input;
+ ppContext.setInput(input, versionWillBeError);
+ yyparse(this);
+
+ finish();
+
+ return numErrors == 0;
+}
+
+// This is called from bison when it has a parse (syntax) error.
+// Note though that to stop cascading errors, we set EOF, which
+// itself usually causes a syntax error; in that case, be more
+// accurate and report that compilation is terminating.
+void TParseContext::parserError(const char* s)
+{
+ if (! getScanner()->atEndOfInput() || numErrors == 0)
+ error(getCurrentLoc(), "", "", s, "");
+ else
+ error(getCurrentLoc(), "compilation terminated", "", "");
+}
+
+void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>& tokens)
+{
+ if (pragmaCallback)
+ pragmaCallback(loc.line, tokens);
+
+ if (tokens.size() == 0)
+ return;
+
+ if (tokens[0].compare("optimize") == 0) {
+ if (tokens.size() != 4) {
+ error(loc, "optimize pragma syntax is incorrect", "#pragma", "");
+ return;
+ }
+
+ if (tokens[1].compare("(") != 0) {
+ error(loc, "\"(\" expected after 'optimize' keyword", "#pragma", "");
+ return;
+ }
+
+ if (tokens[2].compare("on") == 0)
+ contextPragma.optimize = true;
+ else if (tokens[2].compare("off") == 0)
+ contextPragma.optimize = false;
+ else {
+ error(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", "");
+ return;
+ }
+
+ if (tokens[3].compare(")") != 0) {
+ error(loc, "\")\" expected to end 'optimize' pragma", "#pragma", "");
+ return;
+ }
+ } else if (tokens[0].compare("debug") == 0) {
+ if (tokens.size() != 4) {
+ error(loc, "debug pragma syntax is incorrect", "#pragma", "");
+ return;
+ }
+
+ if (tokens[1].compare("(") != 0) {
+ error(loc, "\"(\" expected after 'debug' keyword", "#pragma", "");
+ return;
+ }
+
+ if (tokens[2].compare("on") == 0)
+ contextPragma.debug = true;
+ else if (tokens[2].compare("off") == 0)
+ contextPragma.debug = false;
+ else {
+ error(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", "");
+ return;
+ }
+
+ if (tokens[3].compare(")") != 0) {
+ error(loc, "\")\" expected to end 'debug' pragma", "#pragma", "");
+ return;
+ }
+ } else if (spvVersion.spv > 0 && tokens[0].compare("use_storage_buffer") == 0) {
+ if (tokens.size() != 1)
+ error(loc, "extra tokens", "#pragma", "");
+ intermediate.setUseStorageBuffer();
+ } else if (spvVersion.spv > 0 && tokens[0].compare("use_vulkan_memory_model") == 0) {
+ if (tokens.size() != 1)
+ error(loc, "extra tokens", "#pragma", "");
+ intermediate.setUseVulkanMemoryModel();
+ } else if (spvVersion.spv > 0 && tokens[0].compare("use_variable_pointers") == 0) {
+ if (tokens.size() != 1)
+ error(loc, "extra tokens", "#pragma", "");
+ if (spvVersion.spv < glslang::EShTargetSpv_1_3)
+ error(loc, "requires SPIR-V 1.3", "#pragma use_variable_pointers", "");
+ intermediate.setUseVariablePointers();
+ } else if (tokens[0].compare("once") == 0) {
+ warn(loc, "not implemented", "#pragma once", "");
+ } else if (tokens[0].compare("glslang_binary_double_output") == 0)
+ intermediate.setBinaryDoubleOutput();
+}
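+
+// Usage examples recognized above (illustrative):
+//
+//   #pragma optimize(off)           // contextPragma.optimize = false
+//   #pragma debug(on)               // contextPragma.debug = true
+//   #pragma use_storage_buffer      // SPIR-V only: use the storage-buffer model
+//   #pragma use_variable_pointers   // SPIR-V 1.3+; errors on older targets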
+
+//
+// Handle seeing a variable identifier in the grammar.
+//
+TIntermTyped* TParseContext::handleVariable(const TSourceLoc& loc, TSymbol* symbol, const TString* string)
+{
+ TIntermTyped* node = nullptr;
+
+ // Error check for requiring specific extensions present.
+ if (symbol && symbol->getNumExtensions())
+ requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str());
+
+ if (symbol && symbol->isReadOnly()) {
+ // All shared things containing an unsized array must be copied up
+ // on first use, so that all future references will share its array structure,
+        // so that editing the implicit size will affect all nodes consuming it,
+ // and so that editing the implicit size won't change the shared one.
+ //
+ // If this is a variable or a block, check it and all it contains, but if this
+ // is a member of an anonymous block, check the whole block, as the whole block
+ // will need to be copied up if it contains an unsized array.
+ if (symbol->getType().containsUnsizedArray() ||
+ (symbol->getAsAnonMember() &&
+ symbol->getAsAnonMember()->getAnonContainer().getType().containsUnsizedArray()))
+ makeEditable(symbol);
+ }
+
+ const TVariable* variable;
+ const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr;
+ if (anon) {
+ // It was a member of an anonymous container.
+
+ // Create a subtree for its dereference.
+ variable = anon->getAnonContainer().getAsVariable();
+ TIntermTyped* container = intermediate.addSymbol(*variable, loc);
+ TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc);
+ node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc);
+
+ node->setType(*(*variable->getType().getStruct())[anon->getMemberNumber()].type);
+ if (node->getType().hiddenMember())
+ error(loc, "member of nameless block was not redeclared", string->c_str(), "");
+ } else {
+ // Not a member of an anonymous container.
+
+ // The symbol table search was done in the lexical phase.
+ // See if it was a variable.
+ variable = symbol ? symbol->getAsVariable() : nullptr;
+ if (variable) {
+ if ((variable->getType().getBasicType() == EbtBlock ||
+ variable->getType().getBasicType() == EbtStruct) && variable->getType().getStruct() == nullptr) {
+ error(loc, "cannot be used (maybe an instance name is needed)", string->c_str(), "");
+ variable = nullptr;
+ }
+ } else {
+ if (symbol)
+ error(loc, "variable name expected", string->c_str(), "");
+ }
+
+ // Recovery, if it wasn't found or was not a variable.
+ if (! variable)
+ variable = new TVariable(string, TType(EbtVoid));
+
+ if (variable->getType().getQualifier().isFrontEndConstant())
+ node = intermediate.addConstantUnion(variable->getConstArray(), variable->getType(), loc);
+ else
+ node = intermediate.addSymbol(*variable, loc);
+ }
+
+ if (variable->getType().getQualifier().isIo())
+ intermediate.addIoAccessed(*string);
+
+ if (variable->getType().getBasicType() == EbtReference &&
+ variable->getType().getQualifier().bufferReferenceNeedsVulkanMemoryModel()) {
+ intermediate.setUseVulkanMemoryModel();
+ }
+
+ return node;
+}
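+
+// Illustrative GLSL for the anonymous-member path above:
+//
+//   uniform Colors { vec4 diffuse; };   // anonymous block
+//   ... diffuse ...                     // built as block[0] via EOpIndexDirectStruct
+//
+// A named instance ('uniform Colors { ... } c;') instead takes the plain
+// variable path here, and 'c.diffuse' is handled later by handleDotDereference().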
+
+//
+// Handle seeing a base[index] dereference in the grammar.
+//
+TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index)
+{
+ int indexValue = 0;
+ if (index->getQualifier().isFrontEndConstant())
+ indexValue = index->getAsConstantUnion()->getConstArray()[0].getIConst();
+
+ // basic type checks...
+ variableCheck(base);
+
+ if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat()) {
+ if (base->getAsSymbolNode())
+ error(loc, " left of '[' is not of type array, matrix, or vector ", base->getAsSymbolNode()->getName().c_str(), "");
+ else
+ error(loc, " left of '[' is not of type array, matrix, or vector ", "expression", "");
+
+ // Insert dummy error-recovery result
+ return intermediate.addConstantUnion(0.0, EbtFloat, loc);
+ }
+
+ if (!base->isArray() && base->isVector()) {
+ if (base->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, "[", "does not operate on types containing float16");
+ if (base->getType().contains16BitInt())
+ requireInt16Arithmetic(loc, "[", "does not operate on types containing (u)int16");
+ if (base->getType().contains8BitInt())
+ requireInt8Arithmetic(loc, "[", "does not operate on types containing (u)int8");
+ }
+
+ // check for constant folding
+ if (base->getType().getQualifier().isFrontEndConstant() && index->getQualifier().isFrontEndConstant()) {
+ // both base and index are front-end constants
+ checkIndex(loc, base->getType(), indexValue);
+ return intermediate.foldDereference(base, indexValue, loc);
+ }
+
+ // at least one of base and index is not a front-end constant variable...
+ TIntermTyped* result = nullptr;
+ if (index->getQualifier().isFrontEndConstant())
+ checkIndex(loc, base->getType(), indexValue);
+
+ if (base->getAsSymbolNode() && isIoResizeArray(base->getType()))
+ handleIoResizeArrayAccess(loc, base);
+
+ if (index->getQualifier().isFrontEndConstant()) {
+ if (base->getType().isUnsizedArray()) {
+ base->getWritableType().updateImplicitArraySize(indexValue + 1);
+#ifdef NV_EXTENSIONS
+ // For 2D per-view builtin arrays, update the inner dimension size in parent type
+ if (base->getQualifier().isPerView() && base->getQualifier().builtIn != EbvNone) {
+ TIntermBinary* binaryNode = base->getAsBinaryNode();
+ if (binaryNode) {
+ TType& leftType = binaryNode->getLeft()->getWritableType();
+ TArraySizes& arraySizes = *leftType.getArraySizes();
+ assert(arraySizes.getNumDims() == 2);
+ arraySizes.setDimSize(1, std::max(arraySizes.getDimSize(1), indexValue + 1));
+ }
+ }
+#endif
+ } else
+ checkIndex(loc, base->getType(), indexValue);
+ result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
+ } else {
+ if (base->getType().isUnsizedArray()) {
+ // we have a variable index into an unsized array, which is okay,
+ // depending on the situation
+ if (base->getAsSymbolNode() && isIoResizeArray(base->getType()))
+ error(loc, "", "[", "array must be sized by a redeclaration or layout qualifier before being indexed with a variable");
+ else {
+ // it is okay for a run-time sized array
+ checkRuntimeSizable(loc, *base);
+ }
+ base->getWritableType().setArrayVariablyIndexed();
+ }
+ if (base->getBasicType() == EbtBlock) {
+ if (base->getQualifier().storage == EvqBuffer)
+ requireProfile(base->getLoc(), ~EEsProfile, "variable indexing buffer block array");
+ else if (base->getQualifier().storage == EvqUniform)
+ profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
+ "variable indexing uniform block array");
+ else {
+ // input/output blocks either don't exist or can be variable indexed
+ }
+ } else if (language == EShLangFragment && base->getQualifier().isPipeOutput())
+ requireProfile(base->getLoc(), ~EEsProfile, "variable indexing fragment shader output array");
+ else if (base->getBasicType() == EbtSampler && version >= 130) {
+ const char* explanation = "variable indexing sampler array";
+ requireProfile(base->getLoc(), EEsProfile | ECoreProfile | ECompatibilityProfile, explanation);
+ profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, explanation);
+ profileRequires(base->getLoc(), ECoreProfile | ECompatibilityProfile, 400, nullptr, explanation);
+ }
+
+ result = intermediate.addIndex(EOpIndexIndirect, base, index, loc);
+ }
+
+ // Insert valid dereferenced result
+ TType newType(base->getType(), 0); // dereferenced type
+ if (base->getType().getQualifier().isConstant() && index->getQualifier().isConstant()) {
+ newType.getQualifier().storage = EvqConst;
+ // If base or index is a specialization constant, the result should also be a specialization constant.
+ if (base->getType().getQualifier().isSpecConstant() || index->getQualifier().isSpecConstant()) {
+ newType.getQualifier().makeSpecConstant();
+ }
+ } else {
+ newType.getQualifier().makePartialTemporary();
+ }
+ result->setType(newType);
+
+ // Propagate nonuniform
+ if (base->getQualifier().isNonUniform() || index->getQualifier().isNonUniform())
+ result->getWritableType().getQualifier().nonUniform = true;
+
+ if (anyIndexLimits)
+ handleIndexLimits(loc, base, index);
+
+ return result;
+}
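+
+// Implicit sizing sketch (illustrative): for an unsized array, each constant
+// index grows the tracked implicit size,
+//
+//   buffer B { float data[]; };
+//   data[3] = 1.0;   // implicit size becomes at least 4
+//   data[i] = 1.0;   // variable index: the array is marked variably indexed,
+//                    // which is legal only where run-time sizing is allowed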
+
+// Apply ES 2.0 (version 100) limitations to almost all index operations, except vertex-shader uniform indexing.
+void TParseContext::handleIndexLimits(const TSourceLoc& /*loc*/, TIntermTyped* base, TIntermTyped* index)
+{
+ if ((! limits.generalSamplerIndexing && base->getBasicType() == EbtSampler) ||
+ (! limits.generalUniformIndexing && base->getQualifier().isUniformOrBuffer() && language != EShLangVertex) ||
+ (! limits.generalAttributeMatrixVectorIndexing && base->getQualifier().isPipeInput() && language == EShLangVertex && (base->getType().isMatrix() || base->getType().isVector())) ||
+ (! limits.generalConstantMatrixVectorIndexing && base->getAsConstantUnion()) ||
+ (! limits.generalVariableIndexing && ! base->getType().getQualifier().isUniformOrBuffer() &&
+ ! base->getType().getQualifier().isPipeInput() &&
+ ! base->getType().getQualifier().isPipeOutput() &&
+ ! base->getType().getQualifier().isConstant()) ||
+ (! limits.generalVaryingIndexing && (base->getType().getQualifier().isPipeInput() ||
+ base->getType().getQualifier().isPipeOutput()))) {
+ // it's too early to know what the inductive variables are, save it for post processing
+ needsIndexLimitationChecking.push_back(index);
+ }
+}
+
+// Make a shared symbol have a non-shared version that can be edited by the current
+// compile, such that editing its type will not change the shared version and will
+// affect all nodes sharing it.
+void TParseContext::makeEditable(TSymbol*& symbol)
+{
+ TParseContextBase::makeEditable(symbol);
+
+ // See if it's tied to IO resizing
+ if (isIoResizeArray(symbol->getType()))
+ ioArraySymbolResizeList.push_back(symbol);
+}
+
+// Return true if this is a geometry shader input array or tessellation control output array
+// or mesh shader output array.
+bool TParseContext::isIoResizeArray(const TType& type) const
+{
+ return type.isArray() &&
+ ((language == EShLangGeometry && type.getQualifier().storage == EvqVaryingIn) ||
+ (language == EShLangTessControl && type.getQualifier().storage == EvqVaryingOut && ! type.getQualifier().patch)
+#ifdef NV_EXTENSIONS
+ ||
+ (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn && type.getQualifier().pervertexNV) ||
+ (language == EShLangMeshNV && type.getQualifier().storage == EvqVaryingOut && !type.getQualifier().perTaskNV)
+
+#endif
+ );
+}
+
+// If an array is not isIoResizeArray() but is an io array, make sure it has the right size
+void TParseContext::fixIoArraySize(const TSourceLoc& loc, TType& type)
+{
+ if (! type.isArray() || type.getQualifier().patch || symbolTable.atBuiltInLevel())
+ return;
+
+ assert(! isIoResizeArray(type));
+
+ if (type.getQualifier().storage != EvqVaryingIn || type.getQualifier().patch)
+ return;
+
+ if (language == EShLangTessControl || language == EShLangTessEvaluation) {
+ if (type.getOuterArraySize() != resources.maxPatchVertices) {
+ if (type.isSizedArray())
+ error(loc, "tessellation input array size must be gl_MaxPatchVertices or implicitly sized", "[]", "");
+ type.changeOuterArraySize(resources.maxPatchVertices);
+ }
+ }
+}
+
+// Issue any errors if the non-array object is missing arrayness WRT
+// shader I/O that has array requirements.
+// All arrayness checking is handled in array paths; this is only for
+// the non-array case.
+void TParseContext::ioArrayCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (! type.isArray() && ! symbolTable.atBuiltInLevel()) {
+ if (type.getQualifier().isArrayedIo(language)
+#ifdef NV_EXTENSIONS
+ && !type.getQualifier().layoutPassthrough
+#endif
+ )
+ error(loc, "type must be an array:", type.getStorageQualifierString(), identifier.c_str());
+ }
+}
+
+// Handle a dereference of a geometry shader input array or tessellation control output array.
+// See ioArraySymbolResizeList comment in ParseHelper.h.
+//
+void TParseContext::handleIoResizeArrayAccess(const TSourceLoc& /*loc*/, TIntermTyped* base)
+{
+ TIntermSymbol* symbolNode = base->getAsSymbolNode();
+ assert(symbolNode);
+ if (! symbolNode)
+ return;
+
+ // fix array size, if it can be fixed and needs to be fixed (will allow variable indexing)
+ if (symbolNode->getType().isUnsizedArray()) {
+ int newSize = getIoArrayImplicitSize(symbolNode->getType().getQualifier());
+ if (newSize > 0)
+ symbolNode->getWritableType().changeOuterArraySize(newSize);
+ }
+}
+
+// If there has been an input primitive declaration (geometry shader) or an output
+// number of vertices declaration (tessellation shader), make sure all input array types
+// match it in size. Types come either from nodes in the AST or symbols in the
+// symbol table.
+//
+// Types without an array size will be given one.
+// Types already having a size that is wrong will get an error.
+//
+void TParseContext::checkIoArraysConsistency(const TSourceLoc &loc, bool tailOnly)
+{
+ int requiredSize = 0;
+ TString featureString;
+ size_t listSize = ioArraySymbolResizeList.size();
+ size_t i = 0;
+
+ // If tailOnly = true, only check the last array symbol in the list.
+ if (tailOnly) {
+ i = listSize - 1;
+ }
+ for (bool firstIteration = true; i < listSize; ++i) {
+ TType &type = ioArraySymbolResizeList[i]->getWritableType();
+
+ // As I/O array sizes don't change, fetch requiredSize only once,
+ // except for mesh shaders which could have different I/O array sizes based on type qualifiers.
+ if (firstIteration
+#ifdef NV_EXTENSIONS
+ || (language == EShLangMeshNV)
+#endif
+ )
+ {
+ requiredSize = getIoArrayImplicitSize(type.getQualifier(), &featureString);
+ if (requiredSize == 0)
+ break;
+ firstIteration = false;
+ }
+
+ checkIoArrayConsistency(loc, requiredSize, featureString.c_str(), type,
+ ioArraySymbolResizeList[i]->getName());
+ }
+}
+
+int TParseContext::getIoArrayImplicitSize(const TQualifier &qualifier, TString *featureString) const
+{
+ int expectedSize = 0;
+ TString str = "unknown";
+ unsigned int maxVertices = intermediate.getVertices() != TQualifier::layoutNotSet ? intermediate.getVertices() : 0;
+
+ if (language == EShLangGeometry) {
+ expectedSize = TQualifier::mapGeometryToSize(intermediate.getInputPrimitive());
+ str = TQualifier::getGeometryString(intermediate.getInputPrimitive());
+ }
+ else if (language == EShLangTessControl) {
+ expectedSize = maxVertices;
+ str = "vertices";
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangFragment) {
+ // Number of vertices for Fragment shader is always three.
+ expectedSize = 3;
+ str = "vertices";
+ }
+ else if (language == EShLangMeshNV) {
+ unsigned int maxPrimitives =
+ intermediate.getPrimitives() != TQualifier::layoutNotSet ? intermediate.getPrimitives() : 0;
+ if (qualifier.builtIn == EbvPrimitiveIndicesNV) {
+ expectedSize = maxPrimitives * TQualifier::mapGeometryToSize(intermediate.getOutputPrimitive());
+ str = "max_primitives*";
+ str += TQualifier::getGeometryString(intermediate.getOutputPrimitive());
+ }
+ else if (qualifier.isPerPrimitive()) {
+ expectedSize = maxPrimitives;
+ str = "max_primitives";
+ }
+ else {
+ expectedSize = maxVertices;
+ str = "max_vertices";
+ }
+ }
+#endif
+ if (featureString)
+ *featureString = str;
+ return expectedSize;
+}
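+
+// Illustrative mapping (assuming the usual primitive sizes): a geometry shader
+// declaring 'layout(triangles) in;' yields an implicit input size of 3, and
+// 'layout(vertices = 4) out;' in tessellation control yields 4, so arrays such
+// as 'in vec3 normal[];' are resized accordingly by the consistency checks below.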
+
+void TParseContext::checkIoArrayConsistency(const TSourceLoc& loc, int requiredSize, const char* feature, TType& type, const TString& name)
+{
+ if (type.isUnsizedArray())
+ type.changeOuterArraySize(requiredSize);
+ else if (type.getOuterArraySize() != requiredSize) {
+ if (language == EShLangGeometry)
+ error(loc, "inconsistent input primitive for array size of", feature, name.c_str());
+ else if (language == EShLangTessControl)
+ error(loc, "inconsistent output number of vertices for array size of", feature, name.c_str());
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangFragment) {
+ if (type.getOuterArraySize() > requiredSize)
+ error(loc, " cannot be greater than 3 for pervertexNV", feature, name.c_str());
+ }
+ else if (language == EShLangMeshNV)
+ error(loc, "inconsistent output array size of", feature, name.c_str());
+#endif
+ else
+ assert(0);
+ }
+}
+
+// Handle seeing a binary node with a math operation.
+// Returns nullptr if not semantically allowed.
+TIntermTyped* TParseContext::handleBinaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right)
+{
+ rValueErrorCheck(loc, str, left->getAsTyped());
+ rValueErrorCheck(loc, str, right->getAsTyped());
+
+ bool allowed = true;
+ switch (op) {
+ // TODO: Bring more source language-specific checks up from intermediate.cpp
+ // to the specific parse helpers for that source language.
+ case EOpLessThan:
+ case EOpGreaterThan:
+ case EOpLessThanEqual:
+ case EOpGreaterThanEqual:
+ if (! left->isScalar() || ! right->isScalar())
+ allowed = false;
+ break;
+ default:
+ break;
+ }
+
+ if (((left->getType().containsBasicType(EbtFloat16) || right->getType().containsBasicType(EbtFloat16)) && !float16Arithmetic()) ||
+ ((left->getType().contains16BitInt() || right->getType().contains16BitInt()) && !int16Arithmetic()) ||
+ ((left->getType().contains8BitInt() || right->getType().contains8BitInt()) && !int8Arithmetic())) {
+ allowed = false;
+ }
+
+ TIntermTyped* result = nullptr;
+ if (allowed)
+ result = intermediate.addBinaryMath(op, left, right, loc);
+
+ if (result == nullptr)
+ binaryOpError(loc, str, left->getCompleteString(), right->getCompleteString());
+
+ return result;
+}
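+
+// Illustrative rejections from the checks above:
+//
+//   f16vec2 a, b;
+//   a + b;     // error unless float16 arithmetic is available
+//   vec2 v;
+//   v < v;     // error: relational operators here require scalar operands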
+
+// Handle seeing a unary node with a math operation.
+TIntermTyped* TParseContext::handleUnaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* childNode)
+{
+ rValueErrorCheck(loc, str, childNode);
+
+ bool allowed = true;
+ if ((childNode->getType().containsBasicType(EbtFloat16) && !float16Arithmetic()) ||
+ (childNode->getType().contains16BitInt() && !int16Arithmetic()) ||
+ (childNode->getType().contains8BitInt() && !int8Arithmetic())) {
+ allowed = false;
+ }
+
+ TIntermTyped* result = nullptr;
+
+ if (allowed)
+ result = intermediate.addUnaryMath(op, childNode, loc);
+
+ if (result)
+ return result;
+ else
+ unaryOpError(loc, str, childNode->getCompleteString());
+
+ return childNode;
+}
+
+//
+// Handle seeing a base.field dereference in the grammar.
+//
+TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field)
+{
+ variableCheck(base);
+
+ //
+ // .length() can't be resolved until we later see the function-calling syntax.
+ // Save away the name in the AST for now. Processing is completed in
+ // handleLengthMethod().
+ //
+ if (field == "length") {
+ if (base->isArray()) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, ".length");
+ profileRequires(loc, EEsProfile, 300, nullptr, ".length");
+ } else if (base->isVector() || base->isMatrix()) {
+ const char* feature = ".length() on vectors and matrices";
+ requireProfile(loc, ~EEsProfile, feature);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, feature);
+ } else if (!base->getType().isCoopMat()) {
+ error(loc, "does not operate on this type:", field.c_str(), base->getType().getCompleteString().c_str());
+
+ return base;
+ }
+
+ return intermediate.addMethod(base, TType(EbtInt), &field, loc);
+ }
+
+ // It's not .length() if we get to here.
+
+ if (base->isArray()) {
+ error(loc, "cannot apply to an array:", ".", field.c_str());
+
+ return base;
+ }
+
+ if (base->getType().isCoopMat()) {
+ error(loc, "cannot apply to a cooperative matrix type:", ".", field.c_str());
+ return base;
+ }
+
+ // It's neither an array nor .length() if we get here,
+ // leaving swizzles and struct/block dereferences.
+
+ TIntermTyped* result = base;
+ if ((base->isVector() || base->isScalar()) &&
+ (base->isFloatingDomain() || base->isIntegerDomain() || base->getBasicType() == EbtBool)) {
+ if (base->isScalar()) {
+ const char* dotFeature = "scalar swizzle";
+ requireProfile(loc, ~EEsProfile, dotFeature);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature);
+ }
+
+ TSwizzleSelectors<TVectorSelector> selectors;
+ parseSwizzleSelector(loc, field, base->getVectorSize(), selectors);
+
+ if (base->isVector() && selectors.size() != 1 && base->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16");
+ if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt())
+ requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16");
+ if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt())
+ requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8");
+
+ if (base->isScalar()) {
+ if (selectors.size() == 1)
+ return result;
+ else {
+ TType type(base->getBasicType(), EvqTemporary, selectors.size());
+ // Swizzle operations propagate specialization-constantness
+ if (base->getQualifier().isSpecConstant())
+ type.getQualifier().makeSpecConstant();
+ return addConstructor(loc, base, type);
+ }
+ }
+
+ if (base->getType().getQualifier().isFrontEndConstant())
+ result = intermediate.foldSwizzle(base, selectors, loc);
+ else {
+ if (selectors.size() == 1) {
+ TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc);
+ result = intermediate.addIndex(EOpIndexDirect, base, index, loc);
+ result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision));
+ } else {
+ TIntermTyped* index = intermediate.addSwizzle(selectors, loc);
+ result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc);
+ result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size()));
+ }
+ // Swizzle operations propagate specialization-constantness
+ if (base->getType().getQualifier().isSpecConstant())
+ result->getWritableType().getQualifier().makeSpecConstant();
+ }
+ } else if (base->getBasicType() == EbtStruct ||
+ base->getBasicType() == EbtBlock ||
+ base->getBasicType() == EbtReference) {
+ const TTypeList* fields = base->getBasicType() == EbtReference ?
+ base->getType().getReferentType()->getStruct() :
+ base->getType().getStruct();
+ bool fieldFound = false;
+ int member;
+ for (member = 0; member < (int)fields->size(); ++member) {
+ if ((*fields)[member].type->getFieldName() == field) {
+ fieldFound = true;
+ break;
+ }
+ }
+ if (fieldFound) {
+ if (base->getType().getQualifier().isFrontEndConstant())
+ result = intermediate.foldDereference(base, member, loc);
+ else {
+ blockMemberExtensionCheck(loc, base, member, field);
+ TIntermTyped* index = intermediate.addConstantUnion(member, loc);
+ result = intermediate.addIndex(EOpIndexDirectStruct, base, index, loc);
+ result->setType(*(*fields)[member].type);
+ if ((*fields)[member].type->getQualifier().isIo())
+ intermediate.addIoAccessed(field);
+ }
+ } else
+ error(loc, "no such field in structure", field.c_str(), "");
+ } else
+ error(loc, "does not apply to this type:", field.c_str(), base->getType().getCompleteString().c_str());
+
+ // Propagate noContraction up the dereference chain
+ if (base->getQualifier().noContraction)
+ result->getWritableType().getQualifier().noContraction = true;
+
+ // Propagate nonuniform
+ if (base->getQualifier().isNonUniform())
+ result->getWritableType().getQualifier().nonUniform = true;
+
+ return result;
+}
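+
+// Scalar swizzle sketch (illustrative; non-ES profiles only):
+//
+//   float f;
+//   f.x     // ok: a size-1 selection of a scalar returns 'f' itself
+//   f.xx    // becomes a vec2 constructor over 'f'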
+
+void TParseContext::blockMemberExtensionCheck(const TSourceLoc& loc, const TIntermTyped* base, int member, const TString& memberName)
+{
+ // a block that needs extension checking is either 'base', or if arrayed,
+ // one level removed to the left
+ const TIntermSymbol* baseSymbol = nullptr;
+ if (base->getAsBinaryNode() == nullptr)
+ baseSymbol = base->getAsSymbolNode();
+ else
+ baseSymbol = base->getAsBinaryNode()->getLeft()->getAsSymbolNode();
+ if (baseSymbol == nullptr)
+ return;
+ const TSymbol* symbol = symbolTable.find(baseSymbol->getName());
+ if (symbol == nullptr)
+ return;
+ const TVariable* variable = symbol->getAsVariable();
+ if (variable == nullptr)
+ return;
+ if (!variable->hasMemberExtensions())
+ return;
+
+ // We now have a variable that is the base of a dot reference
+ // with members that need extension checking.
+ if (variable->getNumMemberExtensions(member) > 0)
+ requireExtensions(loc, variable->getNumMemberExtensions(member), variable->getMemberExtensions(member), memberName.c_str());
+}
+
+//
+// Handle seeing a function declarator in the grammar. This is the precursor
+// to recognizing a function prototype or function definition.
+//
+TFunction* TParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype)
+{
+ // ES can't declare prototypes inside functions
+ if (! symbolTable.atGlobalLevel())
+ requireProfile(loc, ~EEsProfile, "local function declaration");
+
+ //
+ // Multiple declarations of the same function name are allowed.
+ //
+ // If this is a definition, the definition production code will check for redefinitions
+ // (we don't know at this point if it's a definition or not).
+ //
+ // Redeclarations (full signature match) are allowed. But, return types and parameter qualifiers must also match.
+ // - except ES 100, which only allows a single prototype
+ //
+ // ES 100 does not allow redefining, but does allow overloading of built-in functions.
+ // ES 300 does not allow redefining or overloading of built-in functions.
+ //
+ bool builtIn;
+ TSymbol* symbol = symbolTable.find(function.getMangledName(), &builtIn);
+ if (symbol && symbol->getAsFunction() && builtIn)
+ requireProfile(loc, ~EEsProfile, "redefinition of built-in function");
+ const TFunction* prevDec = symbol ? symbol->getAsFunction() : 0;
+ if (prevDec) {
+ if (prevDec->isPrototyped() && prototype)
+ profileRequires(loc, EEsProfile, 300, nullptr, "multiple prototypes for same function");
+ if (prevDec->getType() != function.getType())
+ error(loc, "overloaded functions must have the same return type", function.getName().c_str(), "");
+ for (int i = 0; i < prevDec->getParamCount(); ++i) {
+ if ((*prevDec)[i].type->getQualifier().storage != function[i].type->getQualifier().storage)
+ error(loc, "overloaded functions must have the same parameter storage qualifiers for argument", function[i].type->getStorageQualifierString(), "%d", i+1);
+
+ if ((*prevDec)[i].type->getQualifier().precision != function[i].type->getQualifier().precision)
+ error(loc, "overloaded functions must have the same parameter precision qualifiers for argument", function[i].type->getPrecisionQualifierString(), "%d", i+1);
+ }
+ }
+
+ arrayObjectCheck(loc, function.getType(), "array in function return type");
+
+ if (prototype) {
+ // All built-in functions are defined, even though they don't have a body.
+ // Count their prototype as a definition instead.
+ if (symbolTable.atBuiltInLevel())
+ function.setDefined();
+ else {
+ if (prevDec && ! builtIn)
+ symbol->getAsFunction()->setPrototyped(); // need a writable one, but like having prevDec as a const
+ function.setPrototyped();
+ }
+ }
+
+ // This insert won't actually insert it if it's a duplicate signature, but it will still check for
+ // other forms of name collisions.
+ if (! symbolTable.insert(function))
+ error(loc, "function name is redeclaration of existing name", function.getName().c_str(), "");
+
+ //
+ // If this is a redeclaration, it could also be a definition,
+ // in which case, we need to use the parameter names from this one, and not the one that's
+ // being redeclared. So, pass back this declaration, not the one in the symbol table.
+ //
+ return &function;
+}
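+
+// The redeclaration rules above, in GLSL terms (illustrative):
+//
+//   float f(int x);
+//   float f(int x);      // ok on desktop; ES needs 300+ (one prototype in ES 100)
+//   int   f(int x);      // error: overloads must have the same return type
+//   float f(out int x);  // error: parameter storage qualifiers must match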
+
+//
+// Handle seeing the function prototype in front of a function definition in the grammar.
+// The body is handled after this function returns.
+//
+TIntermAggregate* TParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function)
+{
+ currentCaller = function.getMangledName();
+ TSymbol* symbol = symbolTable.find(function.getMangledName());
+ TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr;
+
+ if (! prevDec)
+ error(loc, "can't find function", function.getName().c_str(), "");
+ // Note: 'prevDec' could be 'function' if this is the first time we've seen function
+ // as it would have just been put in the symbol table. Otherwise, we're looking up
+ // an earlier occurrence.
+
+ if (prevDec && prevDec->isDefined()) {
+ // Then this function already has a body.
+ error(loc, "function already has a body", function.getName().c_str(), "");
+ }
+ if (prevDec && ! prevDec->isDefined()) {
+ prevDec->setDefined();
+
+ // Remember the return type for later checking for RETURN statements.
+ currentFunctionType = &(prevDec->getType());
+ } else
+ currentFunctionType = new TType(EbtVoid);
+ functionReturnsValue = false;
+
+ // Check for entry point
+ if (function.getName().compare(intermediate.getEntryPointName().c_str()) == 0) {
+ intermediate.setEntryPointMangledName(function.getMangledName().c_str());
+ intermediate.incrementEntryPointCount();
+ inMain = true;
+ } else
+ inMain = false;
+
+ //
+ // Raise error message if main function takes any parameters or returns anything other than void
+ //
+ if (inMain) {
+ if (function.getParamCount() > 0)
+ error(loc, "function cannot take any parameter(s)", function.getName().c_str(), "");
+ if (function.getType().getBasicType() != EbtVoid)
+ error(loc, "", function.getType().getBasicTypeString().c_str(), "entry point cannot return a value");
+ }
+
+ //
+ // New symbol table scope for body of function plus its arguments
+ //
+ symbolTable.push();
+
+ //
+ // Insert parameters into the symbol table.
+ // If the parameter has no name, it's not an error, just don't insert it
+ // (could be used for unused args).
+ //
+ // Also, accumulate the list of parameters into the HIL, so lower level code
+ // knows where to find parameters.
+ //
+ TIntermAggregate* paramNodes = new TIntermAggregate;
+ for (int i = 0; i < function.getParamCount(); i++) {
+ TParameter& param = function[i];
+ if (param.name != nullptr) {
+ TVariable *variable = new TVariable(param.name, *param.type);
+
+ // Insert the parameters with name in the symbol table.
+ if (! symbolTable.insert(*variable))
+ error(loc, "redefinition", variable->getName().c_str(), "");
+ else {
+ // Transfer ownership of name pointer to symbol table.
+ param.name = nullptr;
+
+ // Add the parameter to the HIL
+ paramNodes = intermediate.growAggregate(paramNodes,
+ intermediate.addSymbol(*variable, loc),
+ loc);
+ }
+ } else
+ paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc);
+ }
+ intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc);
+ loopNestingLevel = 0;
+ statementNestingLevel = 0;
+ controlFlowNestingLevel = 0;
+ postEntryPointReturn = false;
+
+ return paramNodes;
+}
+
+//
+// Handle seeing function call syntax in the grammar, which could be any of
+// - .length() method
+// - constructor
+// - a call to a built-in function mapped to an operator
+// - a call to a built-in function that will remain a function call (e.g., texturing)
+// - user function
+// - subroutine call (not implemented yet)
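+//
+// For example, each of these comes through here (illustrative GLSL):
+//     a.length()          // .length() method
+//     vec4(1.0)           // constructor
+//     dot(a, b)           // built-in mapped to an operator
+//     texture(s, uv)      // built-in that remains a function call
+//     userFunc(a)         // user function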
+//
+TIntermTyped* TParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermNode* arguments)
+{
+ TIntermTyped* result = nullptr;
+
+ if (function->getBuiltInOp() == EOpArrayLength)
+ result = handleLengthMethod(loc, function, arguments);
+ else if (function->getBuiltInOp() != EOpNull) {
+ //
+ // Then this should be a constructor.
+ // Don't go through the symbol table for constructors.
+ // Their parameters will be verified algorithmically.
+ //
+ TType type(EbtVoid); // use this to get the type back
+ if (! constructorError(loc, arguments, *function, function->getBuiltInOp(), type)) {
+ //
+ // It's a constructor, of type 'type'.
+ //
+ result = addConstructor(loc, arguments, type);
+ if (result == nullptr)
+ error(loc, "cannot construct with these arguments", type.getCompleteString().c_str(), "");
+ }
+ } else {
+ //
+ // Find it in the symbol table.
+ //
+ const TFunction* fnCandidate;
+ bool builtIn;
+ fnCandidate = findFunction(loc, *function, builtIn);
+ if (fnCandidate) {
+ // This is a declared function that might map to
+ // - a built-in operator,
+ // - a built-in function not mapped to an operator, or
+ // - a user function.
+
+ // Error check for a function requiring specific extensions present.
+ if (builtIn && fnCandidate->getNumExtensions())
+ requireExtensions(loc, fnCandidate->getNumExtensions(), fnCandidate->getExtensions(), fnCandidate->getName().c_str());
+
+ if (builtIn && fnCandidate->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, "built-in function", "float16 types can only be in uniform block or buffer storage");
+ if (builtIn && fnCandidate->getType().contains16BitInt())
+ requireInt16Arithmetic(loc, "built-in function", "(u)int16 types can only be in uniform block or buffer storage");
+ if (builtIn && fnCandidate->getType().contains8BitInt())
+ requireInt8Arithmetic(loc, "built-in function", "(u)int8 types can only be in uniform block or buffer storage");
+
+ if (arguments != nullptr) {
+ // Make sure qualifications work for these arguments.
+ TIntermAggregate* aggregate = arguments->getAsAggregate();
+ for (int i = 0; i < fnCandidate->getParamCount(); ++i) {
+ // At this early point there is a slight ambiguity: an aggregate 'arguments'
+ // could be either the single argument itself or the container of the arguments.
+ // When there is only one argument, take 'arguments' itself as that argument.
+ TIntermNode* arg = fnCandidate->getParamCount() == 1 ? arguments : (aggregate ? aggregate->getSequence()[i] : arguments);
+ TQualifier& formalQualifier = (*fnCandidate)[i].type->getQualifier();
+ if (formalQualifier.isParamOutput()) {
+ if (lValueErrorCheck(arguments->getLoc(), "assign", arg->getAsTyped()))
+ error(arguments->getLoc(), "Non-L-value cannot be passed for 'out' or 'inout' parameters.", "out", "");
+ }
+ TQualifier& argQualifier = arg->getAsTyped()->getQualifier();
+ if (argQualifier.isMemory()) {
+ const char* message = "argument cannot drop memory qualifier when passed to formal parameter";
+ if (argQualifier.volatil && ! formalQualifier.volatil)
+ error(arguments->getLoc(), message, "volatile", "");
+ if (argQualifier.coherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "coherent", "");
+ if (argQualifier.devicecoherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "devicecoherent", "");
+ if (argQualifier.queuefamilycoherent && ! (formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "queuefamilycoherent", "");
+ if (argQualifier.workgroupcoherent && ! (formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "workgroupcoherent", "");
+ if (argQualifier.subgroupcoherent && ! (formalQualifier.subgroupcoherent || formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent))
+ error(arguments->getLoc(), message, "subgroupcoherent", "");
+ if (argQualifier.readonly && ! formalQualifier.readonly)
+ error(arguments->getLoc(), message, "readonly", "");
+ if (argQualifier.writeonly && ! formalQualifier.writeonly)
+ error(arguments->getLoc(), message, "writeonly", "");
+ }
+
+ if (builtIn && arg->getAsTyped()->getType().containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(arguments->getLoc(), "built-in function", "float16 types can only be in uniform block or buffer storage");
+ if (builtIn && arg->getAsTyped()->getType().contains16BitInt())
+ requireInt16Arithmetic(arguments->getLoc(), "built-in function", "(u)int16 types can only be in uniform block or buffer storage");
+ if (builtIn && arg->getAsTyped()->getType().contains8BitInt())
+ requireInt8Arithmetic(arguments->getLoc(), "built-in function", "(u)int8 types can only be in uniform block or buffer storage");
+
+ // TODO 4.5 functionality: A shader will fail to compile
+ // if the value passed to the mem argument of an atomic memory function does not correspond to a buffer or
+ // shared variable. It is acceptable to pass an element of an array or a single component of a vector to the
+ // mem argument of an atomic memory function, as long as the underlying array or vector is a buffer or
+ // shared variable.
+ }
+
+ // Convert 'in' arguments
+ addInputArgumentConversions(*fnCandidate, arguments); // arguments may be modified if it's just a single argument node
+ }
+
+ if (builtIn && fnCandidate->getBuiltInOp() != EOpNull) {
+ // A function call mapped to a built-in operation.
+ result = handleBuiltInFunctionCall(loc, arguments, *fnCandidate);
+ } else {
+ // This is a function call not mapped to built-in operator.
+ // It could still be a built-in function, but only if PureOperatorBuiltins == false.
+ result = intermediate.setAggregateOperator(arguments, EOpFunctionCall, fnCandidate->getType(), loc);
+ TIntermAggregate* call = result->getAsAggregate();
+ call->setName(fnCandidate->getMangledName());
+
+ // This is how we distinguish a built-in function from a user-defined function:
+ // if builtIn == false, it's user-defined (though it could overload a built-in);
+ // if builtIn == true, it's definitely a built-in function with EOpNull.
+ if (! builtIn) {
+ call->setUserDefined();
+ if (symbolTable.atGlobalLevel()) {
+ requireProfile(loc, ~EEsProfile, "calling user function from global scope");
+ intermediate.addToCallGraph(infoSink, "main(", fnCandidate->getMangledName());
+ } else
+ intermediate.addToCallGraph(infoSink, currentCaller, fnCandidate->getMangledName());
+ }
+
+ if (builtIn)
+ nonOpBuiltInCheck(loc, *fnCandidate, *call);
+ else
+ userFunctionCallCheck(loc, *call);
+ }
+
+ // Convert 'out' arguments. If it was a constant folded built-in, it won't be an aggregate anymore.
+ // Built-ins with a single argument aren't called with an aggregate, but they also don't have an output.
+ // Also, build the qualifier list for user function calls, which are always called with an aggregate.
+ if (result->getAsAggregate()) {
+ TQualifierList& qualifierList = result->getAsAggregate()->getQualifierList();
+ for (int i = 0; i < fnCandidate->getParamCount(); ++i) {
+ TStorageQualifier qual = (*fnCandidate)[i].type->getQualifier().storage;
+ qualifierList.push_back(qual);
+ }
+ result = addOutputArgumentConversions(*fnCandidate, *result->getAsAggregate());
+ }
+
+ if (result->getAsTyped()->getType().isCoopMat() &&
+ !result->getAsTyped()->getType().isParameterized()) {
+ assert(fnCandidate->getBuiltInOp() == EOpCooperativeMatrixMulAdd);
+
+ result->setType(result->getAsAggregate()->getSequence()[2]->getAsTyped()->getType());
+ }
+ }
+ }
+
+ // generic error recovery
+ // TODO: simplification: localize all the error recoveries that look like this, and take type into account to reduce cascades
+ if (result == nullptr)
+ result = intermediate.addConstantUnion(0.0, EbtFloat, loc);
+
+ return result;
+}
+
+TIntermTyped* TParseContext::handleBuiltInFunctionCall(TSourceLoc loc, TIntermNode* arguments,
+ const TFunction& function)
+{
+ checkLocation(loc, function.getBuiltInOp());
+ TIntermTyped *result = intermediate.addBuiltInFunctionCall(loc, function.getBuiltInOp(),
+ function.getParamCount() == 1,
+ arguments, function.getType());
+ // Guard against a null result; precisions can only be computed on a real node.
+ if (result != nullptr && obeyPrecisionQualifiers())
+ computeBuiltinPrecisions(*result, function);
+
+ if (result == nullptr) {
+ if (arguments == nullptr)
+ error(loc, " wrong operand type", "Internal Error",
+ "built in unary operator function. Type: %s", "");
+ else
+ error(arguments->getLoc(), " wrong operand type", "Internal Error",
+ "built in unary operator function. Type: %s",
+ static_cast<TIntermTyped*>(arguments)->getCompleteString().c_str());
+ } else if (result->getAsOperator())
+ builtInOpCheck(loc, function, *result->getAsOperator());
+
+ return result;
+}
+
+// "The operation of a built-in function can have a different precision
+// qualification than the precision qualification of the resulting value.
+// These two precision qualifications are established as follows.
+//
+// The precision qualification of the operation of a built-in function is
+// based on the precision qualification of its input arguments and formal
+// parameters: When a formal parameter specifies a precision qualifier,
+// that is used, otherwise, the precision qualification of the calling
+// argument is used. The highest precision of these will be the precision
+// qualification of the operation of the built-in function. Generally,
+// this is applied across all arguments to a built-in function, with the
+// exceptions being:
+// - bitfieldExtract and bitfieldInsert ignore the 'offset' and 'bits'
+// arguments.
+// - interpolateAt* functions only look at the 'interpolant' argument.
+//
+// The precision qualification of the result of a built-in function is
+// determined in one of the following ways:
+//
+// - For the texture sampling, image load, and image store functions,
+// the precision of the return type matches the precision of the
+// sampler type
+//
+// Otherwise:
+//
+// - For prototypes that do not specify a resulting precision qualifier,
+// the precision will be the same as the precision of the operation.
+//
+// - For prototypes that do specify a resulting precision qualifier,
+// the specified precision qualifier is the precision qualification of
+// the result."
+//
+void TParseContext::computeBuiltinPrecisions(TIntermTyped& node, const TFunction& function)
+{
+ TPrecisionQualifier operationPrecision = EpqNone;
+ TPrecisionQualifier resultPrecision = EpqNone;
+
+ TIntermOperator* opNode = node.getAsOperator();
+ if (opNode == nullptr)
+ return;
+
+ if (TIntermUnary* unaryNode = node.getAsUnaryNode()) {
+ operationPrecision = std::max(function[0].type->getQualifier().precision,
+ unaryNode->getOperand()->getType().getQualifier().precision);
+ if (function.getType().getBasicType() != EbtBool)
+ resultPrecision = function.getType().getQualifier().precision == EpqNone ?
+ operationPrecision :
+ function.getType().getQualifier().precision;
+ } else if (TIntermAggregate* agg = node.getAsAggregate()) {
+ TIntermSequence& sequence = agg->getSequence();
+ unsigned int numArgs = (unsigned int)sequence.size();
+ switch (agg->getOp()) {
+ case EOpBitfieldExtract:
+ numArgs = 1;
+ break;
+ case EOpBitfieldInsert:
+ numArgs = 2;
+ break;
+ case EOpInterpolateAtCentroid:
+ case EOpInterpolateAtOffset:
+ case EOpInterpolateAtSample:
+ numArgs = 1;
+ break;
+ default:
+ break;
+ }
+ // find the maximum precision from the arguments and parameters
+ for (unsigned int arg = 0; arg < numArgs; ++arg) {
+ operationPrecision = std::max(operationPrecision, sequence[arg]->getAsTyped()->getQualifier().precision);
+ operationPrecision = std::max(operationPrecision, function[arg].type->getQualifier().precision);
+ }
+ // compute the result precision
+#ifdef AMD_EXTENSIONS
+ if (agg->isSampling() ||
+ agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore ||
+ agg->getOp() == EOpImageLoadLod || agg->getOp() == EOpImageStoreLod)
+#else
+ if (agg->isSampling() || agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore)
+#endif
+ resultPrecision = sequence[0]->getAsTyped()->getQualifier().precision;
+ else if (function.getType().getBasicType() != EbtBool)
+ resultPrecision = function.getType().getQualifier().precision == EpqNone ?
+ operationPrecision :
+ function.getType().getQualifier().precision;
+ }
+
+ // Propagate precision through this node and its children. That algorithm stops
+ // when a precision is found, so start by clearing this subroot precision
+ opNode->getQualifier().precision = EpqNone;
+ if (operationPrecision != EpqNone) {
+ opNode->propagatePrecision(operationPrecision);
+ opNode->setOperationPrecision(operationPrecision);
+ }
+ // Now, set the result precision, which might not match
+ opNode->getQualifier().precision = resultPrecision;
+}
+
+TIntermNode* TParseContext::handleReturnValue(const TSourceLoc& loc, TIntermTyped* value)
+{
+ storage16BitAssignmentCheck(loc, value->getType(), "return");
+
+ functionReturnsValue = true;
+ if (currentFunctionType->getBasicType() == EbtVoid) {
+ error(loc, "void function cannot return a value", "return", "");
+ return intermediate.addBranch(EOpReturn, loc);
+ } else if (*currentFunctionType != value->getType()) {
+ TIntermTyped* converted = intermediate.addConversion(EOpReturn, *currentFunctionType, value);
+ if (converted) {
+ if (*currentFunctionType != converted->getType())
+ error(loc, "cannot convert return value to function return type", "return", "");
+ if (version < 420)
+ warn(loc, "type conversion on return values was not explicitly allowed until version 420", "return", "");
+ return intermediate.addBranch(EOpReturn, converted, loc);
+ } else {
+ error(loc, "type does not match, or is not convertible to, the function's return type", "return", "");
+ return intermediate.addBranch(EOpReturn, value, loc);
+ }
+ } else
+ return intermediate.addBranch(EOpReturn, value, loc);
+}
+
+// See if the operation is being done in an illegal location.
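+// For example, in a tessellation control shader (illustrative GLSL):
+//     barrier();           // OK at the top level of main()
+//     if (x) barrier();    // error: inside flow control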
+void TParseContext::checkLocation(const TSourceLoc& loc, TOperator op)
+{
+ switch (op) {
+ case EOpBarrier:
+ if (language == EShLangTessControl) {
+ if (controlFlowNestingLevel > 0)
+ error(loc, "tessellation control barrier() cannot be placed within flow control", "", "");
+ if (! inMain)
+ error(loc, "tessellation control barrier() must be in main()", "", "");
+ else if (postEntryPointReturn)
+ error(loc, "tessellation control barrier() cannot be placed after a return from main()", "", "");
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+// Finish processing object.length(). This started earlier in handleDotDereference(), where
+// the ".length" part was recognized and semantically checked, and finished here where the
+// function syntax "()" is recognized.
+//
+// Return resulting tree node.
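+//
+// For example (illustrative GLSL):
+//     float a[5];  a.length()   // folds to the constant 5
+//     mat3x4 m;    m.length()   // number of columns: 3
+//     vec2 v;      v.length()   // number of components: 2
+// while .length() on a run-time sized SSBO member becomes an EOpArrayLength
+// node for the back end to resolve.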
+TIntermTyped* TParseContext::handleLengthMethod(const TSourceLoc& loc, TFunction* function, TIntermNode* intermNode)
+{
+ int length = 0;
+
+ if (function->getParamCount() > 0)
+ error(loc, "method does not accept any arguments", function->getName().c_str(), "");
+ else {
+ const TType& type = intermNode->getAsTyped()->getType();
+ if (type.isArray()) {
+ if (type.isUnsizedArray()) {
+ if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) {
+ // We could be between a layout declaration that gives a built-in io array implicit size and
+ // a user redeclaration of that array, meaning we have to substitute its implicit size here
+ // without actually redeclaring the array. (It is an error to use a member before the
+ // redeclaration, but not an error to use the array name itself.)
+ const TString& name = intermNode->getAsSymbolNode()->getName();
+ if (name == "gl_in" || name == "gl_out"
+#ifdef NV_EXTENSIONS
+ || name == "gl_MeshVerticesNV"
+ || name == "gl_MeshPrimitivesNV"
+#endif
+ )
+ {
+ length = getIoArrayImplicitSize(type.getQualifier());
+ }
+ }
+ if (length == 0) {
+ if (intermNode->getAsSymbolNode() && isIoResizeArray(type))
+ error(loc, "", function->getName().c_str(), "array must first be sized by a redeclaration or layout qualifier");
+ else if (isRuntimeLength(*intermNode->getAsTyped())) {
+ // Create a unary op and let the back end handle it
+ return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
+ } else
+ error(loc, "", function->getName().c_str(), "array must be declared with a size before using this method");
+ }
+ } else if (type.getOuterArrayNode()) {
+ // If the array's outer size is specified by an intermediate node, it means the array's length
+ // was specified by a specialization constant. In such a case, we should return the node of the
+ // specialization constants to represent the length.
+ return type.getOuterArrayNode();
+ } else
+ length = type.getOuterArraySize();
+ } else if (type.isMatrix())
+ length = type.getMatrixCols();
+ else if (type.isVector())
+ length = type.getVectorSize();
+ else if (type.isCoopMat())
+ return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt));
+ else {
+ // we should not get here, because earlier semantic checking should have prevented this path
+ error(loc, ".length()", "unexpected use of .length()", "");
+ }
+ }
+
+ if (length == 0)
+ length = 1;
+
+ return intermediate.addConstantUnion(length, loc);
+}
+
+//
+// Add any needed implicit conversions for function-call arguments to input parameters.
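+// For example, calling 'void f(float x)' as 'f(i)' with 'int i' (illustrative
+// GLSL) gets an int-to-float conversion node added above the argument.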
+//
+void TParseContext::addInputArgumentConversions(const TFunction& function, TIntermNode*& arguments) const
+{
+ TIntermAggregate* aggregate = arguments->getAsAggregate();
+
+ // Process each argument's conversion
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ // At this early point there is a slight ambiguity: an aggregate 'arguments'
+ // could be either the single argument itself or the container of the arguments.
+ // When there is only one argument, take 'arguments' itself as that argument.
+ TIntermTyped* arg = function.getParamCount() == 1 ? arguments->getAsTyped() : (aggregate ? aggregate->getSequence()[i]->getAsTyped() : arguments->getAsTyped());
+ if (*function[i].type != arg->getType()) {
+ if (function[i].type->getQualifier().isParamInput() &&
+ !function[i].type->isCoopMat()) {
+ // In-qualified arguments just need an extra node added above the argument to
+ // convert to the correct type.
+ arg = intermediate.addConversion(EOpFunctionCall, *function[i].type, arg);
+ if (arg) {
+ if (function.getParamCount() == 1)
+ arguments = arg;
+ else {
+ if (aggregate)
+ aggregate->getSequence()[i] = arg;
+ else
+ arguments = arg;
+ }
+ }
+ }
+ }
+ }
+}
+
+//
+// Add any needed implicit output conversions for function-call arguments. This
+// can require a new tree topology, complicated further by whether the function
+// has a return value.
+//
+// Returns a node of a subtree that evaluates to the return value of the function.
+//
+TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& function, TIntermAggregate& intermNode) const
+{
+ TIntermSequence& arguments = intermNode.getSequence();
+
+ // Will there be any output conversions?
+ bool outputConversions = false;
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ if (*function[i].type != arguments[i]->getAsTyped()->getType() && function[i].type->getQualifier().isParamOutput()) {
+ outputConversions = true;
+ break;
+ }
+ }
+
+ if (! outputConversions)
+ return &intermNode;
+
+ // Setup for the new tree, if needed:
+ //
+ // Output conversions need a different tree topology.
+ // Out-qualified arguments need a temporary of the correct type, with the call
+ // followed by an assignment of the temporary to the original argument:
+ // void: function(arg, ...) -> ( function(tempArg, ...), arg = tempArg, ...)
+ // ret = function(arg, ...) -> ret = (tempRet = function(tempArg, ...), arg = tempArg, ..., tempRet)
+ // Where the "tempArg" type needs no conversion as an argument, but will convert on assignment.
+ TIntermTyped* conversionTree = nullptr;
+ TVariable* tempRet = nullptr;
+ if (intermNode.getBasicType() != EbtVoid) {
+ // do the "tempRet = function(...), " bit from above
+ tempRet = makeInternalVariable("tempReturn", intermNode.getType());
+ TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc());
+ conversionTree = intermediate.addAssign(EOpAssign, tempRetNode, &intermNode, intermNode.getLoc());
+ } else
+ conversionTree = &intermNode;
+
+ conversionTree = intermediate.makeAggregate(conversionTree);
+
+ // Process each argument's conversion
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ if (*function[i].type != arguments[i]->getAsTyped()->getType()) {
+ if (function[i].type->getQualifier().isParamOutput()) {
+ // Out-qualified arguments need to use the topology set up above.
+ // do the " ...(tempArg, ...), arg = tempArg" bit from above
+ TType paramType;
+ paramType.shallowCopy(*function[i].type);
+ if (arguments[i]->getAsTyped()->getType().isParameterized() &&
+ !paramType.isParameterized()) {
+ paramType.shallowCopy(arguments[i]->getAsTyped()->getType());
+ paramType.copyTypeParameters(*arguments[i]->getAsTyped()->getType().getTypeParameters());
+ }
+ TVariable* tempArg = makeInternalVariable("tempArg", paramType);
+ tempArg->getWritableType().getQualifier().makeTemporary();
+ TIntermSymbol* tempArgNode = intermediate.addSymbol(*tempArg, intermNode.getLoc());
+ TIntermTyped* tempAssign = intermediate.addAssign(EOpAssign, arguments[i]->getAsTyped(), tempArgNode, arguments[i]->getLoc());
+ conversionTree = intermediate.growAggregate(conversionTree, tempAssign, arguments[i]->getLoc());
+ // replace the argument with another node for the same tempArg variable
+ arguments[i] = intermediate.addSymbol(*tempArg, intermNode.getLoc());
+ }
+ }
+ }
+
+ // Finalize the tree topology (see bigger comment above).
+ if (tempRet) {
+ // do the "..., tempRet" bit from above
+ TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc());
+ conversionTree = intermediate.growAggregate(conversionTree, tempRetNode, intermNode.getLoc());
+ }
+ conversionTree = intermediate.setAggregateOperator(conversionTree, EOpComma, intermNode.getType(), intermNode.getLoc());
+
+ return conversionTree;
+}
+
+void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode)
+{
+ const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence();
+
+ //const int gl_SemanticsRelaxed = 0x0;
+ const int gl_SemanticsAcquire = 0x2;
+ const int gl_SemanticsRelease = 0x4;
+ const int gl_SemanticsAcquireRelease = 0x8;
+ const int gl_SemanticsMakeAvailable = 0x2000;
+ const int gl_SemanticsMakeVisible = 0x4000;
+
+ //const int gl_StorageSemanticsNone = 0x0;
+ const int gl_StorageSemanticsBuffer = 0x40;
+ const int gl_StorageSemanticsShared = 0x100;
+ const int gl_StorageSemanticsImage = 0x800;
+ const int gl_StorageSemanticsOutput = 0x1000;
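+
+ // These built-in constant values mirror those added by
+ // GL_KHR_memory_scope_semantics, used as in (illustrative GLSL):
+ //     atomicStore(b.x, 1, gl_ScopeDevice, gl_StorageSemanticsBuffer,
+ //                 gl_SemanticsRelease);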
+
+ unsigned int semantics = 0, storageClassSemantics = 0;
+ unsigned int semantics2 = 0, storageClassSemantics2 = 0;
+
+ // Grab the semantics and storage class semantics from the operands, based on opcode
+ switch (callNode.getOp()) {
+ case EOpAtomicAdd:
+ case EOpAtomicMin:
+ case EOpAtomicMax:
+ case EOpAtomicAnd:
+ case EOpAtomicOr:
+ case EOpAtomicXor:
+ case EOpAtomicExchange:
+ case EOpAtomicStore:
+ storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpAtomicLoad:
+ storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpAtomicCompSwap:
+ storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ storageClassSemantics2 = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+
+ case EOpImageAtomicAdd:
+ case EOpImageAtomicMin:
+ case EOpImageAtomicMax:
+ case EOpImageAtomicAnd:
+ case EOpImageAtomicOr:
+ case EOpImageAtomicXor:
+ case EOpImageAtomicExchange:
+ case EOpImageAtomicStore:
+ storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpImageAtomicLoad:
+ storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpImageAtomicCompSwap:
+ storageClassSemantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ storageClassSemantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics2 = (*argp)[8]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+
+ case EOpBarrier:
+ storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ case EOpMemoryBarrier:
+ storageClassSemantics = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ semantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ break;
+ default:
+ break;
+ }
+
+ if ((semantics & gl_SemanticsAcquire) &&
+ (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore)) {
+ error(loc, "gl_SemanticsAcquire must not be used with (image) atomic store",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsRelease) &&
+ (callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
+ error(loc, "gl_SemanticsRelease must not be used with (image) atomic load",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsAcquireRelease) &&
+ (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore ||
+ callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) {
+ error(loc, "gl_SemanticsAcquireRelease must not be used with (image) atomic load/store",
+ fnCandidate.getName().c_str(), "");
+ }
+ if (((semantics | semantics2) & ~(gl_SemanticsAcquire |
+ gl_SemanticsRelease |
+ gl_SemanticsAcquireRelease |
+ gl_SemanticsMakeAvailable |
+ gl_SemanticsMakeVisible))) {
+ error(loc, "Invalid semantics value", fnCandidate.getName().c_str(), "");
+ }
+ if (((storageClassSemantics | storageClassSemantics2) & ~(gl_StorageSemanticsBuffer |
+ gl_StorageSemanticsShared |
+ gl_StorageSemanticsImage |
+ gl_StorageSemanticsOutput))) {
+ error(loc, "Invalid storage class semantics value", fnCandidate.getName().c_str(), "");
+ }
+
+ if (callNode.getOp() == EOpMemoryBarrier) {
+ if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or "
+ "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
+ }
+ } else {
+ if (semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
+ if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
+ "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
+ }
+ }
+ if (semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) {
+ if (!IsPow2(semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "semUnequal must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or "
+ "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), "");
+ }
+ }
+ }
+ if (callNode.getOp() == EOpMemoryBarrier) {
+ if (storageClassSemantics == 0) {
+ error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
+ }
+ }
+ if (callNode.getOp() == EOpBarrier && semantics != 0 && storageClassSemantics == 0) {
+ error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), "");
+ }
+ if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) &&
+ (semantics2 & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "semUnequal must not be gl_SemanticsRelease or gl_SemanticsAcquireRelease",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsMakeAvailable) &&
+ !(semantics & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) {
+ error(loc, "gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease",
+ fnCandidate.getName().c_str(), "");
+ }
+ if ((semantics & gl_SemanticsMakeVisible) &&
+ !(semantics & (gl_SemanticsAcquire | gl_SemanticsAcquireRelease))) {
+ error(loc, "gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease",
+ fnCandidate.getName().c_str(), "");
+ }
+}
+
+//
+// Do additional checking of built-in function calls that is not caught
+// by normal semantic checks on argument type, extension tagging, etc.
+//
+// Assumes there has been a semantically correct match to a built-in function prototype.
+//
+void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermOperator& callNode)
+{
+ // Set up convenience accessors to the argument(s). There are almost always
+ // multiple arguments for the cases below, but when there might be one,
+ // check the unaryArg first.
+ const TIntermSequence* argp = nullptr; // confusing to use [] syntax on a pointer, so this is to help get a reference
+ const TIntermTyped* unaryArg = nullptr;
+ const TIntermTyped* arg0 = nullptr;
+ if (callNode.getAsAggregate()) {
+ argp = &callNode.getAsAggregate()->getSequence();
+ if (argp->size() > 0)
+ arg0 = (*argp)[0]->getAsTyped();
+ } else {
+ assert(callNode.getAsUnaryNode());
+ unaryArg = callNode.getAsUnaryNode()->getOperand();
+ arg0 = unaryArg;
+ }
+
+ TString featureString;
+ const char* feature = nullptr;
+ switch (callNode.getOp()) {
+ case EOpTextureGather:
+ case EOpTextureGatherOffset:
+ case EOpTextureGatherOffsets:
+ {
+ // Figure out which variants are allowed by what extensions,
+ // and what arguments must be constant for which situations.
+
+ featureString = fnCandidate.getName();
+ featureString += "(...)";
+ feature = featureString.c_str();
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ int compArg = -1; // track which argument, if any, is the constant component argument
+ switch (callNode.getOp()) {
+ case EOpTextureGather:
+ // More than two arguments, or a rectangular or shadow sampler, needs gpu_shader5;
+ // otherwise, GL_ARB_texture_gather suffices.
+ if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) {
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 2;
+ } else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ break;
+ case EOpTextureGatherOffset:
+ // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument
+ if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3)
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion())
+ profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
+ "non-constant offset argument");
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ break;
+ case EOpTextureGatherOffsets:
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ // check for constant offsets
+ if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion())
+ error(loc, "must be a compile-time constant:", feature, "offsets argument");
+ break;
+ default:
+ break;
+ }
+
+ if (compArg > 0 && compArg < fnCandidate.getParamCount()) {
+ if ((*argp)[compArg]->getAsConstantUnion()) {
+ int value = (*argp)[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (value < 0 || value > 3)
+ error(loc, "must be 0, 1, 2, or 3:", feature, "component argument");
+ } else
+ error(loc, "must be a compile-time constant:", feature, "component argument");
+ }
+
+#ifdef AMD_EXTENSIONS
+ bool bias = false;
+ if (callNode.getOp() == EOpTextureGather)
+ bias = fnCandidate.getParamCount() > 3;
+ else if (callNode.getOp() == EOpTextureGatherOffset ||
+ callNode.getOp() == EOpTextureGatherOffsets)
+ bias = fnCandidate.getParamCount() > 4;
+
+ if (bias) {
+ featureString = fnCandidate.getName();
+ featureString += "with bias argument";
+ feature = featureString.c_str();
+ profileRequires(loc, ~EEsProfile, 450, nullptr, feature);
+ requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature);
+ }
+#endif
+
+ break;
+ }
+
+#ifdef AMD_EXTENSIONS
+ case EOpSparseTextureGather:
+ case EOpSparseTextureGatherOffset:
+ case EOpSparseTextureGatherOffsets:
+ {
+ bool bias = false;
+ if (callNode.getOp() == EOpSparseTextureGather)
+ bias = fnCandidate.getParamCount() > 4;
+ else if (callNode.getOp() == EOpSparseTextureGatherOffset ||
+ callNode.getOp() == EOpSparseTextureGatherOffsets)
+ bias = fnCandidate.getParamCount() > 5;
+
+ if (bias) {
+ featureString = fnCandidate.getName();
+ featureString += "with bias argument";
+ feature = featureString.c_str();
+ profileRequires(loc, ~EEsProfile, 450, nullptr, feature);
+ requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature);
+ }
+
+ break;
+ }
+
+ case EOpSparseTextureGatherLod:
+ case EOpSparseTextureGatherLodOffset:
+ case EOpSparseTextureGatherLodOffsets:
+ {
+ requireExtensions(loc, 1, &E_GL_ARB_sparse_texture2, fnCandidate.getName().c_str());
+ break;
+ }
+
+ case EOpSwizzleInvocations:
+ {
+ if (! (*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "offset", "");
+ else {
+ unsigned offset[4] = {};
+ offset[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
+ offset[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst();
+ offset[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst();
+ offset[3] = (*argp)[1]->getAsConstantUnion()->getConstArray()[3].getUConst();
+ if (offset[0] > 3 || offset[1] > 3 || offset[2] > 3 || offset[3] > 3)
+ error(loc, "components must be in the range [0, 3]", "offset", "");
+ }
+
+ break;
+ }
+
+ case EOpSwizzleInvocationsMasked:
+ {
+ if (! (*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "mask", "");
+ else {
+ unsigned mask[3] = {};
+ mask[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
+ mask[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst();
+ mask[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst();
+ if (mask[0] > 31 || mask[1] > 31 || mask[2] > 31)
+ error(loc, "components must be in the range [0, 31]", "mask", "");
+ }
+
+ break;
+ }
+#endif
+
+ case EOpTextureOffset:
+ case EOpTextureFetchOffset:
+ case EOpTextureProjOffset:
+ case EOpTextureLodOffset:
+ case EOpTextureProjLodOffset:
+ case EOpTextureGradOffset:
+ case EOpTextureProjGradOffset:
+ {
+ // Handle texture-offset limits checking
+ // Pick which argument has to hold constant offsets
+ int arg = -1;
+ switch (callNode.getOp()) {
+ case EOpTextureOffset: arg = 2; break;
+ case EOpTextureFetchOffset: arg = (arg0->getType().getSampler().dim != EsdRect) ? 3 : 2; break;
+ case EOpTextureProjOffset: arg = 2; break;
+ case EOpTextureLodOffset: arg = 3; break;
+ case EOpTextureProjLodOffset: arg = 3; break;
+ case EOpTextureGradOffset: arg = 4; break;
+ case EOpTextureProjGradOffset: arg = 4; break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (arg > 0) {
+
+#ifdef AMD_EXTENSIONS
+ bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 && arg0->getType().getSampler().shadow;
+ if (f16ShadowCompare)
+ ++arg;
+#endif
+ if (! (*argp)[arg]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "texel offset", "");
+ else {
+ const TType& type = (*argp)[arg]->getAsTyped()->getType();
+ for (int c = 0; c < type.getVectorSize(); ++c) {
+ int offset = (*argp)[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
+ if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
+ error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
+ }
+ }
+ }
+
+ break;
+ }
+
+#ifdef NV_EXTENSIONS
+ case EOpTraceNV:
+ if (!(*argp)[10]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "payload number", "");
+ break;
+ case EOpExecuteCallableNV:
+ if (!(*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "callable data number", "");
+ break;
+#endif
+
+ case EOpTextureQuerySamples:
+ case EOpImageQuerySamples:
+ // GL_ARB_shader_texture_image_samples
+ profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples");
+ break;
+
+ case EOpImageAtomicAdd:
+ case EOpImageAtomicMin:
+ case EOpImageAtomicMax:
+ case EOpImageAtomicAnd:
+ case EOpImageAtomicOr:
+ case EOpImageAtomicXor:
+ case EOpImageAtomicExchange:
+ case EOpImageAtomicCompSwap:
+ case EOpImageAtomicLoad:
+ case EOpImageAtomicStore:
+ {
+ // Make sure the image types have the correct layout() format and correct argument types
+ const TType& imageType = arg0->getType();
+ if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) {
+ if (imageType.getQualifier().layoutFormat != ElfR32i && imageType.getQualifier().layoutFormat != ElfR32ui)
+ error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
+ } else {
+ if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
+ error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
+ else if (imageType.getQualifier().layoutFormat != ElfR32f && profile == EEsProfile)
+ error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
+ }
+
+ const size_t maxArgs = imageType.getSampler().isMultiSample() ? 5 : 4;
+ if (argp->size() > maxArgs) {
+ requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
+ memorySemanticsCheck(loc, fnCandidate, callNode);
+ }
+
+ break;
+ }
+
+ case EOpAtomicAdd:
+ case EOpAtomicMin:
+ case EOpAtomicMax:
+ case EOpAtomicAnd:
+ case EOpAtomicOr:
+ case EOpAtomicXor:
+ case EOpAtomicExchange:
+ case EOpAtomicCompSwap:
+ case EOpAtomicLoad:
+ case EOpAtomicStore:
+ {
+ if (argp->size() > 3) {
+ requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
+ memorySemanticsCheck(loc, fnCandidate, callNode);
+ } else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64) {
+#ifdef NV_EXTENSIONS
+ const char* const extensions[2] = { E_GL_NV_shader_atomic_int64,
+ E_GL_EXT_shader_atomic_int64 };
+ requireExtensions(loc, 2, extensions, fnCandidate.getName().c_str());
+#else
+ requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_int64, fnCandidate.getName().c_str());
+#endif
+ }
+ break;
+ }
+
+ case EOpInterpolateAtCentroid:
+ case EOpInterpolateAtSample:
+ case EOpInterpolateAtOffset:
+#ifdef AMD_EXTENSIONS
+ case EOpInterpolateAtVertex:
+#endif
+ // Make sure the first argument is an interpolant, or an array element of an interpolant
+ if (arg0->getType().getQualifier().storage != EvqVaryingIn) {
+ // It might still be an array element.
+ //
+ // We could check more, but the semantics of the first argument are already met; the
+ // only way to turn an array into a float/vec* is array dereference and swizzle.
+ //
+ // ES and desktop 4.3 and earlier: swizzles may not be used
+ // desktop 4.4 and later: swizzles may be used
+ bool swizzleOkay = (profile != EEsProfile) && (version >= 440);
+ const TIntermTyped* base = TIntermediate::findLValueBase(arg0, swizzleOkay);
+ if (base == nullptr || base->getType().getQualifier().storage != EvqVaryingIn)
+ error(loc, "first argument must be an interpolant, or interpolant-array element", fnCandidate.getName().c_str(), "");
+ }
+
+#ifdef AMD_EXTENSIONS
+ if (callNode.getOp() == EOpInterpolateAtVertex) {
+ if (!arg0->getType().getQualifier().isExplicitInterpolation())
+ error(loc, "argument must be qualified as __explicitInterpAMD in", "interpolant", "");
+ else {
+ if (! (*argp)[1]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "vertex index", "");
+ else {
+ unsigned vertexIdx = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst();
+ if (vertexIdx > 2)
+ error(loc, "must be in the range [0, 2]", "vertex index", "");
+ }
+ }
+ }
+#endif
+
+ break;
+
+ case EOpEmitStreamVertex:
+ case EOpEndStreamPrimitive:
+ intermediate.setMultiStream();
+ break;
+
+ case EOpSubgroupClusteredAdd:
+ case EOpSubgroupClusteredMul:
+ case EOpSubgroupClusteredMin:
+ case EOpSubgroupClusteredMax:
+ case EOpSubgroupClusteredAnd:
+ case EOpSubgroupClusteredOr:
+ case EOpSubgroupClusteredXor:
+ // The <clusterSize> as used in the subgroupClustered<op>() operations must be:
+ // - An integral constant expression.
+ // - At least 1.
+ // - A power of 2.
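+ // For example (illustrative GLSL):
+ //     subgroupClusteredAdd(x, 4);   // OK: constant, >= 1, power of 2
+ //     subgroupClusteredAdd(x, 3);   // error: not a power of 2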
+ if ((*argp)[1]->getAsConstantUnion() == nullptr)
+ error(loc, "argument must be compile-time constant", "cluster size", "");
+ else {
+ int size = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (size < 1)
+ error(loc, "argument must be at least 1", "cluster size", "");
+ else if (!IsPow2(size))
+ error(loc, "argument must be a power of 2", "cluster size", "");
+ }
+ break;
+
+ case EOpSubgroupBroadcast:
+ // <id> must be an integral constant expression.
+ if ((*argp)[1]->getAsConstantUnion() == nullptr)
+ error(loc, "argument must be compile-time constant", "id", "");
+ break;
+
+ case EOpBarrier:
+ case EOpMemoryBarrier:
+ if (argp->size() > 0) {
+ requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str());
+ memorySemanticsCheck(loc, fnCandidate, callNode);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ // Texture operations on texture objects (aside from texelFetch on a
+ // textureBuffer) require EXT_samplerless_texture_functions.
+ switch (callNode.getOp()) {
+ case EOpTextureQuerySize:
+ case EOpTextureQueryLevels:
+ case EOpTextureQuerySamples:
+ case EOpTextureFetch:
+ case EOpTextureFetchOffset:
+ {
+ const TSampler& sampler = fnCandidate[0].type->getSampler();
+
+ const bool isTexture = sampler.isTexture() && !sampler.isCombined();
+ const bool isBuffer = sampler.dim == EsdBuffer;
+ const bool isFetch = callNode.getOp() == EOpTextureFetch || callNode.getOp() == EOpTextureFetchOffset;
+
+ if (isTexture && (!isBuffer || !isFetch))
+ requireExtensions(loc, 1, &E_GL_EXT_samplerless_texture_functions, fnCandidate.getName().c_str());
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (callNode.getOp() > EOpSubgroupGuardStart && callNode.getOp() < EOpSubgroupGuardStop) {
+ // these require SPIR-V 1.3
+ if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_3)
+ error(loc, "requires SPIR-V 1.3", "subgroup op", "");
+ }
+}
+
+extern bool PureOperatorBuiltins;
+
+// Deprecated! Use PureOperatorBuiltins == true instead, in which case this
+// functionality is handled in builtInOpCheck() instead of here.
+//
+// Do additional checking of built-in function calls that were not mapped
+// to built-in operations (e.g., texturing functions).
+//
+// Assumes there has been a semantically correct match to a built-in function.
+//
+void TParseContext::nonOpBuiltInCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermAggregate& callNode)
+{
+ // Further maintenance of this function is deprecated, because the "correct"
+ // future-oriented design is to not have to do string compares on function names.
+
+ // If PureOperatorBuiltins == true, then all built-ins should be mapped
+ // to a TOperator, and this function would then never get called.
+
+ assert(PureOperatorBuiltins == false);
+
+ // built-in texturing functions get their return value precision from the precision of the sampler
+ if (fnCandidate.getType().getQualifier().precision == EpqNone &&
+ fnCandidate.getParamCount() > 0 && fnCandidate[0].type->getBasicType() == EbtSampler)
+ callNode.getQualifier().precision = callNode.getSequence()[0]->getAsTyped()->getQualifier().precision;
+
+ if (fnCandidate.getName().compare(0, 7, "texture") == 0) {
+ if (fnCandidate.getName().compare(0, 13, "textureGather") == 0) {
+ TString featureString = fnCandidate.getName() + "(...)";
+ const char* feature = featureString.c_str();
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+
+ int compArg = -1; // track which argument, if any, is the constant component argument
+ if (fnCandidate.getName().compare("textureGatherOffset") == 0) {
+ // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument
+ if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3)
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2;
+ if (! callNode.getSequence()[offsetArg]->getAsConstantUnion())
+ profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5,
+ "non-constant offset argument");
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ } else if (fnCandidate.getName().compare("textureGatherOffsets") == 0) {
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 3;
+ // check for constant offsets
+ int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2;
+ if (! callNode.getSequence()[offsetArg]->getAsConstantUnion())
+ error(loc, "must be a compile-time constant:", feature, "offsets argument");
+ } else if (fnCandidate.getName().compare("textureGather") == 0) {
+ // More than two arguments, or a rectangular or shadow sampler, needs gpu_shader5;
+ // otherwise, GL_ARB_texture_gather suffices.
+ if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) {
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature);
+ if (! fnCandidate[0].type->getSampler().shadow)
+ compArg = 2;
+ } else
+ profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature);
+ }
+
+ if (compArg > 0 && compArg < fnCandidate.getParamCount()) {
+ if (callNode.getSequence()[compArg]->getAsConstantUnion()) {
+ int value = callNode.getSequence()[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (value < 0 || value > 3)
+ error(loc, "must be 0, 1, 2, or 3:", feature, "component argument");
+ } else
+ error(loc, "must be a compile-time constant:", feature, "component argument");
+ }
+ } else {
+ // This is only for functions not starting with "textureGather"...
+ if (fnCandidate.getName().find("Offset") != TString::npos) {
+
+ // Handle texture-offset limits checking
+ int arg = -1;
+ if (fnCandidate.getName().compare("textureOffset") == 0)
+ arg = 2;
+ else if (fnCandidate.getName().compare("texelFetchOffset") == 0)
+ arg = 3;
+ else if (fnCandidate.getName().compare("textureProjOffset") == 0)
+ arg = 2;
+ else if (fnCandidate.getName().compare("textureLodOffset") == 0)
+ arg = 3;
+ else if (fnCandidate.getName().compare("textureProjLodOffset") == 0)
+ arg = 3;
+ else if (fnCandidate.getName().compare("textureGradOffset") == 0)
+ arg = 4;
+ else if (fnCandidate.getName().compare("textureProjGradOffset") == 0)
+ arg = 4;
+
+ if (arg > 0) {
+ if (! callNode.getSequence()[arg]->getAsConstantUnion())
+ error(loc, "argument must be compile-time constant", "texel offset", "");
+ else {
+ const TType& type = callNode.getSequence()[arg]->getAsTyped()->getType();
+ for (int c = 0; c < type.getVectorSize(); ++c) {
+ int offset = callNode.getSequence()[arg]->getAsConstantUnion()->getConstArray()[c].getIConst();
+ if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset)
+ error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]");
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // GL_ARB_shader_texture_image_samples
+ if (fnCandidate.getName().compare(0, 14, "textureSamples") == 0 || fnCandidate.getName().compare(0, 12, "imageSamples") == 0)
+ profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples");
+
+ if (fnCandidate.getName().compare(0, 11, "imageAtomic") == 0) {
+ const TType& imageType = callNode.getSequence()[0]->getAsTyped()->getType();
+ if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) {
+ if (imageType.getQualifier().layoutFormat != ElfR32i && imageType.getQualifier().layoutFormat != ElfR32ui)
+ error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
+ } else {
+ if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)
+ error(loc, "only supported on integer images", fnCandidate.getName().c_str(), "");
+ else if (imageType.getQualifier().layoutFormat != ElfR32f && profile == EEsProfile)
+ error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), "");
+ }
+ }
+}
+
+//
+// Do any extra checking for a user function call.
+//
+void TParseContext::userFunctionCallCheck(const TSourceLoc& loc, TIntermAggregate& callNode)
+{
+ TIntermSequence& arguments = callNode.getSequence();
+
+ for (int i = 0; i < (int)arguments.size(); ++i)
+ samplerConstructorLocationCheck(loc, "call argument", arguments[i]);
+}
+
+//
+// Emit an error if this is a sampler constructor
+//
+void TParseContext::samplerConstructorLocationCheck(const TSourceLoc& loc, const char* token, TIntermNode* node)
+{
+ if (node->getAsOperator() && node->getAsOperator()->getOp() == EOpConstructTextureSampler)
+ error(loc, "sampler constructor must appear at point of use", token, "");
+}
+
+//
+// Handle seeing a built-in constructor in a grammar production.
+//
+TFunction* TParseContext::handleConstructorCall(const TSourceLoc& loc, const TPublicType& publicType)
+{
+ TType type(publicType);
+ type.getQualifier().precision = EpqNone;
+
+ if (type.isArray()) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed constructor");
+ profileRequires(loc, EEsProfile, 300, nullptr, "arrayed constructor");
+ }
+
+ TOperator op = intermediate.mapTypeToConstructorOp(type);
+
+ if (op == EOpNull) {
+ error(loc, "cannot construct this type", type.getBasicString(), "");
+ op = EOpConstructFloat;
+ TType errorType(EbtFloat);
+ type.shallowCopy(errorType);
+ }
+
+ TString empty("");
+
+ return new TFunction(&empty, type, op);
+}
+
+// Handle seeing a precision qualifier in the grammar.
+void TParseContext::handlePrecisionQualifier(const TSourceLoc& /*loc*/, TQualifier& qualifier, TPrecisionQualifier precision)
+{
+ if (obeyPrecisionQualifiers())
+ qualifier.precision = precision;
+}
+
+// Check for messages to give on seeing a precision qualifier used in a
+// declaration in the grammar.
+void TParseContext::checkPrecisionQualifier(const TSourceLoc& loc, TPrecisionQualifier)
+{
+ if (precisionManager.shouldWarnAboutDefaults()) {
+ warn(loc, "all default precisions are highp; use precision statements to quiet warning, e.g.:\n"
+ " \"precision mediump int; precision highp float;\"", "", "");
+ precisionManager.defaultWarningGiven();
+ }
+}
+
+//
+// Same error message for all places assignments don't work.
+//
+void TParseContext::assignError(const TSourceLoc& loc, const char* op, TString left, TString right)
+{
+ error(loc, "", op, "cannot convert from '%s' to '%s'",
+ right.c_str(), left.c_str());
+}
+
+//
+// Same error message for all places unary operations don't work.
+//
+void TParseContext::unaryOpError(const TSourceLoc& loc, const char* op, TString operand)
+{
+ error(loc, " wrong operand type", op,
+ "no operation '%s' exists that takes an operand of type %s (or there is no acceptable conversion)",
+ op, operand.c_str());
+}
+
+//
+// Same error message for all places binary operations don't work.
+//
+void TParseContext::binaryOpError(const TSourceLoc& loc, const char* op, TString left, TString right)
+{
+ error(loc, " wrong operand types:", op,
+ "no operation '%s' exists that takes a left-hand operand of type '%s' and "
+ "a right operand of type '%s' (or there is no acceptable conversion)",
+ op, left.c_str(), right.c_str());
+}
+
+//
+// A basic type of EbtVoid is a sign that the name string was seen in the source,
+// but was not found as a variable in the symbol table. If so, give the error
+// message and insert a dummy variable in the symbol table to prevent future errors.
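+//
+// For example, an undeclared 'foo' in 'foo = 1.0;' (illustrative GLSL) arrives
+// here as an EbtVoid symbol; after the error is reported, 'foo' is inserted as
+// a float so later uses don't cascade into more "undeclared identifier" errors.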
+//
+void TParseContext::variableCheck(TIntermTyped*& nodePtr)
+{
+ TIntermSymbol* symbol = nodePtr->getAsSymbolNode();
+ if (! symbol)
+ return;
+
+ if (symbol->getType().getBasicType() == EbtVoid) {
+ const char *extraInfoFormat = "";
+ if (spvVersion.vulkan != 0 && symbol->getName() == "gl_VertexID") {
+ extraInfoFormat = "(Did you mean gl_VertexIndex?)";
+ } else if (spvVersion.vulkan != 0 && symbol->getName() == "gl_InstanceID") {
+ extraInfoFormat = "(Did you mean gl_InstanceIndex?)";
+ }
+ error(symbol->getLoc(), "undeclared identifier", symbol->getName().c_str(), extraInfoFormat);
+
+ // Add to symbol table to prevent future error messages on the same name
+ if (symbol->getName().size() > 0) {
+ TVariable* fakeVariable = new TVariable(&symbol->getName(), TType(EbtFloat));
+ symbolTable.insert(*fakeVariable);
+
+ // substitute a symbol node for this new variable
+ nodePtr = intermediate.addSymbol(*fakeVariable, symbol->getLoc());
+ }
+ } else {
+ switch (symbol->getQualifier().storage) {
+ case EvqPointCoord:
+ profileRequires(symbol->getLoc(), ENoProfile, 120, nullptr, "gl_PointCoord");
+ break;
+ default: break; // some compilers want this
+ }
+ }
+}
+
+//
+// Both test, and if necessary spit out an error, to see if the node is really
+// an l-value that can be operated on this way.
+//
+// Returns true if there was an error.
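+//
+// For example (illustrative GLSL), 'v.xx = ...' is rejected for duplicating a
+// swizzle component, and in a tessellation control shader
+// 'gl_out[i].gl_Position = ...' is rejected unless 'i' is gl_InvocationID.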
+//
+bool TParseContext::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ TIntermBinary* binaryNode = node->getAsBinaryNode();
+
+ if (binaryNode) {
+ bool errorReturn = false;
+
+ switch(binaryNode->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ // ... tessellation control shader ...
+ // If a per-vertex output variable is used as an l-value, it is a
+ // compile-time or link-time error if the expression indicating the
+ // vertex index is not the identifier gl_InvocationID.
+ if (language == EShLangTessControl) {
+ const TType& leftType = binaryNode->getLeft()->getType();
+ if (leftType.getQualifier().storage == EvqVaryingOut && ! leftType.getQualifier().patch && binaryNode->getLeft()->getAsSymbolNode()) {
+ // we have a per-vertex output
+ const TIntermSymbol* rightSymbol = binaryNode->getRight()->getAsSymbolNode();
+ if (! rightSymbol || rightSymbol->getQualifier().builtIn != EbvInvocationId)
+ error(loc, "tessellation-control per-vertex output l-value must be indexed with gl_InvocationID", "[]", "");
+ }
+ }
+
+ break; // left node is checked by base class
+ case EOpIndexDirectStruct:
+ break; // left node is checked by base class
+ case EOpVectorSwizzle:
+ errorReturn = lValueErrorCheck(loc, op, binaryNode->getLeft());
+ if (!errorReturn) {
+ int offset[4] = {0,0,0,0};
+
+ TIntermTyped* rightNode = binaryNode->getRight();
+ TIntermAggregate *aggrNode = rightNode->getAsAggregate();
+
+ for (TIntermSequence::iterator p = aggrNode->getSequence().begin();
+ p != aggrNode->getSequence().end(); p++) {
+ int value = (*p)->getAsTyped()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ offset[value]++;
+ if (offset[value] > 1) {
+ error(loc, " l-value of swizzle cannot have duplicate components", op, "", "");
+
+ return true;
+ }
+ }
+ }
+
+ return errorReturn;
+ default:
+ break;
+ }
+
+ if (errorReturn) {
+ error(loc, " l-value required", op, "", "");
+ return true;
+ }
+ }
+
+ if (binaryNode && binaryNode->getOp() == EOpIndexDirectStruct &&
+ binaryNode->getLeft()->getBasicType() == EbtReference)
+ return false;
+
+ // Let the base class check errors
+ if (TParseContextBase::lValueErrorCheck(loc, op, node))
+ return true;
+
+ const char* symbol = nullptr;
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (symNode != nullptr)
+ symbol = symNode->getName().c_str();
+
+ const char* message = nullptr;
+ switch (node->getQualifier().storage) {
+ case EvqVaryingIn: message = "can't modify shader input"; break;
+ case EvqInstanceId: message = "can't modify gl_InstanceID"; break;
+ case EvqVertexId: message = "can't modify gl_VertexID"; break;
+ case EvqFace: message = "can't modify gl_FrontFace"; break;
+ case EvqFragCoord: message = "can't modify gl_FragCoord"; break;
+ case EvqPointCoord: message = "can't modify gl_PointCoord"; break;
+ case EvqFragDepth:
+ intermediate.setDepthReplacing();
+ // "In addition, it is an error to statically write to gl_FragDepth in the fragment shader."
+ if (profile == EEsProfile && intermediate.getEarlyFragmentTests())
+ message = "can't modify gl_FragDepth if using early_fragment_tests";
+ break;
+
+ default:
+ break;
+ }
+
+ if (message == nullptr && binaryNode == nullptr && symNode == nullptr) {
+ error(loc, " l-value required", op, "", "");
+
+ return true;
+ }
+
+ //
+ // Everything else is okay, no error.
+ //
+ if (message == nullptr)
+ return false;
+
+ //
+ // If we get here, we have an error and a message.
+ //
+ if (symNode)
+ error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
+ else
+ error(loc, " l-value required", op, "(%s)", message);
+
+ return true;
+}
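+
+// Illustrative sketch (hypothetical statements) of the swizzle l-value rule
+// enforced above:
+//
+//     vec4 v;
+//     v.yx = vec2(1.0);   // accepted: components are distinct
+//     v.xx = vec2(1.0);   // rejected: duplicate components in a swizzle l-value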
+
+// Test for and give an error if the node can't be read from.
+void TParseContext::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
+{
+ // Let the base class check errors
+ TParseContextBase::rValueErrorCheck(loc, op, node);
+
+#ifdef AMD_EXTENSIONS
+ TIntermSymbol* symNode = node->getAsSymbolNode();
+ if (!(symNode && symNode->getQualifier().writeonly)) // base class checks
+ if (symNode && symNode->getQualifier().explicitInterp)
+ error(loc, "can't read from explicitly-interpolated object: ", op, symNode->getName().c_str());
+#endif
+}
+
+//
+// Test, and if necessary emit an error, to see whether the node is really
+// a constant.
+//
+void TParseContext::constantValueCheck(TIntermTyped* node, const char* token)
+{
+ if (! node->getQualifier().isConstant())
+ error(node->getLoc(), "constant expression required", token, "");
+}
+
+//
+// Test, and if necessary emit an error, to see whether the node is really
+// a scalar integer.
+//
+void TParseContext::integerCheck(const TIntermTyped* node, const char* token)
+{
+ if ((node->getBasicType() == EbtInt || node->getBasicType() == EbtUint) && node->isScalar())
+ return;
+
+ error(node->getLoc(), "scalar integer expression required", token, "");
+}
+
+//
+// Test, and if necessary emit an error, to see whether we are currently
+// at global scope.
+//
+void TParseContext::globalCheck(const TSourceLoc& loc, const char* token)
+{
+ if (! symbolTable.atGlobalLevel())
+ error(loc, "not allowed in nested scope", token, "");
+}
+
+//
+// Reserved errors for GLSL.
+//
+void TParseContext::reservedErrorCheck(const TSourceLoc& loc, const TString& identifier)
+{
+ // "Identifiers starting with "gl_" are reserved for use by OpenGL, and may not be
+ // declared in a shader; this results in a compile-time error."
+ if (! symbolTable.atBuiltInLevel()) {
+ if (builtInName(identifier))
+ error(loc, "identifiers starting with \"gl_\" are reserved", identifier.c_str(), "");
+
+ // "__" are not supposed to be an error. ES 310 (and desktop) added the clarification:
+ // "In addition, all identifiers containing two consecutive underscores (__) are
+ // reserved; using such a name does not itself result in an error, but may result
+ // in undefined behavior."
+ // however, before that, ES tests required an error.
+ if (identifier.find("__") != TString::npos) {
+ if (profile == EEsProfile && version <= 300)
+ error(loc, "identifiers containing consecutive underscores (\"__\") are reserved, and an error if version <= 300", identifier.c_str(), "");
+ else
+ warn(loc, "identifiers containing consecutive underscores (\"__\") are reserved", identifier.c_str(), "");
+ }
+ }
+}
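+
+// Illustrative sketch (hypothetical declarations) of the reserved-name rules:
+//
+//     float gl_myVar;   // error: identifiers starting with "gl_" are reserved
+//     float a__b;       // ES version <= 300: error; otherwise only a warning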
+
+//
+// Reserved errors for the preprocessor.
+//
+void TParseContext::reservedPpErrorCheck(const TSourceLoc& loc, const char* identifier, const char* op)
+{
+ // "__" are not supposed to be an error. ES 310 (and desktop) added the clarification:
+ // "All macro names containing two consecutive underscores ( __ ) are reserved;
+ // defining such a name does not itself result in an error, but may result in
+ // undefined behavior. All macro names prefixed with "GL_" ("GL" followed by a
+ // single underscore) are also reserved, and defining such a name results in a
+ // compile-time error."
+ // however, before that, ES tests required an error.
+ if (strncmp(identifier, "GL_", 3) == 0)
+ ppError(loc, "names beginning with \"GL_\" can't be (un)defined:", op, identifier);
+ else if (strncmp(identifier, "defined", 8) == 0)
+ ppError(loc, "\"defined\" can't be (un)defined:", op, identifier);
+ else if (strstr(identifier, "__") != 0) {
+ if (profile == EEsProfile && version >= 300 &&
+ (strcmp(identifier, "__LINE__") == 0 ||
+ strcmp(identifier, "__FILE__") == 0 ||
+ strcmp(identifier, "__VERSION__") == 0))
+ ppError(loc, "predefined names can't be (un)defined:", op, identifier);
+ else {
+ if (profile == EEsProfile && version <= 300)
+ ppError(loc, "names containing consecutive underscores are reserved, and an error if version <= 300:", op, identifier);
+ else
+ ppWarn(loc, "names containing consecutive underscores are reserved:", op, identifier);
+ }
+ }
+}
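+
+// Illustrative sketch (hypothetical directives) of the preprocessor rules above:
+//
+//     #define GL_FOO 1    // error: names beginning with "GL_" can't be (un)defined
+//     #define defined 1   // error: "defined" can't be (un)defined
+//     #define __LINE__ 2  // ES >= 300: error, predefined name
+//     #define a__b 1      // ES <= 300: error; otherwise a warning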
+
+//
+// See if this version/profile allows use of the line-continuation character '\'.
+//
+// Returns true if a line continuation should be done.
+//
+bool TParseContext::lineContinuationCheck(const TSourceLoc& loc, bool endOfComment)
+{
+ const char* message = "line continuation";
+
+ bool lineContinuationAllowed = (profile == EEsProfile && version >= 300) ||
+ (profile != EEsProfile && (version >= 420 || extensionTurnedOn(E_GL_ARB_shading_language_420pack)));
+
+ if (endOfComment) {
+ if (lineContinuationAllowed)
+ warn(loc, "used at end of comment; the following line is still part of the comment", message, "");
+ else
+ warn(loc, "used at end of comment, but this version does not provide line continuation", message, "");
+
+ return lineContinuationAllowed;
+ }
+
+ if (relaxedErrors()) {
+ if (! lineContinuationAllowed)
+ warn(loc, "not allowed in this version", message, "");
+ return true;
+ } else {
+ profileRequires(loc, EEsProfile, 300, nullptr, message);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, message);
+ }
+
+ return lineContinuationAllowed;
+}
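+
+// Illustrative sketch (hypothetical macro): the '\' line continuation checked
+// above, legal in ES 300+, desktop 420+, or with GL_ARB_shading_language_420pack:
+//
+//     #define ADD_ONE(x) \
+//         ((x) + 1)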
+
+bool TParseContext::builtInName(const TString& identifier)
+{
+ return identifier.compare(0, 3, "gl_") == 0;
+}
+
+//
+// Make sure there is enough data provided to the constructor, and not too many
+// arguments, to build something of the constructor's type. Also returns the
+// constructor's type through 'type'.
+//
+// Part of establishing type is establishing specialization-constness.
+// We don't yet know "top down" whether type is a specialization constant,
+// but a const constructor can become a specialization constant if any of
+// its children are, subject to KHR_vulkan_glsl rules:
+//
+// - int(), uint(), and bool() constructors for type conversions
+// from any of the following types to any of the following types:
+// * int
+// * uint
+// * bool
+// - vector versions of the above conversion constructors
+//
+// Returns true if there was an error in construction.
+//
+bool TParseContext::constructorError(const TSourceLoc& loc, TIntermNode* node, TFunction& function, TOperator op, TType& type)
+{
+ type.shallowCopy(function.getType());
+
+ bool constructingMatrix = false;
+ switch(op) {
+ case EOpConstructTextureSampler:
+ return constructorTextureSamplerError(loc, function);
+ case EOpConstructMat2x2:
+ case EOpConstructMat2x3:
+ case EOpConstructMat2x4:
+ case EOpConstructMat3x2:
+ case EOpConstructMat3x3:
+ case EOpConstructMat3x4:
+ case EOpConstructMat4x2:
+ case EOpConstructMat4x3:
+ case EOpConstructMat4x4:
+ case EOpConstructDMat2x2:
+ case EOpConstructDMat2x3:
+ case EOpConstructDMat2x4:
+ case EOpConstructDMat3x2:
+ case EOpConstructDMat3x3:
+ case EOpConstructDMat3x4:
+ case EOpConstructDMat4x2:
+ case EOpConstructDMat4x3:
+ case EOpConstructDMat4x4:
+ case EOpConstructF16Mat2x2:
+ case EOpConstructF16Mat2x3:
+ case EOpConstructF16Mat2x4:
+ case EOpConstructF16Mat3x2:
+ case EOpConstructF16Mat3x3:
+ case EOpConstructF16Mat3x4:
+ case EOpConstructF16Mat4x2:
+ case EOpConstructF16Mat4x3:
+ case EOpConstructF16Mat4x4:
+ constructingMatrix = true;
+ break;
+ default:
+ break;
+ }
+
+ //
+ // Walk the arguments for first-pass checks and collection of information.
+ //
+
+ int size = 0;
+ bool constType = true;
+ bool specConstType = false; // value is only valid if constType is true
+ bool full = false;
+ bool overFull = false;
+ bool matrixInMatrix = false;
+ bool arrayArg = false;
+ bool floatArgument = false;
+ for (int arg = 0; arg < function.getParamCount(); ++arg) {
+ if (function[arg].type->isArray()) {
+ if (function[arg].type->isUnsizedArray()) {
+ // Can't construct from an unsized array.
+ error(loc, "array argument must be sized", "constructor", "");
+ return true;
+ }
+ arrayArg = true;
+ }
+ if (constructingMatrix && function[arg].type->isMatrix())
+ matrixInMatrix = true;
+
+ // 'full' will go to true when enough args have been seen. If we loop
+ // again, there is an extra argument.
+ if (full) {
+ // For vectors and matrices, it's okay to have too many components
+ // available, but not okay to have unused arguments.
+ overFull = true;
+ }
+
+ size += function[arg].type->computeNumComponents();
+ if (op != EOpConstructStruct && ! type.isArray() && size >= type.computeNumComponents())
+ full = true;
+
+ if (! function[arg].type->getQualifier().isConstant())
+ constType = false;
+ if (function[arg].type->getQualifier().isSpecConstant())
+ specConstType = true;
+ if (function[arg].type->isFloatingDomain())
+ floatArgument = true;
+ if (type.isStruct()) {
+ if (function[arg].type->containsBasicType(EbtFloat16)) {
+ requireFloat16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type");
+ }
+ if (function[arg].type->containsBasicType(EbtUint16) ||
+ function[arg].type->containsBasicType(EbtInt16)) {
+ requireInt16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type");
+ }
+ if (function[arg].type->containsBasicType(EbtUint8) ||
+ function[arg].type->containsBasicType(EbtInt8)) {
+ requireInt8Arithmetic(loc, "constructor", "can't construct structure containing 8-bit type");
+ }
+ }
+ }
+
+ switch (op) {
+ case EOpConstructFloat16:
+ case EOpConstructF16Vec2:
+ case EOpConstructF16Vec3:
+ case EOpConstructF16Vec4:
+ if (type.isArray())
+ requireFloat16Arithmetic(loc, "constructor", "16-bit arrays not supported");
+ if (type.isVector() && function.getParamCount() != 1)
+ requireFloat16Arithmetic(loc, "constructor", "16-bit vectors only take vector types");
+ break;
+ case EOpConstructUint16:
+ case EOpConstructU16Vec2:
+ case EOpConstructU16Vec3:
+ case EOpConstructU16Vec4:
+ case EOpConstructInt16:
+ case EOpConstructI16Vec2:
+ case EOpConstructI16Vec3:
+ case EOpConstructI16Vec4:
+ if (type.isArray())
+ requireInt16Arithmetic(loc, "constructor", "16-bit arrays not supported");
+ if (type.isVector() && function.getParamCount() != 1)
+ requireInt16Arithmetic(loc, "constructor", "16-bit vectors only take vector types");
+ break;
+ case EOpConstructUint8:
+ case EOpConstructU8Vec2:
+ case EOpConstructU8Vec3:
+ case EOpConstructU8Vec4:
+ case EOpConstructInt8:
+ case EOpConstructI8Vec2:
+ case EOpConstructI8Vec3:
+ case EOpConstructI8Vec4:
+ if (type.isArray())
+ requireInt8Arithmetic(loc, "constructor", "8-bit arrays not supported");
+ if (type.isVector() && function.getParamCount() != 1)
+ requireInt8Arithmetic(loc, "constructor", "8-bit vectors only take vector types");
+ break;
+ default:
+ break;
+ }
+
+ // inherit constness from children
+ if (constType) {
+ bool makeSpecConst;
+ // Finish pinning down spec-const semantics
+ if (specConstType) {
+ switch (op) {
+ case EOpConstructInt8:
+ case EOpConstructUint8:
+ case EOpConstructInt16:
+ case EOpConstructUint16:
+ case EOpConstructInt:
+ case EOpConstructUint:
+ case EOpConstructInt64:
+ case EOpConstructUint64:
+ case EOpConstructBool:
+ case EOpConstructBVec2:
+ case EOpConstructBVec3:
+ case EOpConstructBVec4:
+ case EOpConstructI8Vec2:
+ case EOpConstructI8Vec3:
+ case EOpConstructI8Vec4:
+ case EOpConstructU8Vec2:
+ case EOpConstructU8Vec3:
+ case EOpConstructU8Vec4:
+ case EOpConstructI16Vec2:
+ case EOpConstructI16Vec3:
+ case EOpConstructI16Vec4:
+ case EOpConstructU16Vec2:
+ case EOpConstructU16Vec3:
+ case EOpConstructU16Vec4:
+ case EOpConstructIVec2:
+ case EOpConstructIVec3:
+ case EOpConstructIVec4:
+ case EOpConstructUVec2:
+ case EOpConstructUVec3:
+ case EOpConstructUVec4:
+ case EOpConstructI64Vec2:
+ case EOpConstructI64Vec3:
+ case EOpConstructI64Vec4:
+ case EOpConstructU64Vec2:
+ case EOpConstructU64Vec3:
+ case EOpConstructU64Vec4:
+                // These are the valid conversions, as long as they aren't converting
+                // from float and aren't making an array.
+ makeSpecConst = ! floatArgument && ! type.isArray();
+ break;
+ default:
+ // anything else wasn't white-listed in the spec as a conversion
+ makeSpecConst = false;
+ break;
+ }
+ } else
+ makeSpecConst = false;
+
+ if (makeSpecConst)
+ type.getQualifier().makeSpecConstant();
+ else if (specConstType)
+ type.getQualifier().makeTemporary();
+ else
+ type.getQualifier().storage = EvqConst;
+ }
+
+ if (type.isArray()) {
+ if (function.getParamCount() == 0) {
+ error(loc, "array constructor must have at least one argument", "constructor", "");
+ return true;
+ }
+
+ if (type.isUnsizedArray()) {
+ // auto adapt the constructor type to the number of arguments
+ type.changeOuterArraySize(function.getParamCount());
+ } else if (type.getOuterArraySize() != function.getParamCount()) {
+ error(loc, "array constructor needs one argument per array element", "constructor", "");
+ return true;
+ }
+
+ if (type.isArrayOfArrays()) {
+ // Types have to match, but we're still making the type.
+ // Finish making the type, and the comparison is done later
+ // when checking for conversion.
+ TArraySizes& arraySizes = *type.getArraySizes();
+
+ // At least the dimensionalities have to match.
+ if (! function[0].type->isArray() ||
+ arraySizes.getNumDims() != function[0].type->getArraySizes()->getNumDims() + 1) {
+ error(loc, "array constructor argument not correct type to construct array element", "constructor", "");
+ return true;
+ }
+
+ if (arraySizes.isInnerUnsized()) {
+ // "Arrays of arrays ..., and the size for any dimension is optional"
+ // That means we need to adopt (from the first argument) the other array sizes into the type.
+ for (int d = 1; d < arraySizes.getNumDims(); ++d) {
+ if (arraySizes.getDimSize(d) == UnsizedArraySize) {
+ arraySizes.setDimSize(d, function[0].type->getArraySizes()->getDimSize(d - 1));
+ }
+ }
+ }
+ }
+ }
+
+ if (arrayArg && op != EOpConstructStruct && ! type.isArrayOfArrays()) {
+ error(loc, "constructing non-array constituent from array argument", "constructor", "");
+ return true;
+ }
+
+ if (matrixInMatrix && ! type.isArray()) {
+ profileRequires(loc, ENoProfile, 120, nullptr, "constructing matrix from matrix");
+
+ // "If a matrix argument is given to a matrix constructor,
+ // it is a compile-time error to have any other arguments."
+ if (function.getParamCount() != 1)
+ error(loc, "matrix constructed from matrix can only have one argument", "constructor", "");
+ return false;
+ }
+
+ if (overFull) {
+ error(loc, "too many arguments", "constructor", "");
+ return true;
+ }
+
+ if (op == EOpConstructStruct && ! type.isArray() && (int)type.getStruct()->size() != function.getParamCount()) {
+ error(loc, "Number of constructor parameters does not match the number of structure fields", "constructor", "");
+ return true;
+ }
+
+ if ((op != EOpConstructStruct && size != 1 && size < type.computeNumComponents()) ||
+ (op == EOpConstructStruct && size < type.computeNumComponents())) {
+ error(loc, "not enough data provided for construction", "constructor", "");
+ return true;
+ }
+
+ if (type.isCoopMat() && function.getParamCount() != 1) {
+ error(loc, "wrong number of arguments", "constructor", "");
+ return true;
+ }
+ if (type.isCoopMat() &&
+ !(function[0].type->isScalar() || function[0].type->isCoopMat())) {
+ error(loc, "Cooperative matrix constructor argument must be scalar or cooperative matrix", "constructor", "");
+ return true;
+ }
+
+ TIntermTyped* typed = node->getAsTyped();
+ if (typed == nullptr) {
+ error(loc, "constructor argument does not have a type", "constructor", "");
+ return true;
+ }
+ if (op != EOpConstructStruct && typed->getBasicType() == EbtSampler) {
+ error(loc, "cannot convert a sampler", "constructor", "");
+ return true;
+ }
+ if (op != EOpConstructStruct && typed->getBasicType() == EbtAtomicUint) {
+ error(loc, "cannot convert an atomic_uint", "constructor", "");
+ return true;
+ }
+ if (typed->getBasicType() == EbtVoid) {
+ error(loc, "cannot convert a void", "constructor", "");
+ return true;
+ }
+
+ return false;
+}
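+
+// Illustrative sketches (hypothetical expressions) of the constructor checks above:
+//
+//     vec3(1.0, 2.0);                   // error: not enough data provided
+//     vec2(1.0, 2.0, 3.0);              // error: too many arguments
+//     mat2(mat3(1.0), 1.0);             // error: matrix-from-matrix takes one argument
+//     float a[] = float[](1.0, 2.0);    // ok: unsized array adapts to two elements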
+
+// Verify all the correct semantics for constructing a combined texture/sampler.
+// Return true if the semantics are incorrect.
+bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const TFunction& function)
+{
+ TString constructorName = function.getType().getBasicTypeString(); // TODO: performance: should not be making copy; interface needs to change
+ const char* token = constructorName.c_str();
+
+ // exactly two arguments needed
+ if (function.getParamCount() != 2) {
+ error(loc, "sampler-constructor requires two arguments", token, "");
+ return true;
+ }
+
+    // For now, arrayed constructors are not allowed; the rest of this function
+    // is set up to allow them, if this test is removed:
+ if (function.getType().isArray()) {
+ error(loc, "sampler-constructor cannot make an array of samplers", token, "");
+ return true;
+ }
+
+ // first argument
+ // * the constructor's first argument must be a texture type
+ // * the dimensionality (1D, 2D, 3D, Cube, Rect, Buffer, MS, and Array)
+ // of the texture type must match that of the constructed sampler type
+ // (that is, the suffixes of the type of the first argument and the
+ // type of the constructor will be spelled the same way)
+ if (function[0].type->getBasicType() != EbtSampler ||
+ ! function[0].type->getSampler().isTexture() ||
+ function[0].type->isArray()) {
+ error(loc, "sampler-constructor first argument must be a scalar textureXXX type", token, "");
+ return true;
+ }
+ // simulate the first argument's impact on the result type, so it can be compared with the encapsulated operator!=()
+ TSampler texture = function.getType().getSampler();
+ texture.combined = false;
+ texture.shadow = false;
+ if (texture != function[0].type->getSampler()) {
+ error(loc, "sampler-constructor first argument must match type and dimensionality of constructor type", token, "");
+ return true;
+ }
+
+ // second argument
+ // * the constructor's second argument must be a scalar of type
+ // *sampler* or *samplerShadow*
+ if ( function[1].type->getBasicType() != EbtSampler ||
+ ! function[1].type->getSampler().isPureSampler() ||
+ function[1].type->isArray()) {
+ error(loc, "sampler-constructor second argument must be a scalar type 'sampler'", token, "");
+ return true;
+ }
+
+ return false;
+}
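+
+// Illustrative sketch (hypothetical Vulkan-style declarations) of a legal
+// combined-sampler construction and a rejected one:
+//
+//     uniform texture2D tex;
+//     uniform sampler   s;
+//     ... texture(sampler2D(tex, s), uv) ...   // ok: dimensionalities match
+//     ... sampler2D(tex) ...                   // error: requires two arguments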
+
+// Check whether a variable has been declared with type 'void', and raise an error for such a case.
+//
+// returns true in case of an error
+//
+bool TParseContext::voidErrorCheck(const TSourceLoc& loc, const TString& identifier, const TBasicType basicType)
+{
+ if (basicType == EbtVoid) {
+ error(loc, "illegal use of type 'void'", identifier.c_str(), "");
+ return true;
+ }
+
+ return false;
+}
+
+// Check that the expression node is a scalar boolean expression.
+void TParseContext::boolCheck(const TSourceLoc& loc, const TIntermTyped* type)
+{
+ if (type->getBasicType() != EbtBool || type->isArray() || type->isMatrix() || type->isVector())
+ error(loc, "boolean expression expected", "", "");
+}
+
+// Check that the public type describes a scalar boolean expression.
+void TParseContext::boolCheck(const TSourceLoc& loc, const TPublicType& pType)
+{
+ if (pType.basicType != EbtBool || pType.arraySizes || pType.matrixCols > 1 || (pType.vectorSize > 1))
+ error(loc, "boolean expression expected", "", "");
+}
+
+void TParseContext::samplerCheck(const TSourceLoc& loc, const TType& type, const TString& identifier, TIntermTyped* /*initializer*/)
+{
+ // Check that the appropriate extension is enabled if external sampler is used.
+ // There are two extensions. The correct one must be used based on GLSL version.
+ if (type.getBasicType() == EbtSampler && type.getSampler().external) {
+ if (version < 300) {
+ requireExtensions(loc, 1, &E_GL_OES_EGL_image_external, "samplerExternalOES");
+ } else {
+ requireExtensions(loc, 1, &E_GL_OES_EGL_image_external_essl3, "samplerExternalOES");
+ }
+ }
+ if (type.getSampler().yuv) {
+ requireExtensions(loc, 1, &E_GL_EXT_YUV_target, "__samplerExternal2DY2YEXT");
+ }
+
+ if (type.getQualifier().storage == EvqUniform)
+ return;
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtSampler))
+ error(loc, "non-uniform struct contains a sampler or image:", type.getBasicTypeString().c_str(), identifier.c_str());
+ else if (type.getBasicType() == EbtSampler && type.getQualifier().storage != EvqUniform) {
+ // non-uniform sampler
+ // not yet: okay if it has an initializer
+ // if (! initializer)
+ error(loc, "sampler/image types can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
+ }
+}
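+
+// Illustrative sketch (hypothetical declarations) of the uniform-only rule above:
+//
+//     uniform sampler2D s;   // ok
+//     sampler2D t;           // error: sampler types only in uniforms or parameters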
+
+void TParseContext::atomicUintCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (type.getQualifier().storage == EvqUniform)
+ return;
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAtomicUint))
+ error(loc, "non-uniform struct contains an atomic_uint:", type.getBasicTypeString().c_str(), identifier.c_str());
+ else if (type.getBasicType() == EbtAtomicUint && type.getQualifier().storage != EvqUniform)
+ error(loc, "atomic_uints can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str());
+}
+#ifdef NV_EXTENSIONS
+void TParseContext::accStructNVCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (type.getQualifier().storage == EvqUniform)
+ return;
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAccStructNV))
+ error(loc, "non-uniform struct contains an accelerationStructureNV:", type.getBasicTypeString().c_str(), identifier.c_str());
+ else if (type.getBasicType() == EbtAccStructNV && type.getQualifier().storage != EvqUniform)
+ error(loc, "accelerationStructureNV can only be used in uniform variables or function parameters:",
+ type.getBasicTypeString().c_str(), identifier.c_str());
+
+}
+#endif
+
+void TParseContext::transparentOpaqueCheck(const TSourceLoc& loc, const TType& type, const TString& identifier)
+{
+ if (parsingBuiltins)
+ return;
+
+ if (type.getQualifier().storage != EvqUniform)
+ return;
+
+ if (type.containsNonOpaque()) {
+ // Vulkan doesn't allow transparent uniforms outside of blocks
+ if (spvVersion.vulkan > 0)
+ vulkanRemoved(loc, "non-opaque uniforms outside a block");
+ // OpenGL wants locations on these (unless they are getting automapped)
+ if (spvVersion.openGl > 0 && !type.getQualifier().hasLocation() && !intermediate.getAutoMapLocations())
+ error(loc, "non-opaque uniform variables need a layout(location=L)", identifier.c_str(), "");
+ }
+}
+
+//
+// Qualifier checks done knowing only the qualifier and that it is on a struct/block member.
+//
+void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType)
+{
+ globalQualifierFixCheck(publicType.loc, publicType.qualifier);
+ checkNoShaderLayouts(publicType.loc, publicType.shaderQualifiers);
+ if (publicType.qualifier.isNonUniform()) {
+ error(publicType.loc, "not allowed on block or structure members", "nonuniformEXT", "");
+ publicType.qualifier.nonUniform = false;
+ }
+}
+
+//
+// Check/fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level.
+//
+void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier)
+{
+ bool nonuniformOkay = false;
+
+ // move from parameter/unknown qualifiers to pipeline in/out qualifiers
+ switch (qualifier.storage) {
+ case EvqIn:
+ profileRequires(loc, ENoProfile, 130, nullptr, "in for stage inputs");
+ profileRequires(loc, EEsProfile, 300, nullptr, "in for stage inputs");
+ qualifier.storage = EvqVaryingIn;
+ nonuniformOkay = true;
+ break;
+ case EvqOut:
+ profileRequires(loc, ENoProfile, 130, nullptr, "out for stage outputs");
+ profileRequires(loc, EEsProfile, 300, nullptr, "out for stage outputs");
+ qualifier.storage = EvqVaryingOut;
+ break;
+ case EvqInOut:
+ qualifier.storage = EvqVaryingIn;
+ error(loc, "cannot use 'inout' at global scope", "", "");
+ break;
+ case EvqGlobal:
+ case EvqTemporary:
+ nonuniformOkay = true;
+ break;
+ default:
+ break;
+ }
+
+ if (!nonuniformOkay && qualifier.nonUniform)
+ error(loc, "for non-parameter, can only apply to 'in' or no storage qualifier", "nonuniformEXT", "");
+
+ invariantCheck(loc, qualifier);
+}
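+
+// Illustrative sketch (hypothetical global declarations) of the fix-ups above:
+//
+//     in    vec4 a;   // becomes a pipeline input  (EvqVaryingIn)
+//     out   vec4 b;   // becomes a pipeline output (EvqVaryingOut)
+//     inout vec4 c;   // error: cannot use 'inout' at global scope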
+
+//
+// Check a full qualifier and type (no variable yet) at global level.
+//
+void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQualifier& qualifier, const TPublicType& publicType)
+{
+ if (! symbolTable.atGlobalLevel())
+ return;
+
+ if (!(publicType.userDef && publicType.userDef->getBasicType() == EbtReference)) {
+ if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) {
+ error(loc, "memory qualifiers cannot be used on this type", "", "");
+ } else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) {
+ error(loc, "memory qualifiers cannot be used on this type", "", "");
+ }
+ }
+
+ if (qualifier.storage == EvqBuffer &&
+ publicType.basicType != EbtBlock &&
+ !qualifier.layoutBufferReference)
+ error(loc, "buffers can be declared only as blocks", "buffer", "");
+
+ if (qualifier.storage != EvqVaryingIn && qualifier.storage != EvqVaryingOut)
+ return;
+
+ if (publicType.shaderQualifiers.blendEquation)
+ error(loc, "can only be applied to a standalone 'out'", "blend equation", "");
+
+ // now, knowing it is a shader in/out, do all the in/out semantic checks
+
+ if (publicType.basicType == EbtBool && !parsingBuiltins) {
+ error(loc, "cannot be bool", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+
+ if (isTypeInt(publicType.basicType) || publicType.basicType == EbtDouble)
+ profileRequires(loc, EEsProfile, 300, nullptr, "shader input/output");
+
+ if (!qualifier.flat
+#ifdef AMD_EXTENSIONS
+ && !qualifier.explicitInterp
+#endif
+#ifdef NV_EXTENSIONS
+ && !qualifier.pervertexNV
+#endif
+ ) {
+ if (isTypeInt(publicType.basicType) ||
+ publicType.basicType == EbtDouble ||
+ (publicType.userDef && (publicType.userDef->containsBasicType(EbtInt8) ||
+ publicType.userDef->containsBasicType(EbtUint8) ||
+ publicType.userDef->containsBasicType(EbtInt16) ||
+ publicType.userDef->containsBasicType(EbtUint16) ||
+ publicType.userDef->containsBasicType(EbtInt) ||
+ publicType.userDef->containsBasicType(EbtUint) ||
+ publicType.userDef->containsBasicType(EbtInt64) ||
+ publicType.userDef->containsBasicType(EbtUint64) ||
+ publicType.userDef->containsBasicType(EbtDouble)))) {
+ if (qualifier.storage == EvqVaryingIn && language == EShLangFragment)
+ error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage));
+ else if (qualifier.storage == EvqVaryingOut && language == EShLangVertex && version == 300)
+ error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage));
+ }
+ }
+
+ if (qualifier.patch && qualifier.isInterpolation())
+ error(loc, "cannot use interpolation qualifiers with patch", "patch", "");
+
+#ifdef NV_EXTENSIONS
+ if (qualifier.perTaskNV && publicType.basicType != EbtBlock)
+ error(loc, "taskNV variables can be declared only as blocks", "taskNV", "");
+#endif
+
+ if (qualifier.storage == EvqVaryingIn) {
+ switch (language) {
+ case EShLangVertex:
+ if (publicType.basicType == EbtStruct) {
+ error(loc, "cannot be a structure or array", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+ if (publicType.arraySizes) {
+ requireProfile(loc, ~EEsProfile, "vertex input arrays");
+ profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays");
+ }
+ if (publicType.basicType == EbtDouble)
+ profileRequires(loc, ~EEsProfile, 410, nullptr, "vertex-shader `double` type input");
+ if (qualifier.isAuxiliary() || qualifier.isInterpolation() || qualifier.isMemory() || qualifier.invariant)
+ error(loc, "vertex input cannot be further qualified", "", "");
+ break;
+
+ case EShLangTessControl:
+ if (qualifier.patch)
+ error(loc, "can only use on output in tessellation-control shader", "patch", "");
+ break;
+
+ case EShLangTessEvaluation:
+ break;
+
+ case EShLangGeometry:
+ break;
+
+ case EShLangFragment:
+ if (publicType.userDef) {
+ profileRequires(loc, EEsProfile, 300, nullptr, "fragment-shader struct input");
+ profileRequires(loc, ~EEsProfile, 150, nullptr, "fragment-shader struct input");
+ if (publicType.userDef->containsStructure())
+ requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing structure");
+ if (publicType.userDef->containsArray())
+ requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing an array");
+ }
+ break;
+
+ case EShLangCompute:
+ if (! symbolTable.atBuiltInLevel())
+ error(loc, "global storage input qualifier cannot be used in a compute shader", "in", "");
+ break;
+
+ default:
+ break;
+ }
+ } else {
+ // qualifier.storage == EvqVaryingOut
+ switch (language) {
+ case EShLangVertex:
+ if (publicType.userDef) {
+ profileRequires(loc, EEsProfile, 300, nullptr, "vertex-shader struct output");
+ profileRequires(loc, ~EEsProfile, 150, nullptr, "vertex-shader struct output");
+ if (publicType.userDef->containsStructure())
+ requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing structure");
+ if (publicType.userDef->containsArray())
+ requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing an array");
+ }
+
+ break;
+
+ case EShLangTessControl:
+ break;
+
+ case EShLangTessEvaluation:
+ if (qualifier.patch)
+ error(loc, "can only use on input in tessellation-evaluation shader", "patch", "");
+ break;
+
+ case EShLangGeometry:
+ break;
+
+ case EShLangFragment:
+ profileRequires(loc, EEsProfile, 300, nullptr, "fragment shader output");
+ if (publicType.basicType == EbtStruct) {
+ error(loc, "cannot be a structure", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+ if (publicType.matrixRows > 0) {
+ error(loc, "cannot be a matrix", GetStorageQualifierString(qualifier.storage), "");
+ return;
+ }
+ if (qualifier.isAuxiliary())
+ error(loc, "can't use auxiliary qualifier on a fragment output", "centroid/sample/patch", "");
+ if (qualifier.isInterpolation())
+ error(loc, "can't use interpolation qualifier on a fragment output", "flat/smooth/noperspective", "");
+ if (publicType.basicType == EbtDouble || publicType.basicType == EbtInt64 || publicType.basicType == EbtUint64)
+ error(loc, "cannot contain a double, int64, or uint64", GetStorageQualifierString(qualifier.storage), "");
+ break;
+
+ case EShLangCompute:
+ error(loc, "global storage output qualifier cannot be used in a compute shader", "out", "");
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+//
+// Merge characteristics of the 'src' qualifier into the 'dst'.
+// If there is duplication, issue error messages, unless 'force'
+// is specified, which means to just override default settings.
+//
+// Also, when force is false, it will be assumed that 'src' follows
+// 'dst', for the purpose of error checking order for versions
+// that require specific orderings of qualifiers.
+//
+void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, const TQualifier& src, bool force)
+{
+ // Multiple auxiliary qualifiers (mostly done later by 'individual qualifiers')
+ if (src.isAuxiliary() && dst.isAuxiliary())
+ error(loc, "can only have one auxiliary qualifier (centroid, patch, and sample)", "", "");
+
+ // Multiple interpolation qualifiers (mostly done later by 'individual qualifiers')
+ if (src.isInterpolation() && dst.isInterpolation())
+#ifdef AMD_EXTENSIONS
+ error(loc, "can only have one interpolation qualifier (flat, smooth, noperspective, __explicitInterpAMD)", "", "");
+#else
+ error(loc, "can only have one interpolation qualifier (flat, smooth, noperspective)", "", "");
+#endif
+
+ // Ordering
+ if (! force && ((profile != EEsProfile && version < 420) ||
+ (profile == EEsProfile && version < 310))
+ && ! extensionTurnedOn(E_GL_ARB_shading_language_420pack)) {
+ // non-function parameters
+ if (src.noContraction && (dst.invariant || dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "precise qualifier must appear first", "", "");
+ if (src.invariant && (dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "invariant qualifier must appear before interpolation, storage, and precision qualifiers ", "", "");
+ else if (src.isInterpolation() && (dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "interpolation qualifiers must appear before storage and precision qualifiers", "", "");
+ else if (src.isAuxiliary() && (dst.storage != EvqTemporary || dst.precision != EpqNone))
+ error(loc, "Auxiliary qualifiers (centroid, patch, and sample) must appear before storage and precision qualifiers", "", "");
+ else if (src.storage != EvqTemporary && (dst.precision != EpqNone))
+ error(loc, "precision qualifier must appear as last qualifier", "", "");
+
+ // function parameters
+ if (src.noContraction && (dst.storage == EvqConst || dst.storage == EvqIn || dst.storage == EvqOut))
+ error(loc, "precise qualifier must appear first", "", "");
+ if (src.storage == EvqConst && (dst.storage == EvqIn || dst.storage == EvqOut))
+ error(loc, "in/out must appear before const", "", "");
+ }
+
+ // Storage qualification
+ if (dst.storage == EvqTemporary || dst.storage == EvqGlobal)
+ dst.storage = src.storage;
+ else if ((dst.storage == EvqIn && src.storage == EvqOut) ||
+ (dst.storage == EvqOut && src.storage == EvqIn))
+ dst.storage = EvqInOut;
+ else if ((dst.storage == EvqIn && src.storage == EvqConst) ||
+ (dst.storage == EvqConst && src.storage == EvqIn))
+ dst.storage = EvqConstReadOnly;
+ else if (src.storage != EvqTemporary &&
+ src.storage != EvqGlobal)
+ error(loc, "too many storage qualifiers", GetStorageQualifierString(src.storage), "");
+
+ // Precision qualifiers
+ if (! force && src.precision != EpqNone && dst.precision != EpqNone)
+ error(loc, "only one precision qualifier allowed", GetPrecisionQualifierString(src.precision), "");
+ if (dst.precision == EpqNone || (force && src.precision != EpqNone))
+ dst.precision = src.precision;
+
+ if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
+ (src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
+ (src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent)) ||
+ (src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent)) ||
+ (src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent)))) {
+ error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent qualifier allowed", GetPrecisionQualifierString(src.precision), "");
+ }
+ // Layout qualifiers
+ mergeObjectLayoutQualifiers(dst, src, false);
+
+ // individual qualifiers
+ bool repeated = false;
+ #define MERGE_SINGLETON(field) repeated |= dst.field && src.field; dst.field |= src.field;
+ MERGE_SINGLETON(invariant);
+ MERGE_SINGLETON(noContraction);
+ MERGE_SINGLETON(centroid);
+ MERGE_SINGLETON(smooth);
+ MERGE_SINGLETON(flat);
+ MERGE_SINGLETON(nopersp);
+#ifdef AMD_EXTENSIONS
+ MERGE_SINGLETON(explicitInterp);
+#endif
+#ifdef NV_EXTENSIONS
+ MERGE_SINGLETON(perPrimitiveNV);
+ MERGE_SINGLETON(perViewNV);
+ MERGE_SINGLETON(perTaskNV);
+#endif
+ MERGE_SINGLETON(patch);
+ MERGE_SINGLETON(sample);
+ MERGE_SINGLETON(coherent);
+ MERGE_SINGLETON(devicecoherent);
+ MERGE_SINGLETON(queuefamilycoherent);
+ MERGE_SINGLETON(workgroupcoherent);
+ MERGE_SINGLETON(subgroupcoherent);
+ MERGE_SINGLETON(nonprivate);
+ MERGE_SINGLETON(volatil);
+ MERGE_SINGLETON(restrict);
+ MERGE_SINGLETON(readonly);
+ MERGE_SINGLETON(writeonly);
+ MERGE_SINGLETON(specConstant);
+ MERGE_SINGLETON(nonUniform);
+
+ if (repeated)
+ error(loc, "replicated qualifiers", "", "");
+}
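+
+// Illustrative sketch (hypothetical declarations) of merges this rejects, for
+// versions requiring strict qualifier order (desktop < 420 / ES < 310, no 420pack):
+//
+//     out smooth vec4 a;       // error: interpolation must precede storage
+//     flat smooth out vec4 b;  // error: only one interpolation qualifier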
+
+void TParseContext::setDefaultPrecision(const TSourceLoc& loc, TPublicType& publicType, TPrecisionQualifier qualifier)
+{
+ TBasicType basicType = publicType.basicType;
+
+ if (basicType == EbtSampler) {
+ defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)] = qualifier;
+
+ return; // all is well
+ }
+
+ if (basicType == EbtInt || basicType == EbtFloat) {
+ if (publicType.isScalar()) {
+ defaultPrecision[basicType] = qualifier;
+ if (basicType == EbtInt) {
+ defaultPrecision[EbtUint] = qualifier;
+ precisionManager.explicitIntDefaultSeen();
+ } else
+ precisionManager.explicitFloatDefaultSeen();
+
+ return; // all is well
+ }
+ }
+
+ if (basicType == EbtAtomicUint) {
+ if (qualifier != EpqHigh)
+ error(loc, "can only apply highp to atomic_uint", "precision", "");
+
+ return;
+ }
+
+ error(loc, "cannot apply precision statement to this type; use 'float', 'int' or a sampler type", TType::getBasicString(basicType), "");
+}
+
+// Used to flatten the sampler type space into a single dimension;
+// correlates with the declaration of defaultSamplerPrecision[].
+int TParseContext::computeSamplerTypeIndex(TSampler& sampler)
+{
+ int arrayIndex = sampler.arrayed ? 1 : 0;
+ int shadowIndex = sampler.shadow ? 1 : 0;
+ int externalIndex = sampler.external? 1 : 0;
+ int imageIndex = sampler.image ? 1 : 0;
+ int msIndex = sampler.ms ? 1 : 0;
+
+ int flattened = EsdNumDims * (EbtNumTypes * (2 * (2 * (2 * (2 * arrayIndex + msIndex) + imageIndex) + shadowIndex) +
+ externalIndex) + sampler.type) + sampler.dim;
+ assert(flattened < maxSamplerIndex);
+
+ return flattened;
+}
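+
+// Worked example (symbolic; the enum values come from the type headers): for a
+// sampler2DShadow, arrayed = ms = image = external = 0 and shadow = 1, so the
+// nested term is 2*(2*(2*(2*0 + 0) + 0) + 1) + 0 = 2, giving
+//
+//     flattened = EsdNumDims * (EbtNumTypes * 2 + EbtFloat) + Esd2D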
+
+TPrecisionQualifier TParseContext::getDefaultPrecision(TPublicType& publicType)
+{
+ if (publicType.basicType == EbtSampler)
+ return defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)];
+ else
+ return defaultPrecision[publicType.basicType];
+}
+
+void TParseContext::precisionQualifierCheck(const TSourceLoc& loc, TBasicType baseType, TQualifier& qualifier)
+{
+ // Built-in symbols are allowed some ambiguous precisions, to be pinned down
+ // later by context.
+ if (! obeyPrecisionQualifiers() || parsingBuiltins)
+ return;
+
+ if (baseType == EbtAtomicUint && qualifier.precision != EpqNone && qualifier.precision != EpqHigh)
+ error(loc, "atomic counters can only be highp", "atomic_uint", "");
+
+ if (baseType == EbtFloat || baseType == EbtUint || baseType == EbtInt || baseType == EbtSampler || baseType == EbtAtomicUint) {
+ if (qualifier.precision == EpqNone) {
+ if (relaxedErrors())
+ warn(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "substituting 'mediump'");
+ else
+ error(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "");
+ qualifier.precision = EpqMedium;
+ defaultPrecision[baseType] = EpqMedium;
+ }
+ } else if (qualifier.precision != EpqNone)
+ error(loc, "type cannot have precision qualifier", TType::getBasicString(baseType), "");
+}
+
+void TParseContext::parameterTypeCheck(const TSourceLoc& loc, TStorageQualifier qualifier, const TType& type)
+{
+ if ((qualifier == EvqOut || qualifier == EvqInOut) && type.isOpaque())
+ error(loc, "samplers and atomic_uints cannot be output parameters", type.getBasicTypeString().c_str(), "");
+
+ if (!parsingBuiltins && type.containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, type.getBasicTypeString().c_str(), "float16 types can only be in uniform block or buffer storage");
+ if (!parsingBuiltins && type.contains16BitInt())
+ requireInt16Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int16 types can only be in uniform block or buffer storage");
+ if (!parsingBuiltins && type.contains8BitInt())
+ requireInt8Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int8 types can only be in uniform block or buffer storage");
+}
+
+bool TParseContext::containsFieldWithBasicType(const TType& type, TBasicType basicType)
+{
+ if (type.getBasicType() == basicType)
+ return true;
+
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& structure = *type.getStruct();
+ for (unsigned int i = 0; i < structure.size(); ++i) {
+ if (containsFieldWithBasicType(*structure[i].type, basicType))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+//
+// Do semantic checking on an array type's size: it must be a positive,
+// constant integer expression (a true constant or a specialization constant).
+//
+void TParseContext::arraySizeCheck(const TSourceLoc& loc, TIntermTyped* expr, TArraySize& sizePair, const char *sizeType)
+{
+ bool isConst = false;
+ sizePair.node = nullptr;
+
+ int size = 1;
+
+ TIntermConstantUnion* constant = expr->getAsConstantUnion();
+ if (constant) {
+ // handle true (non-specialization) constant
+ size = constant->getConstArray()[0].getIConst();
+ isConst = true;
+ } else {
+ // see if it's a specialization constant instead
+ if (expr->getQualifier().isSpecConstant()) {
+ isConst = true;
+ sizePair.node = expr;
+ TIntermSymbol* symbol = expr->getAsSymbolNode();
+ if (symbol && symbol->getConstArray().size() > 0)
+ size = symbol->getConstArray()[0].getIConst();
+ } else if (expr->getAsUnaryNode() &&
+ expr->getAsUnaryNode()->getOp() == glslang::EOpArrayLength &&
+ expr->getAsUnaryNode()->getOperand()->getType().isCoopMat()) {
+ isConst = true;
+ size = 1;
+ sizePair.node = expr->getAsUnaryNode();
+ }
+ }
+
+ sizePair.size = size;
+
+ if (! isConst || (expr->getBasicType() != EbtInt && expr->getBasicType() != EbtUint)) {
+ error(loc, sizeType, "", "must be a constant integer expression");
+ return;
+ }
+
+ if (size <= 0) {
+ error(loc, sizeType, "", "must be a positive integer");
+ return;
+ }
+}
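+
+// Illustrative sketch (hypothetical declarations) of sizes this accepts and rejects:
+//
+//     float a[4];                               // ok: constant integer expression
+//     layout(constant_id = 7) const int N = 3;
+//     float b[N];                               // ok: specialization constant; node kept
+//     float c[0];                               // error: must be a positive integer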
+
+//
+// See if this qualifier can be an array.
+//
+// Returns true if there is an error.
+//
+bool TParseContext::arrayQualifierError(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ if (qualifier.storage == EvqConst) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "const array");
+ profileRequires(loc, EEsProfile, 300, nullptr, "const array");
+ }
+
+ if (qualifier.storage == EvqVaryingIn && language == EShLangVertex) {
+ requireProfile(loc, ~EEsProfile, "vertex input arrays");
+ profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays");
+ }
+
+ return false;
+}
+
+//
+// See if this qualifier and type combination can be an array.
+// Assumes arrayQualifierError() was also called to catch the type-invariant tests.
+//
+// Returns true if there is an error.
+//
+bool TParseContext::arrayError(const TSourceLoc& loc, const TType& type)
+{
+ if (type.getQualifier().storage == EvqVaryingOut && language == EShLangVertex) {
+ if (type.isArrayOfArrays())
+ requireProfile(loc, ~EEsProfile, "vertex-shader array-of-array output");
+ else if (type.isStruct())
+ requireProfile(loc, ~EEsProfile, "vertex-shader array-of-struct output");
+ }
+ if (type.getQualifier().storage == EvqVaryingIn && language == EShLangFragment) {
+ if (type.isArrayOfArrays())
+ requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array input");
+ else if (type.isStruct())
+ requireProfile(loc, ~EEsProfile, "fragment-shader array-of-struct input");
+ }
+ if (type.getQualifier().storage == EvqVaryingOut && language == EShLangFragment) {
+ if (type.isArrayOfArrays())
+ requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array output");
+ }
+
+ return false;
+}
+
+//
+// Require array to be completely sized
+//
+void TParseContext::arraySizeRequiredCheck(const TSourceLoc& loc, const TArraySizes& arraySizes)
+{
+ if (!parsingBuiltins && arraySizes.hasUnsized())
+ error(loc, "array size required", "", "");
+}
+
+void TParseContext::structArrayCheck(const TSourceLoc& /*loc*/, const TType& type)
+{
+ const TTypeList& structure = *type.getStruct();
+ for (int m = 0; m < (int)structure.size(); ++m) {
+ const TType& member = *structure[m].type;
+ if (member.isArray())
+ arraySizeRequiredCheck(structure[m].loc, *member.getArraySizes());
+ }
+}
+
+void TParseContext::arraySizesCheck(const TSourceLoc& loc, const TQualifier& qualifier, TArraySizes* arraySizes,
+ const TIntermTyped* initializer, bool lastMember)
+{
+ assert(arraySizes);
+
+ // always allow special built-in ins/outs sized to topologies
+ if (parsingBuiltins)
+ return;
+
+ // initializer must be a sized array, in which case
+ // allow the initializer to set any unknown array sizes
+ if (initializer != nullptr) {
+ if (initializer->getType().isUnsizedArray())
+ error(loc, "array initializer must be sized", "[]", "");
+ return;
+ }
+
+ // No environment allows any non-outer-dimension to be implicitly sized
+ if (arraySizes->isInnerUnsized()) {
+ error(loc, "only outermost dimension of an array of arrays can be implicitly sized", "[]", "");
+ arraySizes->clearInnerUnsized();
+ }
+
+ if (arraySizes->isInnerSpecialization() &&
+ (qualifier.storage != EvqTemporary && qualifier.storage != EvqGlobal && qualifier.storage != EvqShared && qualifier.storage != EvqConst))
+ error(loc, "only outermost dimension of an array of arrays can be a specialization constant", "[]", "");
+
+    // Desktop always allows outer-dimension-unsized variable arrays.
+ if (profile != EEsProfile)
+ return;
+
+ // for ES, if size isn't coming from an initializer, it has to be explicitly declared now,
+ // with very few exceptions
+
+ // last member of ssbo block exception:
+ if (qualifier.storage == EvqBuffer && lastMember)
+ return;
+
+ // implicitly-sized io exceptions:
+ switch (language) {
+ case EShLangGeometry:
+ if (qualifier.storage == EvqVaryingIn)
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionsTurnedOn(Num_AEP_geometry_shader, AEP_geometry_shader))
+ return;
+ break;
+ case EShLangTessControl:
+ if ( qualifier.storage == EvqVaryingIn ||
+ (qualifier.storage == EvqVaryingOut && ! qualifier.patch))
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))
+ return;
+ break;
+ case EShLangTessEvaluation:
+ if ((qualifier.storage == EvqVaryingIn && ! qualifier.patch) ||
+ qualifier.storage == EvqVaryingOut)
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))
+ return;
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangMeshNV:
+ if (qualifier.storage == EvqVaryingOut)
+ if ((profile == EEsProfile && version >= 320) ||
+ extensionTurnedOn(E_GL_NV_mesh_shader))
+ return;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ arraySizeRequiredCheck(loc, *arraySizes);
+}
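+
+// Illustrative sketch (hypothetical ES declarations) of the unsized-array policy:
+//
+//     buffer B { float head; float tail[]; };   // ok: unsized last member of an SSBO
+//     float g[];                                // ES: error, array size required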
+
+void TParseContext::arrayOfArrayVersionCheck(const TSourceLoc& loc, const TArraySizes* sizes)
+{
+ if (sizes == nullptr || sizes->getNumDims() == 1)
+ return;
+
+ const char* feature = "arrays of arrays";
+
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, feature);
+}
+
+//
+// Do all the semantic checking for declaring or redeclaring an array, with and
+// without a size, and make the right changes to the symbol table.
+//
+void TParseContext::declareArray(const TSourceLoc& loc, const TString& identifier, const TType& type, TSymbol*& symbol)
+{
+ if (symbol == nullptr) {
+ bool currentScope;
+ symbol = symbolTable.find(identifier, nullptr, &currentScope);
+
+ if (symbol && builtInName(identifier) && ! symbolTable.atBuiltInLevel()) {
+ // bad shader (errors already reported) trying to redeclare a built-in name as an array
+ symbol = nullptr;
+ return;
+ }
+ if (symbol == nullptr || ! currentScope) {
+ //
+ // Successfully process a new definition.
+ // (Redeclarations have to take place at the same scope; otherwise they are hiding declarations)
+ //
+ symbol = new TVariable(&identifier, type);
+ symbolTable.insert(*symbol);
+ if (symbolTable.atGlobalLevel())
+ trackLinkage(*symbol);
+
+ if (! symbolTable.atBuiltInLevel()) {
+ if (isIoResizeArray(type)) {
+ ioArraySymbolResizeList.push_back(symbol);
+ checkIoArraysConsistency(loc, true);
+ } else
+ fixIoArraySize(loc, symbol->getWritableType());
+ }
+
+ return;
+ }
+ if (symbol->getAsAnonMember()) {
+ error(loc, "cannot redeclare a user-block member array", identifier.c_str(), "");
+ symbol = nullptr;
+ return;
+ }
+ }
+
+ //
+ // Process a redeclaration.
+ //
+
+ if (symbol == nullptr) {
+ error(loc, "array variable name expected", identifier.c_str(), "");
+ return;
+ }
+
+ // redeclareBuiltinVariable() should have already done the copyUp()
+ TType& existingType = symbol->getWritableType();
+
+ if (! existingType.isArray()) {
+ error(loc, "redeclaring non-array as array", identifier.c_str(), "");
+ return;
+ }
+
+ if (! existingType.sameElementType(type)) {
+ error(loc, "redeclaration of array with a different element type", identifier.c_str(), "");
+ return;
+ }
+
+ if (! existingType.sameInnerArrayness(type)) {
+ error(loc, "redeclaration of array with a different array dimensions or sizes", identifier.c_str(), "");
+ return;
+ }
+
+ if (existingType.isSizedArray()) {
+        // Be more lenient for input arrays to geometry shaders and tessellation-control outputs, where the redeclaration is the same size.
+ if (! (isIoResizeArray(type) && existingType.getOuterArraySize() == type.getOuterArraySize()))
+ error(loc, "redeclaration of array with size", identifier.c_str(), "");
+ return;
+ }
+
+ arrayLimitCheck(loc, identifier, type.getOuterArraySize());
+
+ existingType.updateArraySizes(type);
+
+ if (isIoResizeArray(type))
+ checkIoArraysConsistency(loc);
+}
+
+// Policy and error check for needing a runtime sized array.
+void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermTyped& base)
+{
+ // runtime length implies runtime sizeable, so no problem
+ if (isRuntimeLength(base))
+ return;
+
+    // Check for the last member of a buffer reference type, which is runtime sizeable
+    // but doesn't support runtime length
+ if (base.getType().getQualifier().storage == EvqBuffer) {
+ const TIntermBinary* binary = base.getAsBinaryNode();
+ if (binary != nullptr &&
+ binary->getOp() == EOpIndexDirectStruct &&
+ binary->getLeft()->getBasicType() == EbtReference) {
+
+ const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ const int memberCount = (int)binary->getLeft()->getType().getReferentType()->getStruct()->size();
+ if (index == memberCount - 1)
+ return;
+ }
+ }
+
+ // check for additional things allowed by GL_EXT_nonuniform_qualifier
+ if (base.getBasicType() == EbtSampler ||
+ (base.getBasicType() == EbtBlock && base.getType().getQualifier().isUniformOrBuffer()))
+ requireExtensions(loc, 1, &E_GL_EXT_nonuniform_qualifier, "variable index");
+ else
+ error(loc, "", "[", "array must be redeclared with a size before being indexed with a variable");
+}
+
+// Policy decision for whether a run-time .length() is allowed.
+bool TParseContext::isRuntimeLength(const TIntermTyped& base) const
+{
+ if (base.getType().getQualifier().storage == EvqBuffer) {
+ // in a buffer block
+ const TIntermBinary* binary = base.getAsBinaryNode();
+ if (binary != nullptr && binary->getOp() == EOpIndexDirectStruct) {
+ // is it the last member?
+ const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+
+ if (binary->getLeft()->getBasicType() == EbtReference)
+ return false;
+
+ const int memberCount = (int)binary->getLeft()->getType().getStruct()->size();
+ if (index == memberCount - 1)
+ return true;
+ }
+ }
+
+ return false;
+}
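+
+// Illustrative sketch (hypothetical block) of where a run-time .length() is allowed:
+//
+//     buffer B { uint count; float data[]; } b;
+//     ... b.data.length() ...   // ok: 'data' is the last member of a buffer block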
+
+#ifdef NV_EXTENSIONS
+// Fix mesh view output array dimension
+void TParseContext::resizeMeshViewDimension(const TSourceLoc& loc, TType& type)
+{
+ // see if member is a per-view attribute
+ if (type.getQualifier().isPerView()) {
+ // since we don't have the maxMeshViewCountNV set during parsing builtins, we hardcode the value
+ int maxViewCount = parsingBuiltins ? 4 : resources.maxMeshViewCountNV;
+
+ if (! type.isArray()) {
+ error(loc, "requires an view array dimension", "perviewNV", "");
+ }
+ else if (!type.isUnsizedArray() && type.getOuterArraySize() != maxViewCount) {
+ error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", "");
+ }
+ else if (type.isUnsizedArray()) {
+ type.changeOuterArraySize(maxViewCount);
+ }
+ }
+}
+#endif
+
+// Returns true if the first argument to the #line directive is the line number for the next line.
+//
+// Desktop, pre-version 3.30: "After processing this directive
+// (including its new-line), the implementation will behave as if it is compiling at line number line+1 and
+// source string number source-string-number."
+//
+// Desktop, version 3.30 and later, and ES: "After processing this directive
+// (including its new-line), the implementation will behave as if it is compiling at line number line and
+// source string number source-string-number."
+bool TParseContext::lineDirectiveShouldSetNextLine() const
+{
+ return profile == EEsProfile || version >= 330;
+}
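+
+// Illustrative sketch (hypothetical directive): after
+//
+//     #line 10
+//
+// the following line is reported as line 10 in ES and desktop 3.30+, but as
+// line 11 on older desktop versions.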
+
+//
+// Enforce non-initializer type/qualifier rules.
+//
+void TParseContext::nonInitConstCheck(const TSourceLoc& loc, TString& identifier, TType& type)
+{
+ //
+ // Make the qualifier make sense, given that there is not an initializer.
+ //
+ if (type.getQualifier().storage == EvqConst ||
+ type.getQualifier().storage == EvqConstReadOnly) {
+ type.getQualifier().makeTemporary();
+ error(loc, "variables with qualifier 'const' must be initialized", identifier.c_str(), "");
+ }
+}
+
+//
+// See if the identifier is a built-in symbol that can be redeclared, and if so,
+// copy the symbol table's read-only built-in variable to the current
+// global level, where it can be modified based on the passed-in type.
+//
+// Returns nullptr if no redeclaration took place, meaning a normal declaration still
+// needs to occur for it; this is not necessarily an error.
+//
+// Returns a redeclared and type-modified variable if a redeclaration occurred.
+//
+TSymbol* TParseContext::redeclareBuiltinVariable(const TSourceLoc& loc, const TString& identifier,
+ const TQualifier& qualifier, const TShaderQualifiers& publicType)
+{
+ if (! builtInName(identifier) || symbolTable.atBuiltInLevel() || ! symbolTable.atGlobalLevel())
+ return nullptr;
+
+ bool nonEsRedecls = (profile != EEsProfile && (version >= 130 || identifier == "gl_TexCoord"));
+ bool esRedecls = (profile == EEsProfile &&
+ (version >= 320 || extensionsTurnedOn(Num_AEP_shader_io_blocks, AEP_shader_io_blocks)));
+ if (! esRedecls && ! nonEsRedecls)
+ return nullptr;
+
+ // Special case when using GL_ARB_separate_shader_objects
+ bool ssoPre150 = false; // means the only reason this variable is redeclared is due to this combination
+ if (profile != EEsProfile && version <= 140 && extensionTurnedOn(E_GL_ARB_separate_shader_objects)) {
+ if (identifier == "gl_Position" ||
+ identifier == "gl_PointSize" ||
+ identifier == "gl_ClipVertex" ||
+ identifier == "gl_FogFragCoord")
+ ssoPre150 = true;
+ }
+
+ // Potentially redeclaring a built-in variable...
+
+ if (ssoPre150 ||
+ (identifier == "gl_FragDepth" && ((nonEsRedecls && version >= 420) || esRedecls)) ||
+ (identifier == "gl_FragCoord" && ((nonEsRedecls && version >= 150) || esRedecls)) ||
+ identifier == "gl_ClipDistance" ||
+ identifier == "gl_CullDistance" ||
+ identifier == "gl_FrontColor" ||
+ identifier == "gl_BackColor" ||
+ identifier == "gl_FrontSecondaryColor" ||
+ identifier == "gl_BackSecondaryColor" ||
+ identifier == "gl_SecondaryColor" ||
+ (identifier == "gl_Color" && language == EShLangFragment) ||
+ (identifier == "gl_FragStencilRefARB" && (nonEsRedecls && version >= 140)
+ && language == EShLangFragment) ||
+#ifdef NV_EXTENSIONS
+ identifier == "gl_SampleMask" ||
+ identifier == "gl_Layer" ||
+ identifier == "gl_PrimitiveIndicesNV" ||
+#endif
+ identifier == "gl_TexCoord") {
+
+ // Find the existing symbol, if any.
+ bool builtIn;
+ TSymbol* symbol = symbolTable.find(identifier, &builtIn);
+
+ // If the symbol was not found, this must be a version/profile/stage
+ // that doesn't have it.
+ if (! symbol)
+ return nullptr;
+
+ // If it wasn't at a built-in level, then it's already been redeclared;
+ // that is, this is a redeclaration of a redeclaration; reuse that initial
+ // redeclaration. Otherwise, make the new one.
+ if (builtIn)
+ makeEditable(symbol);
+
+ // Now, modify the type of the copy, as per the type of the current redeclaration.
+
+ TQualifier& symbolQualifier = symbol->getWritableType().getQualifier();
+ if (ssoPre150) {
+ if (intermediate.inIoAccessed(identifier))
+ error(loc, "cannot redeclare after use", identifier.c_str(), "");
+ if (qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
+ if (qualifier.isMemory() || qualifier.isAuxiliary() || (language == EShLangVertex && qualifier.storage != EvqVaryingOut) ||
+ (language == EShLangFragment && qualifier.storage != EvqVaryingIn))
+ error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str());
+ if (! qualifier.smooth)
+ error(loc, "cannot change interpolation qualification of", "redeclaration", symbol->getName().c_str());
+ } else if (identifier == "gl_FrontColor" ||
+ identifier == "gl_BackColor" ||
+ identifier == "gl_FrontSecondaryColor" ||
+ identifier == "gl_BackSecondaryColor" ||
+ identifier == "gl_SecondaryColor" ||
+ identifier == "gl_Color") {
+ symbolQualifier.flat = qualifier.flat;
+ symbolQualifier.smooth = qualifier.smooth;
+ symbolQualifier.nopersp = qualifier.nopersp;
+ if (qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
+ if (qualifier.isMemory() || qualifier.isAuxiliary() || symbol->getType().getQualifier().storage != qualifier.storage)
+ error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str());
+ } else if (identifier == "gl_TexCoord" ||
+ identifier == "gl_ClipDistance" ||
+ identifier == "gl_CullDistance") {
+ if (qualifier.hasLayout() || qualifier.isMemory() || qualifier.isAuxiliary() ||
+ qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
+ symbolQualifier.storage != qualifier.storage)
+ error(loc, "cannot change qualification of", "redeclaration", symbol->getName().c_str());
+ } else if (identifier == "gl_FragCoord") {
+ if (intermediate.inIoAccessed("gl_FragCoord"))
+ error(loc, "cannot redeclare after use", "gl_FragCoord", "");
+ if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
+ qualifier.isMemory() || qualifier.isAuxiliary())
+ error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
+ if (qualifier.storage != EvqVaryingIn)
+ error(loc, "cannot change input storage qualification of", "redeclaration", symbol->getName().c_str());
+ if (! builtIn && (publicType.pixelCenterInteger != intermediate.getPixelCenterInteger() ||
+ publicType.originUpperLeft != intermediate.getOriginUpperLeft()))
+ error(loc, "cannot redeclare with different qualification:", "redeclaration", symbol->getName().c_str());
+ if (publicType.pixelCenterInteger)
+ intermediate.setPixelCenterInteger();
+ if (publicType.originUpperLeft)
+ intermediate.setOriginUpperLeft();
+ } else if (identifier == "gl_FragDepth") {
+ if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat ||
+ qualifier.isMemory() || qualifier.isAuxiliary())
+ error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str());
+ if (qualifier.storage != EvqVaryingOut)
+ error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
+ if (publicType.layoutDepth != EldNone) {
+ if (intermediate.inIoAccessed("gl_FragDepth"))
+ error(loc, "cannot redeclare after use", "gl_FragDepth", "");
+ if (! intermediate.setDepth(publicType.layoutDepth))
+ error(loc, "all redeclarations must use the same depth layout on", "redeclaration", symbol->getName().c_str());
+ }
+ }
+ else if (
+#ifdef NV_EXTENSIONS
+ identifier == "gl_PrimitiveIndicesNV" ||
+#endif
+ identifier == "gl_FragStencilRefARB") {
+ if (qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str());
+ if (qualifier.storage != EvqVaryingOut)
+ error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str());
+ }
+#ifdef NV_EXTENSIONS
+ else if (identifier == "gl_SampleMask") {
+ if (!publicType.layoutOverrideCoverage) {
+ error(loc, "redeclaration only allowed for override_coverage layout", "redeclaration", symbol->getName().c_str());
+ }
+ intermediate.setLayoutOverrideCoverage();
+ }
+ else if (identifier == "gl_Layer") {
+ if (!qualifier.layoutViewportRelative && qualifier.layoutSecondaryViewportRelativeOffset == -2048)
+ error(loc, "redeclaration only allowed for viewport_relative or secondary_view_offset layout", "redeclaration", symbol->getName().c_str());
+ symbolQualifier.layoutViewportRelative = qualifier.layoutViewportRelative;
+ symbolQualifier.layoutSecondaryViewportRelativeOffset = qualifier.layoutSecondaryViewportRelativeOffset;
+ }
+#endif
+
+ // TODO: semantics quality: separate smooth from nothing declared, then use IsInterpolation for several tests above
+
+ return symbol;
+ }
+
+ return nullptr;
+}
+
+//
+// Either redeclare the requested block, or give an error message why it can't be done.
+//
+// TODO: functionality: explicitly sizing members of redeclared blocks is not giving them an explicit size
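+//
+// For example (illustrative), a vertex shader may redeclare
+//     out gl_PerVertex { vec4 gl_Position; };
+// to trim the block down to just the members it uses.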
+void TParseContext::redeclareBuiltinBlock(const TSourceLoc& loc, TTypeList& newTypeList, const TString& blockName,
+ const TString* instanceName, TArraySizes* arraySizes)
+{
+ const char* feature = "built-in block redeclaration";
+ profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
+ profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
+
+ if (blockName != "gl_PerVertex" && blockName != "gl_PerFragment"
+#ifdef NV_EXTENSIONS
+ && blockName != "gl_MeshPerVertexNV" && blockName != "gl_MeshPerPrimitiveNV"
+#endif
+ )
+ {
+ error(loc, "cannot redeclare block: ", "block declaration", blockName.c_str());
+ return;
+ }
+
+ // Redeclaring a built-in block...
+
+ if (instanceName && ! builtInName(*instanceName)) {
+ error(loc, "cannot redeclare a built-in block with a user name", instanceName->c_str(), "");
+ return;
+ }
+
+ // Blocks with instance names are easy to find: look up the instance name.
+ // Anonymous blocks need to be found via a member.
+ bool builtIn;
+ TSymbol* block;
+ if (instanceName)
+ block = symbolTable.find(*instanceName, &builtIn);
+ else
+ block = symbolTable.find(newTypeList.front().type->getFieldName(), &builtIn);
+
+ // If the block was not found, this must be a version/profile/stage
+ // that doesn't have it, or the instance name is wrong.
+ const char* errorName = instanceName ? instanceName->c_str() : newTypeList.front().type->getFieldName().c_str();
+ if (! block) {
+ error(loc, "no declaration found for redeclaration", errorName, "");
+ return;
+ }
+ // Built-in blocks cannot be redeclared more than once; if that had happened,
+ // we'd be finding the already redeclared one here, rather than the built-in.
+ if (! builtIn) {
+ error(loc, "can only redeclare a built-in block once, and before any use", blockName.c_str(), "");
+ return;
+ }
+
+ // Copy the block to make a writable version, to insert into the block table after editing.
+ block = symbolTable.copyUpDeferredInsert(block);
+
+ if (block->getType().getBasicType() != EbtBlock) {
+ error(loc, "cannot redeclare a non block as a block", errorName, "");
+ return;
+ }
+
+ // Fix XFB stuff up, it applies to the order of the redeclaration, not
+ // the order of the original members.
+ if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) {
+ if (!currentBlockQualifier.hasXfbBuffer())
+ currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
+ if (!currentBlockQualifier.hasStream())
+ currentBlockQualifier.layoutStream = globalOutputDefaults.layoutStream;
+ fixXfbOffsets(currentBlockQualifier, newTypeList);
+ }
+
+ // Edit and error check the container against the redeclaration
+ // - remove unused members
+ // - ensure remaining qualifiers/types match
+
+ TType& type = block->getWritableType();
+
+#ifdef NV_EXTENSIONS
+ // If gl_PerVertex is redeclared for the purpose of passing through "gl_Position",
+ // the redeclared block should have the same qualifiers as the current one.
+ if (currentBlockQualifier.layoutPassthrough) {
+ type.getQualifier().layoutPassthrough = currentBlockQualifier.layoutPassthrough;
+ type.getQualifier().storage = currentBlockQualifier.storage;
+ type.getQualifier().layoutStream = currentBlockQualifier.layoutStream;
+ type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
+ }
+#endif
+
+ TTypeList::iterator member = type.getWritableStruct()->begin();
+ size_t numOriginalMembersFound = 0;
+ while (member != type.getStruct()->end()) {
+ // look for match
+ bool found = false;
+ TTypeList::const_iterator newMember;
+ TSourceLoc memberLoc;
+ memberLoc.init();
+ for (newMember = newTypeList.begin(); newMember != newTypeList.end(); ++newMember) {
+ if (member->type->getFieldName() == newMember->type->getFieldName()) {
+ found = true;
+ memberLoc = newMember->loc;
+ break;
+ }
+ }
+
+ if (found) {
+ ++numOriginalMembersFound;
+ // - ensure match between redeclared members' types
+ // - check for things that can't be changed
+ // - update things that can be changed
+ TType& oldType = *member->type;
+ const TType& newType = *newMember->type;
+ if (! newType.sameElementType(oldType))
+ error(memberLoc, "cannot redeclare block member with a different type", member->type->getFieldName().c_str(), "");
+ if (oldType.isArray() != newType.isArray())
+ error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerView() && ! oldType.sameArrayness(newType) && oldType.isSizedArray())
+ error(memberLoc, "cannot change array size of redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerView() && newType.isArray())
+ arrayLimitCheck(loc, member->type->getFieldName(), newType.getOuterArraySize());
+#ifdef NV_EXTENSIONS
+ if (oldType.getQualifier().isPerView() && ! newType.getQualifier().isPerView())
+ error(memberLoc, "missing perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerView() && newType.getQualifier().isPerView())
+ error(memberLoc, "cannot add perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (newType.getQualifier().isPerView()) {
+ if (oldType.getArraySizes()->getNumDims() != newType.getArraySizes()->getNumDims())
+ error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! newType.isUnsizedArray() && newType.getOuterArraySize() != resources.maxMeshViewCountNV)
+ error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", "");
+ else if (newType.getArraySizes()->getNumDims() == 2) {
+ int innerDimSize = newType.getArraySizes()->getDimSize(1);
+ arrayLimitCheck(memberLoc, member->type->getFieldName(), innerDimSize);
+ oldType.getArraySizes()->setDimSize(1, innerDimSize);
+ }
+ }
+ if (oldType.getQualifier().isPerPrimitive() && ! newType.getQualifier().isPerPrimitive())
+ error(memberLoc, "missing perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ else if (! oldType.getQualifier().isPerPrimitive() && newType.getQualifier().isPerPrimitive())
+ error(memberLoc, "cannot add perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+#endif
+ if (newType.getQualifier().isMemory())
+ error(memberLoc, "cannot add memory qualifier to redeclared block member", member->type->getFieldName().c_str(), "");
+ if (newType.getQualifier().hasNonXfbLayout())
+ error(memberLoc, "cannot add non-XFB layout to redeclared block member", member->type->getFieldName().c_str(), "");
+ if (newType.getQualifier().patch)
+ error(memberLoc, "cannot add patch to redeclared block member", member->type->getFieldName().c_str(), "");
+ if (newType.getQualifier().hasXfbBuffer() &&
+ newType.getQualifier().layoutXfbBuffer != currentBlockQualifier.layoutXfbBuffer)
+ error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
+ if (newType.getQualifier().hasStream() &&
+ newType.getQualifier().layoutStream != currentBlockQualifier.layoutStream)
+ error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_stream", "");
+ oldType.getQualifier().centroid = newType.getQualifier().centroid;
+ oldType.getQualifier().sample = newType.getQualifier().sample;
+ oldType.getQualifier().invariant = newType.getQualifier().invariant;
+ oldType.getQualifier().noContraction = newType.getQualifier().noContraction;
+ oldType.getQualifier().smooth = newType.getQualifier().smooth;
+ oldType.getQualifier().flat = newType.getQualifier().flat;
+ oldType.getQualifier().nopersp = newType.getQualifier().nopersp;
+ oldType.getQualifier().layoutXfbOffset = newType.getQualifier().layoutXfbOffset;
+ oldType.getQualifier().layoutXfbBuffer = newType.getQualifier().layoutXfbBuffer;
+ oldType.getQualifier().layoutXfbStride = newType.getQualifier().layoutXfbStride;
+ if (oldType.getQualifier().layoutXfbOffset != TQualifier::layoutXfbBufferEnd) {
+ // If any member has an xfb_offset, then the block's xfb_buffer inherits the current xfb_buffer,
+ // and for xfb processing, the member needs it as well, along with xfb_stride.
+ type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
+ oldType.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer;
+ }
+ if (oldType.isUnsizedArray() && newType.isSizedArray())
+ oldType.changeOuterArraySize(newType.getOuterArraySize());
+
+ // check and process the member's type, which will include managing xfb information
+ layoutTypeCheck(loc, oldType);
+
+ // go to next member
+ ++member;
+ } else {
+ // For missing members of anonymous blocks that have been redeclared,
+ // hide the original (shared) declaration.
+ // Instance-named blocks can just have the member removed.
+ if (instanceName)
+ member = type.getWritableStruct()->erase(member);
+ else {
+ member->type->hideMember();
+ ++member;
+ }
+ }
+ }
+
+ if (spvVersion.vulkan > 0) {
+ // Under Vulkan, streams apply to built-in blocks, instead of them being only on stream 0.
+ type.getQualifier().layoutStream = currentBlockQualifier.layoutStream;
+ }
+
+ if (numOriginalMembersFound < newTypeList.size())
+ error(loc, "block redeclaration has extra members", blockName.c_str(), "");
+ if (type.isArray() != (arraySizes != nullptr) ||
+ (type.isArray() && arraySizes != nullptr && type.getArraySizes()->getNumDims() != arraySizes->getNumDims()))
+ error(loc, "cannot change arrayness of redeclared block", blockName.c_str(), "");
+ else if (type.isArray()) {
+ // At this point, we know both are arrays and both have the same number of dimensions.
+
+ // It is okay for a built-in block redeclaration to be unsized, and keep the size of the
+ // original block declaration.
+ if (!arraySizes->isSized() && type.isSizedArray())
+ arraySizes->changeOuterSize(type.getOuterArraySize());
+
+ // And, okay to be giving a size to the array, by the redeclaration
+ if (!type.isSizedArray() && arraySizes->isSized())
+ type.changeOuterArraySize(arraySizes->getOuterSize());
+
+ // Now, they must match in all dimensions.
+ if (type.isSizedArray() && *type.getArraySizes() != *arraySizes)
+ error(loc, "cannot change array size of redeclared block", blockName.c_str(), "");
+ }
+
+ symbolTable.insert(*block);
+
+ // Check for general layout qualifier errors
+ layoutObjectCheck(loc, *block);
+
+ // Tracking for implicit sizing of array
+ if (isIoResizeArray(block->getType())) {
+ ioArraySymbolResizeList.push_back(block);
+ checkIoArraysConsistency(loc, true);
+ } else if (block->getType().isArray())
+ fixIoArraySize(loc, block->getWritableType());
+
+ // Save it in the AST for linker use.
+ trackLinkage(*block);
+}
+
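+// Map a parameter's declared storage qualifier onto the storage used for the
+// parameter's type. For example (illustrative), "const" parameters become
+// EvqConstReadOnly and unqualified parameters default to EvqIn.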
+void TParseContext::paramCheckFixStorage(const TSourceLoc& loc, const TStorageQualifier& qualifier, TType& type)
+{
+ switch (qualifier) {
+ case EvqConst:
+ case EvqConstReadOnly:
+ type.getQualifier().storage = EvqConstReadOnly;
+ break;
+ case EvqIn:
+ case EvqOut:
+ case EvqInOut:
+ type.getQualifier().storage = qualifier;
+ break;
+ case EvqGlobal:
+ case EvqTemporary:
+ type.getQualifier().storage = EvqIn;
+ break;
+ default:
+ type.getQualifier().storage = EvqIn;
+ error(loc, "storage qualifier not allowed on function parameter", GetStorageQualifierString(qualifier), "");
+ break;
+ }
+}
+
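+// Check a parameter's qualifier and copy the applicable pieces onto the parameter's
+// type: memory qualifiers, "precise" on output parameters, and "nonuniform"; then
+// fix up the storage qualifier via paramCheckFixStorage().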
+void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& qualifier, TType& type)
+{
+ if (qualifier.isMemory()) {
+ type.getQualifier().volatil = qualifier.volatil;
+ type.getQualifier().coherent = qualifier.coherent;
+ type.getQualifier().devicecoherent = qualifier.devicecoherent;
+ type.getQualifier().queuefamilycoherent = qualifier.queuefamilycoherent;
+ type.getQualifier().workgroupcoherent = qualifier.workgroupcoherent;
+ type.getQualifier().subgroupcoherent = qualifier.subgroupcoherent;
+ type.getQualifier().nonprivate = qualifier.nonprivate;
+ type.getQualifier().readonly = qualifier.readonly;
+ type.getQualifier().writeonly = qualifier.writeonly;
+ type.getQualifier().restrict = qualifier.restrict;
+ }
+
+ if (qualifier.isAuxiliary() ||
+ qualifier.isInterpolation())
+ error(loc, "cannot use auxiliary or interpolation qualifiers on a function parameter", "", "");
+ if (qualifier.hasLayout())
+ error(loc, "cannot use layout qualifiers on a function parameter", "", "");
+ if (qualifier.invariant)
+ error(loc, "cannot use invariant qualifier on a function parameter", "", "");
+ if (qualifier.noContraction) {
+ if (qualifier.isParamOutput())
+ type.getQualifier().noContraction = true;
+ else
+ warn(loc, "qualifier has no effect on non-output parameters", "precise", "");
+ }
+ if (qualifier.isNonUniform())
+ type.getQualifier().nonUniform = qualifier.nonUniform;
+
+ paramCheckFixStorage(loc, qualifier.storage, type);
+}
+
+void TParseContext::nestedBlockCheck(const TSourceLoc& loc)
+{
+ if (structNestingLevel > 0)
+ error(loc, "cannot nest a block definition inside a structure or block", "", "");
+ ++structNestingLevel;
+}
+
+void TParseContext::nestedStructCheck(const TSourceLoc& loc)
+{
+ if (structNestingLevel > 0)
+ error(loc, "cannot nest a structure definition inside a structure or block", "", "");
+ ++structNestingLevel;
+}
+
+void TParseContext::arrayObjectCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ // Some versions don't allow comparing arrays or structures containing arrays
+ if (type.containsArray()) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, op);
+ profileRequires(loc, EEsProfile, 300, nullptr, op);
+ }
+}
+
+void TParseContext::opaqueCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (containsFieldWithBasicType(type, EbtSampler))
+ error(loc, "can't use with samplers or structs containing samplers", op, "");
+}
+
+void TParseContext::referenceCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (containsFieldWithBasicType(type, EbtReference))
+ error(loc, "can't use with reference types", op, "");
+}
+
+void TParseContext::storage16BitAssignmentCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtFloat16))
+ requireFloat16Arithmetic(loc, op, "can't use with structs containing float16");
+
+ if (type.isArray() && type.getBasicType() == EbtFloat16)
+ requireFloat16Arithmetic(loc, op, "can't use with arrays containing float16");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt16))
+ requireInt16Arithmetic(loc, op, "can't use with structs containing int16");
+
+ if (type.isArray() && type.getBasicType() == EbtInt16)
+ requireInt16Arithmetic(loc, op, "can't use with arrays containing int16");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint16))
+ requireInt16Arithmetic(loc, op, "can't use with structs containing uint16");
+
+ if (type.isArray() && type.getBasicType() == EbtUint16)
+ requireInt16Arithmetic(loc, op, "can't use with arrays containing uint16");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt8))
+ requireInt8Arithmetic(loc, op, "can't use with structs containing int8");
+
+ if (type.isArray() && type.getBasicType() == EbtInt8)
+ requireInt8Arithmetic(loc, op, "can't use with arrays containing int8");
+
+ if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint8))
+ requireInt8Arithmetic(loc, op, "can't use with structs containing uint8");
+
+ if (type.isArray() && type.getBasicType() == EbtUint8)
+ requireInt8Arithmetic(loc, op, "can't use with arrays containing uint8");
+}
+
+void TParseContext::specializationCheck(const TSourceLoc& loc, const TType& type, const char* op)
+{
+ if (type.containsSpecializationSize())
+ error(loc, "can't use with types containing arrays sized with a specialization constant", op, "");
+}
+
+void TParseContext::structTypeCheck(const TSourceLoc& /*loc*/, TPublicType& publicType)
+{
+ const TTypeList& typeList = *publicType.userDef->getStruct();
+
+ // fix and check for member storage qualifiers and types that don't belong within a structure
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ if (memberQualifier.isAuxiliary() ||
+ memberQualifier.isInterpolation() ||
+ (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal))
+ error(memberLoc, "cannot use storage or interpolation qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
+ if (memberQualifier.isMemory())
+ error(memberLoc, "cannot use memory qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
+ if (memberQualifier.hasLayout()) {
+ error(memberLoc, "cannot use layout qualifiers on structure members", typeList[member].type->getFieldName().c_str(), "");
+ memberQualifier.clearLayout();
+ }
+ if (memberQualifier.invariant)
+ error(memberLoc, "cannot use invariant qualifier on structure members", typeList[member].type->getFieldName().c_str(), "");
+ }
+}
+
+//
+// See if this loop satisfies the limitations for ES 2.0 (version 100) for loops in Appendix A:
+//
+// "The loop index has type int or float.
+//
+// "The for statement has the form:
+// for ( init-declaration ; condition ; expression )
+// init-declaration has the form: type-specifier identifier = constant-expression
+// condition has the form: loop-index relational_operator constant-expression
+// where relational_operator is one of: > >= < <= == or !=
+// expression [sic] has one of the following forms:
+// loop-index++
+// loop-index--
+// loop-index += constant-expression
+// loop-index -= constant-expression
+//
+// The body is handled in an AST traversal.
+//
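+// For example (illustrative), ES 2.0 accepts
+//     for (int i = 0; i < 8; ++i) { ... }
+// but rejects a loop whose index is not initialized with a constant expression.
+//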
+void TParseContext::inductiveLoopCheck(const TSourceLoc& loc, TIntermNode* init, TIntermLoop* loop)
+{
+ // The loop-index init must exist and be a declaration, which shows up in the AST
+ // as an aggregate of size 1 holding the declaration.
+ bool badInit = false;
+ if (! init || ! init->getAsAggregate() || init->getAsAggregate()->getSequence().size() != 1)
+ badInit = true;
+ TIntermBinary* binaryInit = nullptr;
+ if (! badInit) {
+ // get the declaration assignment
+ binaryInit = init->getAsAggregate()->getSequence()[0]->getAsBinaryNode();
+ if (! binaryInit)
+ badInit = true;
+ }
+ if (badInit) {
+ error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // loop index must be type int or float
+ if (! binaryInit->getType().isScalar() || (binaryInit->getBasicType() != EbtInt && binaryInit->getBasicType() != EbtFloat)) {
+ error(loc, "inductive loop requires a scalar 'int' or 'float' loop index", "limitations", "");
+ return;
+ }
+
+ // init is the form "loop-index = constant"
+ if (binaryInit->getOp() != EOpAssign || ! binaryInit->getLeft()->getAsSymbolNode() || ! binaryInit->getRight()->getAsConstantUnion()) {
+ error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // get the unique id of the loop index
+ int loopIndex = binaryInit->getLeft()->getAsSymbolNode()->getId();
+ inductiveLoopIds.insert(loopIndex);
+
+ // condition's form must be "loop-index relational-operator constant-expression"
+ bool badCond = ! loop->getTest();
+ if (! badCond) {
+ TIntermBinary* binaryCond = loop->getTest()->getAsBinaryNode();
+ badCond = ! binaryCond;
+ if (! badCond) {
+ switch (binaryCond->getOp()) {
+ case EOpGreaterThan:
+ case EOpGreaterThanEqual:
+ case EOpLessThan:
+ case EOpLessThanEqual:
+ case EOpEqual:
+ case EOpNotEqual:
+ break;
+ default:
+ badCond = true;
+ }
+ }
+ if (binaryCond && (! binaryCond->getLeft()->getAsSymbolNode() ||
+ binaryCond->getLeft()->getAsSymbolNode()->getId() != loopIndex ||
+ ! binaryCond->getRight()->getAsConstantUnion()))
+ badCond = true;
+ }
+ if (badCond) {
+ error(loc, "inductive-loop condition requires the form \"loop-index <comparison-op> constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // loop-index++
+ // loop-index--
+ // loop-index += constant-expression
+ // loop-index -= constant-expression
+ bool badTerminal = ! loop->getTerminal();
+ if (! badTerminal) {
+ TIntermUnary* unaryTerminal = loop->getTerminal()->getAsUnaryNode();
+ TIntermBinary* binaryTerminal = loop->getTerminal()->getAsBinaryNode();
+ if (unaryTerminal || binaryTerminal) {
+ switch(loop->getTerminal()->getAsOperator()->getOp()) {
+ case EOpPostDecrement:
+ case EOpPostIncrement:
+ case EOpAddAssign:
+ case EOpSubAssign:
+ break;
+ default:
+ badTerminal = true;
+ }
+ } else
+ badTerminal = true;
+ if (binaryTerminal && (! binaryTerminal->getLeft()->getAsSymbolNode() ||
+ binaryTerminal->getLeft()->getAsSymbolNode()->getId() != loopIndex ||
+ ! binaryTerminal->getRight()->getAsConstantUnion()))
+ badTerminal = true;
+ if (unaryTerminal && (! unaryTerminal->getOperand()->getAsSymbolNode() ||
+ unaryTerminal->getOperand()->getAsSymbolNode()->getId() != loopIndex))
+ badTerminal = true;
+ }
+ if (badTerminal) {
+ error(loc, "inductive-loop termination requires the form \"loop-index++, loop-index--, loop-index += constant-expression, or loop-index -= constant-expression\"", "limitations", "");
+ return;
+ }
+
+ // the body
+ inductiveLoopBodyCheck(loop->getBody(), loopIndex, symbolTable);
+}
+
+// Do limit checks for built-in arrays.
+void TParseContext::arrayLimitCheck(const TSourceLoc& loc, const TString& identifier, int size)
+{
+ if (identifier.compare("gl_TexCoord") == 0)
+ limitCheck(loc, size, "gl_MaxTextureCoords", "gl_TexCoord array size");
+ else if (identifier.compare("gl_ClipDistance") == 0)
+ limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistance array size");
+ else if (identifier.compare("gl_CullDistance") == 0)
+ limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistance array size");
+#ifdef NV_EXTENSIONS
+ else if (identifier.compare("gl_ClipDistancePerViewNV") == 0)
+ limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistancePerViewNV array size");
+ else if (identifier.compare("gl_CullDistancePerViewNV") == 0)
+ limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistancePerViewNV array size");
+#endif
+}
+
+// See if the provided value is less than or equal to the symbol indicated by limit,
+// which should be a constant in the symbol table.
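+//
+// For example (illustrative), arrayLimitCheck() above calls this as
+//     limitCheck(loc, size, "gl_MaxTextureCoords", "gl_TexCoord array size");
+// and an error results if size exceeds the symbol table's gl_MaxTextureCoords.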
+void TParseContext::limitCheck(const TSourceLoc& loc, int value, const char* limit, const char* feature)
+{
+ TSymbol* symbol = symbolTable.find(limit);
+ assert(symbol->getAsVariable());
+ const TConstUnionArray& constArray = symbol->getAsVariable()->getConstArray();
+ assert(! constArray.empty());
+ if (value > constArray[0].getIConst())
+ error(loc, "must be less than or equal to", feature, "%s (%d)", limit, constArray[0].getIConst());
+}
+
+//
+// Do any additional error checking, etc., once we know the parsing is done.
+//
+void TParseContext::finish()
+{
+ TParseContextBase::finish();
+
+ if (parsingBuiltins)
+ return;
+
+ // Check on array indexes for ES 2.0 (version 100) limitations.
+ for (size_t i = 0; i < needsIndexLimitationChecking.size(); ++i)
+ constantIndexExpressionCheck(needsIndexLimitationChecking[i]);
+
+ // Check for stages that are enabled by extension.
+ // Can't do this at the beginning; adding a stage by extension is a
+ // chicken-and-egg problem.
+ // Stage-specific features were correctly tested for already; this is just
+ // about the stage itself.
+ switch (language) {
+ case EShLangGeometry:
+ if (profile == EEsProfile && version == 310)
+ requireExtensions(getCurrentLoc(), Num_AEP_geometry_shader, AEP_geometry_shader, "geometry shaders");
+ break;
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ if (profile == EEsProfile && version == 310)
+ requireExtensions(getCurrentLoc(), Num_AEP_tessellation_shader, AEP_tessellation_shader, "tessellation shaders");
+ else if (profile != EEsProfile && version < 400)
+ requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_tessellation_shader, "tessellation shaders");
+ break;
+ case EShLangCompute:
+ if (profile != EEsProfile && version < 430)
+ requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_compute_shader, "compute shaders");
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangTaskNV:
+ requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "task shaders");
+ break;
+ case EShLangMeshNV:
+ requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "mesh shaders");
+ break;
+#endif
+ default:
+ break;
+ }
+
+#ifdef NV_EXTENSIONS
+ // Set default outputs for GL_NV_geometry_shader_passthrough
+ if (language == EShLangGeometry && extensionTurnedOn(E_SPV_NV_geometry_shader_passthrough)) {
+ if (intermediate.getOutputPrimitive() == ElgNone) {
+ switch (intermediate.getInputPrimitive()) {
+ case ElgPoints: intermediate.setOutputPrimitive(ElgPoints); break;
+ case ElgLines: intermediate.setOutputPrimitive(ElgLineStrip); break;
+ case ElgTriangles: intermediate.setOutputPrimitive(ElgTriangleStrip); break;
+ default: break;
+ }
+ }
+ if (intermediate.getVertices() == TQualifier::layoutNotSet) {
+ switch (intermediate.getInputPrimitive()) {
+ case ElgPoints: intermediate.setVertices(1); break;
+ case ElgLines: intermediate.setVertices(2); break;
+ case ElgTriangles: intermediate.setVertices(3); break;
+ default: break;
+ }
+ }
+ }
+#endif
+}
+
+//
+// Layout qualifier stuff.
+//
+
+// Put the id's layout qualification into the public type, for qualifiers not having a number set.
+// This is before we know any type information for error checking.
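+//
+// For example (illustrative), this overload handles "layout(std140)" and
+// "layout(row_major)"; qualifiers taking a value, like "layout(location = 2)",
+// go through the overload further below.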
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id)
+{
+ std::transform(id.begin(), id.end(), id.begin(), ::tolower);
+
+ if (id == TQualifier::getLayoutMatrixString(ElmColumnMajor)) {
+ publicType.qualifier.layoutMatrix = ElmColumnMajor;
+ return;
+ }
+ if (id == TQualifier::getLayoutMatrixString(ElmRowMajor)) {
+ publicType.qualifier.layoutMatrix = ElmRowMajor;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpPacked)) {
+ if (spvVersion.spv != 0)
+ spvRemoved(loc, "packed");
+ publicType.qualifier.layoutPacking = ElpPacked;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpShared)) {
+ if (spvVersion.spv != 0)
+ spvRemoved(loc, "shared");
+ publicType.qualifier.layoutPacking = ElpShared;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpStd140)) {
+ publicType.qualifier.layoutPacking = ElpStd140;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpStd430)) {
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "std430");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, "std430");
+ profileRequires(loc, EEsProfile, 310, nullptr, "std430");
+ publicType.qualifier.layoutPacking = ElpStd430;
+ return;
+ }
+ if (id == TQualifier::getLayoutPackingString(ElpScalar)) {
+ requireVulkan(loc, "scalar");
+ requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "scalar block layout");
+ publicType.qualifier.layoutPacking = ElpScalar;
+ return;
+ }
+ // TODO: compile-time performance: may need to stop doing linear searches
+ for (TLayoutFormat format = (TLayoutFormat)(ElfNone + 1); format < ElfCount; format = (TLayoutFormat)(format + 1)) {
+ if (id == TQualifier::getLayoutFormatString(format)) {
+ if ((format > ElfEsFloatGuard && format < ElfFloatGuard) ||
+ (format > ElfEsIntGuard && format < ElfIntGuard) ||
+ (format > ElfEsUintGuard && format < ElfCount))
+ requireProfile(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, "image load-store format");
+ profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "image load store");
+ profileRequires(loc, EEsProfile, 310, E_GL_ARB_shader_image_load_store, "image load store");
+ publicType.qualifier.layoutFormat = format;
+ return;
+ }
+ }
+ if (id == "push_constant") {
+ requireVulkan(loc, "push_constant");
+ publicType.qualifier.layoutPushConstant = true;
+ return;
+ }
+ if (id == "buffer_reference") {
+ requireVulkan(loc, "buffer_reference");
+ requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference");
+ publicType.qualifier.layoutBufferReference = true;
+ intermediate.setUseStorageBuffer();
+ intermediate.setUsePhysicalStorageBuffer();
+ return;
+ }
+ if (language == EShLangGeometry || language == EShLangTessEvaluation
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ ) {
+ if (id == TQualifier::getGeometryString(ElgTriangles)) {
+ publicType.shaderQualifiers.geometry = ElgTriangles;
+ return;
+ }
+ if (language == EShLangGeometry
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ ) {
+ if (id == TQualifier::getGeometryString(ElgPoints)) {
+ publicType.shaderQualifiers.geometry = ElgPoints;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgLines)) {
+ publicType.shaderQualifiers.geometry = ElgLines;
+ return;
+ }
+#ifdef NV_EXTENSIONS
+ if (language == EShLangGeometry)
+#endif
+ {
+ if (id == TQualifier::getGeometryString(ElgLineStrip)) {
+ publicType.shaderQualifiers.geometry = ElgLineStrip;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgLinesAdjacency)) {
+ publicType.shaderQualifiers.geometry = ElgLinesAdjacency;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgTrianglesAdjacency)) {
+ publicType.shaderQualifiers.geometry = ElgTrianglesAdjacency;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgTriangleStrip)) {
+ publicType.shaderQualifiers.geometry = ElgTriangleStrip;
+ return;
+ }
+#ifdef NV_EXTENSIONS
+ if (id == "passthrough") {
+ requireExtensions(loc, 1, &E_SPV_NV_geometry_shader_passthrough, "geometry shader passthrough");
+ publicType.qualifier.layoutPassthrough = true;
+ intermediate.setGeoPassthroughEXT();
+ return;
+ }
+#endif
+ }
+ } else {
+ assert(language == EShLangTessEvaluation);
+
+ // input primitive
+ if (id == TQualifier::getGeometryString(ElgTriangles)) {
+ publicType.shaderQualifiers.geometry = ElgTriangles;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgQuads)) {
+ publicType.shaderQualifiers.geometry = ElgQuads;
+ return;
+ }
+ if (id == TQualifier::getGeometryString(ElgIsolines)) {
+ publicType.shaderQualifiers.geometry = ElgIsolines;
+ return;
+ }
+
+ // vertex spacing
+ if (id == TQualifier::getVertexSpacingString(EvsEqual)) {
+ publicType.shaderQualifiers.spacing = EvsEqual;
+ return;
+ }
+ if (id == TQualifier::getVertexSpacingString(EvsFractionalEven)) {
+ publicType.shaderQualifiers.spacing = EvsFractionalEven;
+ return;
+ }
+ if (id == TQualifier::getVertexSpacingString(EvsFractionalOdd)) {
+ publicType.shaderQualifiers.spacing = EvsFractionalOdd;
+ return;
+ }
+
+ // triangle order
+ if (id == TQualifier::getVertexOrderString(EvoCw)) {
+ publicType.shaderQualifiers.order = EvoCw;
+ return;
+ }
+ if (id == TQualifier::getVertexOrderString(EvoCcw)) {
+ publicType.shaderQualifiers.order = EvoCcw;
+ return;
+ }
+
+ // point mode
+ if (id == "point_mode") {
+ publicType.shaderQualifiers.pointMode = true;
+ return;
+ }
+ }
+ }
+ if (language == EShLangFragment) {
+ if (id == "origin_upper_left") {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "origin_upper_left");
+ publicType.shaderQualifiers.originUpperLeft = true;
+ return;
+ }
+ if (id == "pixel_center_integer") {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "pixel_center_integer");
+ publicType.shaderQualifiers.pixelCenterInteger = true;
+ return;
+ }
+ if (id == "early_fragment_tests") {
+ profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "early_fragment_tests");
+ profileRequires(loc, EEsProfile, 310, nullptr, "early_fragment_tests");
+ publicType.shaderQualifiers.earlyFragmentTests = true;
+ return;
+ }
+ if (id == "post_depth_coverage") {
+ requireExtensions(loc, Num_post_depth_coverageEXTs, post_depth_coverageEXTs, "post depth coverage");
+ if (extensionTurnedOn(E_GL_ARB_post_depth_coverage)) {
+ publicType.shaderQualifiers.earlyFragmentTests = true;
+ }
+ publicType.shaderQualifiers.postDepthCoverage = true;
+ return;
+ }
+ for (TLayoutDepth depth = (TLayoutDepth)(EldNone + 1); depth < EldCount; depth = (TLayoutDepth)(depth+1)) {
+ if (id == TQualifier::getLayoutDepthString(depth)) {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "depth layout qualifier");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, nullptr, "depth layout qualifier");
+ publicType.shaderQualifiers.layoutDepth = depth;
+ return;
+ }
+ }
+ if (id.compare(0, 13, "blend_support") == 0) {
+ bool found = false;
+ for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) {
+ if (id == TQualifier::getBlendEquationString(be)) {
+ profileRequires(loc, EEsProfile, 320, E_GL_KHR_blend_equation_advanced, "blend equation");
+ profileRequires(loc, ~EEsProfile, 0, E_GL_KHR_blend_equation_advanced, "blend equation");
+ intermediate.addBlendEquation(be);
+ publicType.shaderQualifiers.blendEquation = true;
+ found = true;
+ break;
+ }
+ }
+ if (! found)
+ error(loc, "unknown blend equation", "blend_support", "");
+ return;
+ }
+#ifdef NV_EXTENSIONS
+ if (id == "override_coverage") {
+ requireExtensions(loc, 1, &E_GL_NV_sample_mask_override_coverage, "sample mask override coverage");
+ publicType.shaderQualifiers.layoutOverrideCoverage = true;
+ return;
+ }
+ }
+ if (language == EShLangVertex ||
+ language == EShLangTessControl ||
+ language == EShLangTessEvaluation ||
+ language == EShLangGeometry ) {
+ if (id == "viewport_relative") {
+ requireExtensions(loc, 1, &E_GL_NV_viewport_array2, "view port array2");
+ publicType.qualifier.layoutViewportRelative = true;
+ return;
+ }
+ } else {
+ if (language == EShLangRayGenNV || language == EShLangIntersectNV ||
+ language == EShLangAnyHitNV || language == EShLangClosestHitNV ||
+ language == EShLangMissNV || language == EShLangCallableNV) {
+ if (id == "shaderrecordnv") {
+ publicType.qualifier.layoutShaderRecordNV = true;
+ return;
+ }
+ }
+ }
+ if (language == EShLangCompute) {
+ if (id.compare(0, 17, "derivative_group_") == 0) {
+ requireExtensions(loc, 1, &E_GL_NV_compute_shader_derivatives, "compute shader derivatives");
+ if (id == "derivative_group_quadsnv") {
+ publicType.shaderQualifiers.layoutDerivativeGroupQuads = true;
+ return;
+ } else if (id == "derivative_group_linearnv") {
+ publicType.shaderQualifiers.layoutDerivativeGroupLinear = true;
+ return;
+ }
+ }
+ }
+#else
+ }
+#endif
+ error(loc, "unrecognized layout identifier, or qualifier requires assignment (e.g., binding = 4)", id.c_str(), "");
+}
+
+// Put the id's layout qualifier value into the public type, for qualifiers having a number set.
+// This is before we know any type information for error checking.
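+//
+// For example (illustrative), this overload handles "layout(location = 2)" and
+// "layout(binding = 0)".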
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id, const TIntermTyped* node)
+{
+ const char* feature = "layout-id value";
+ const char* nonLiteralFeature = "non-literal layout-id value";
+
+ integerCheck(node, feature);
+ const TIntermConstantUnion* constUnion = node->getAsConstantUnion();
+ int value;
+ if (constUnion) {
+ value = constUnion->getConstArray()[0].getIConst();
+ if (! constUnion->isLiteral()) {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, nonLiteralFeature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, nonLiteralFeature);
+ }
+ } else {
+ // the grammar should have already issued the error message
+ value = 0;
+ }
+
+ if (value < 0) {
+ error(loc, "cannot be negative", feature, "");
+ return;
+ }
+
+ std::transform(id.begin(), id.end(), id.begin(), ::tolower);
+
+ if (id == "offset") {
+ // "offset" can be for either
+ // - uniform offsets
+ // - atomic_uint offsets
+ const char* feature = "offset";
+ if (spvVersion.spv == 0) {
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
+ const char* exts[2] = { E_GL_ARB_enhanced_layouts, E_GL_ARB_shader_atomic_counters };
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, 2, exts, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ }
+ publicType.qualifier.layoutOffset = value;
+ return;
+ } else if (id == "align") {
+ const char* feature = "uniform buffer-member align";
+ if (spvVersion.spv == 0) {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
+ }
+ // "The specified alignment must be a power of 2, or a compile-time error results."
+ if (! IsPow2(value))
+ error(loc, "must be a power of 2", "align", "");
+ else
+ publicType.qualifier.layoutAlign = value;
+ return;
+ } else if (id == "location") {
+ profileRequires(loc, EEsProfile, 300, nullptr, "location");
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ~EEsProfile, 330, 2, exts, "location");
+ if ((unsigned int)value >= TQualifier::layoutLocationEnd)
+ error(loc, "location is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutLocation = value;
+ return;
+ } else if (id == "set") {
+ if ((unsigned int)value >= TQualifier::layoutSetEnd)
+ error(loc, "set is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutSet = value;
+ if (value != 0)
+ requireVulkan(loc, "descriptor set");
+ return;
+ } else if (id == "binding") {
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, "binding");
+ profileRequires(loc, EEsProfile, 310, nullptr, "binding");
+ if ((unsigned int)value >= TQualifier::layoutBindingEnd)
+ error(loc, "binding is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutBinding = value;
+ return;
+ } else if (id == "component") {
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, "component");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "component");
+ if ((unsigned)value >= TQualifier::layoutComponentEnd)
+ error(loc, "component is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutComponent = value;
+ return;
+ } else if (id.compare(0, 4, "xfb_") == 0) {
+ // "Any shader making any static use (after preprocessing) of any of these
+ // *xfb_* qualifiers will cause the shader to be in a transform feedback
+ // capturing mode and hence responsible for describing the transform feedback
+ // setup."
+ intermediate.setXfbMode();
+ const char* feature = "transform feedback qualifier";
+ requireStage(loc, (EShLanguageMask)(EShLangVertexMask | EShLangGeometryMask | EShLangTessControlMask | EShLangTessEvaluationMask), feature);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
+ if (id == "xfb_buffer") {
+ // "It is a compile-time error to specify an *xfb_buffer* that is greater than
+ // the implementation-dependent constant gl_MaxTransformFeedbackBuffers."
+ if (value >= resources.maxTransformFeedbackBuffers)
+ error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d", resources.maxTransformFeedbackBuffers);
+ if (value >= (int)TQualifier::layoutXfbBufferEnd)
+ error(loc, "buffer is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbBufferEnd-1);
+ else
+ publicType.qualifier.layoutXfbBuffer = value;
+ return;
+ } else if (id == "xfb_offset") {
+ if (value >= (int)TQualifier::layoutXfbOffsetEnd)
+ error(loc, "offset is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbOffsetEnd-1);
+ else
+ publicType.qualifier.layoutXfbOffset = value;
+ return;
+ } else if (id == "xfb_stride") {
+ // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
+ // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
+ if (value > 4 * resources.maxTransformFeedbackInterleavedComponents) {
+ error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d",
+ resources.maxTransformFeedbackInterleavedComponents);
+ }
+ if (value >= (int)TQualifier::layoutXfbStrideEnd)
+ error(loc, "stride is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbStrideEnd-1);
+ else
+ publicType.qualifier.layoutXfbStride = value;
+ return;
+ }
+ }
+
+ if (id == "input_attachment_index") {
+ requireVulkan(loc, "input_attachment_index");
+ if (value >= (int)TQualifier::layoutAttachmentEnd)
+ error(loc, "attachment index is too large", id.c_str(), "");
+ else
+ publicType.qualifier.layoutAttachment = value;
+ return;
+ }
+ if (id == "constant_id") {
+ requireSpv(loc, "constant_id");
+ if (value >= (int)TQualifier::layoutSpecConstantIdEnd) {
+ error(loc, "specialization-constant id is too large", id.c_str(), "");
+ } else {
+ publicType.qualifier.layoutSpecConstantId = value;
+ publicType.qualifier.specConstant = true;
+ if (! intermediate.addUsedConstantId(value))
+ error(loc, "specialization-constant id already used", id.c_str(), "");
+ }
+ return;
+ }
+ if (id == "num_views") {
+ requireExtensions(loc, Num_OVR_multiview_EXTs, OVR_multiview_EXTs, "num_views");
+ publicType.shaderQualifiers.numViews = value;
+ return;
+ }
+
+#ifdef NV_EXTENSIONS
+ if (language == EShLangVertex ||
+ language == EShLangTessControl ||
+ language == EShLangTessEvaluation ||
+ language == EShLangGeometry) {
+ if (id == "secondary_view_offset") {
+ requireExtensions(loc, 1, &E_GL_NV_stereo_view_rendering, "stereo view rendering");
+ publicType.qualifier.layoutSecondaryViewportRelativeOffset = value;
+ return;
+ }
+ }
+#endif
+
+ if (id == "buffer_reference_align") {
+ requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference_align");
+ if (! IsPow2(value))
+ error(loc, "must be a power of 2", "buffer_reference_align", "");
+ else
+ publicType.qualifier.layoutBufferReferenceAlign = (unsigned int)std::log2(value);
+ return;
+ }
+
+ switch (language) {
+ case EShLangVertex:
+ break;
+
+ case EShLangTessControl:
+ if (id == "vertices") {
+ if (value == 0)
+ error(loc, "must be greater than 0", "vertices", "");
+ else
+ publicType.shaderQualifiers.vertices = value;
+ return;
+ }
+ break;
+
+ case EShLangTessEvaluation:
+ break;
+
+ case EShLangGeometry:
+ if (id == "invocations") {
+ profileRequires(loc, ECompatibilityProfile | ECoreProfile, 400, nullptr, "invocations");
+ if (value == 0)
+ error(loc, "must be at least 1", "invocations", "");
+ else
+ publicType.shaderQualifiers.invocations = value;
+ return;
+ }
+ if (id == "max_vertices") {
+ publicType.shaderQualifiers.vertices = value;
+ if (value > resources.maxGeometryOutputVertices)
+ error(loc, "too large, must be less than gl_MaxGeometryOutputVertices", "max_vertices", "");
+ return;
+ }
+ if (id == "stream") {
+ requireProfile(loc, ~EEsProfile, "selecting output stream");
+ publicType.qualifier.layoutStream = value;
+ if (value > 0)
+ intermediate.setMultiStream();
+ return;
+ }
+ break;
+
+ case EShLangFragment:
+ if (id == "index") {
+ requireProfile(loc, ECompatibilityProfile | ECoreProfile, "index layout qualifier on fragment output");
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ECompatibilityProfile | ECoreProfile, 330, 2, exts, "index layout qualifier on fragment output");
+
+ // "It is also a compile-time error if a fragment shader sets a layout index to less than 0 or greater than 1."
+ if (value < 0 || value > 1) {
+ value = 0;
+ error(loc, "value must be 0 or 1", "index", "");
+ }
+
+ publicType.qualifier.layoutIndex = value;
+ return;
+ }
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangMeshNV:
+ if (id == "max_vertices") {
+ requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_vertices");
+ publicType.shaderQualifiers.vertices = value;
+ if (value > resources.maxMeshOutputVerticesNV)
+ error(loc, "too large, must be less than gl_MaxMeshOutputVerticesNV", "max_vertices", "");
+ return;
+ }
+ if (id == "max_primitives") {
+ requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_primitives");
+ publicType.shaderQualifiers.primitives = value;
+ if (value > resources.maxMeshOutputPrimitivesNV)
+ error(loc, "too large, must be less than gl_MaxMeshOutputPrimitivesNV", "max_primitives", "");
+ return;
+ }
+ // Fall through
+
+ case EShLangTaskNV:
+ // Fall through
+#endif
+ case EShLangCompute:
+ if (id.compare(0, 11, "local_size_") == 0) {
+#ifdef NV_EXTENSIONS
+ if (language == EShLangMeshNV || language == EShLangTaskNV) {
+ requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "gl_WorkGroupSize");
+ }
+ else
+#endif
+ {
+ profileRequires(loc, EEsProfile, 310, nullptr, "gl_WorkGroupSize");
+ profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_compute_shader, "gl_WorkGroupSize");
+ }
+ if (id.size() == 12 && value == 0) {
+ error(loc, "must be at least 1", id.c_str(), "");
+ return;
+ }
+ if (id == "local_size_x") {
+ publicType.shaderQualifiers.localSize[0] = value;
+ return;
+ }
+ if (id == "local_size_y") {
+ publicType.shaderQualifiers.localSize[1] = value;
+ return;
+ }
+ if (id == "local_size_z") {
+ publicType.shaderQualifiers.localSize[2] = value;
+ return;
+ }
+ if (spvVersion.spv != 0) {
+ if (id == "local_size_x_id") {
+ publicType.shaderQualifiers.localSizeSpecId[0] = value;
+ return;
+ }
+ if (id == "local_size_y_id") {
+ publicType.shaderQualifiers.localSizeSpecId[1] = value;
+ return;
+ }
+ if (id == "local_size_z_id") {
+ publicType.shaderQualifiers.localSizeSpecId[2] = value;
+ return;
+ }
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ error(loc, "there is no such layout identifier for this stage taking an assigned value", id.c_str(), "");
+}
+
+// Merge any layout qualifier information from src into dst, leaving everything else in dst alone
+//
+// "More than one layout qualifier may appear in a single declaration.
+// Additionally, the same layout-qualifier-name can occur multiple times
+// within a layout qualifier or across multiple layout qualifiers in the
+// same declaration. When the same layout-qualifier-name occurs
+// multiple times, in a single declaration, the last occurrence overrides
+// the former occurrence(s). Further, if such a layout-qualifier-name
+// will effect subsequent declarations or other observable behavior, it
+// is only the last occurrence that will have any effect, behaving as if
+// the earlier occurrence(s) within the declaration are not present.
+// This is also true for overriding layout-qualifier-names, where one
+// overrides the other (e.g., row_major vs. column_major); only the last
+// occurrence has any effect."
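+//
+// For example (illustrative), in "layout(row_major) layout(column_major) uniform ..."
+// only column_major takes effect.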
+void TParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly)
+{
+ if (src.hasMatrix())
+ dst.layoutMatrix = src.layoutMatrix;
+ if (src.hasPacking())
+ dst.layoutPacking = src.layoutPacking;
+
+ if (src.hasStream())
+ dst.layoutStream = src.layoutStream;
+
+ if (src.hasFormat())
+ dst.layoutFormat = src.layoutFormat;
+
+ if (src.hasXfbBuffer())
+ dst.layoutXfbBuffer = src.layoutXfbBuffer;
+
+ if (src.hasAlign())
+ dst.layoutAlign = src.layoutAlign;
+
+ if (src.hasBufferReferenceAlign())
+ dst.layoutBufferReferenceAlign = src.layoutBufferReferenceAlign;
+
+ if (! inheritOnly) {
+ if (src.hasLocation())
+ dst.layoutLocation = src.layoutLocation;
+ if (src.hasComponent())
+ dst.layoutComponent = src.layoutComponent;
+ if (src.hasIndex())
+ dst.layoutIndex = src.layoutIndex;
+
+ if (src.hasOffset())
+ dst.layoutOffset = src.layoutOffset;
+
+ if (src.hasSet())
+ dst.layoutSet = src.layoutSet;
+ if (src.layoutBinding != TQualifier::layoutBindingEnd)
+ dst.layoutBinding = src.layoutBinding;
+
+ if (src.hasXfbStride())
+ dst.layoutXfbStride = src.layoutXfbStride;
+ if (src.hasXfbOffset())
+ dst.layoutXfbOffset = src.layoutXfbOffset;
+ if (src.hasAttachment())
+ dst.layoutAttachment = src.layoutAttachment;
+ if (src.hasSpecConstantId())
+ dst.layoutSpecConstantId = src.layoutSpecConstantId;
+
+ if (src.layoutPushConstant)
+ dst.layoutPushConstant = true;
+
+ if (src.layoutBufferReference)
+ dst.layoutBufferReference = true;
+
+#ifdef NV_EXTENSIONS
+ if (src.layoutPassthrough)
+ dst.layoutPassthrough = true;
+ if (src.layoutViewportRelative)
+ dst.layoutViewportRelative = true;
+ if (src.layoutSecondaryViewportRelativeOffset != -2048)
+ dst.layoutSecondaryViewportRelativeOffset = src.layoutSecondaryViewportRelativeOffset;
+ if (src.layoutShaderRecordNV)
+ dst.layoutShaderRecordNV = true;
+ if (src.pervertexNV)
+ dst.pervertexNV = true;
+#endif
+ }
+}
+
+// Do layout error checking given a full variable/block declaration.
+void TParseContext::layoutObjectCheck(const TSourceLoc& loc, const TSymbol& symbol)
+{
+ const TType& type = symbol.getType();
+ const TQualifier& qualifier = type.getQualifier();
+
+ // first, cross check WRT to just the type
+ layoutTypeCheck(loc, type);
+
+ // now, any remaining error checking based on the object itself
+
+ if (qualifier.hasAnyLocation()) {
+ switch (qualifier.storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ if (symbol.getAsVariable() == nullptr)
+ error(loc, "can only be used on variable declaration", "location", "");
+ break;
+ default:
+ break;
+ }
+ }
+
+ // User-variable location check, which is required for SPIR-V in/out:
+ // - variables have it directly,
+ // - blocks have it on each member (already enforced), so check the first one
+ if (spvVersion.spv > 0 && !parsingBuiltins && qualifier.builtIn == EbvNone &&
+ !qualifier.hasLocation() && !intermediate.getAutoMapLocations()) {
+
+ switch (qualifier.storage) {
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ if (!type.getQualifier().isTaskMemory() &&
+ (type.getBasicType() != EbtBlock ||
+ (!(*type.getStruct())[0].type->getQualifier().hasLocation() &&
+ (*type.getStruct())[0].type->getQualifier().builtIn == EbvNone)))
+ error(loc, "SPIR-V requires location for user input/output", "location", "");
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Check packing and matrix
+ if (qualifier.hasUniformLayout()) {
+ switch (qualifier.storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ if (type.getBasicType() != EbtBlock) {
+ if (qualifier.hasMatrix())
+ error(loc, "cannot specify matrix layout on a variable declaration", "layout", "");
+ if (qualifier.hasPacking())
+ error(loc, "cannot specify packing on a variable declaration", "layout", "");
+ // "The offset qualifier can only be used on block members of blocks..."
+ if (qualifier.hasOffset() && type.getBasicType() != EbtAtomicUint)
+ error(loc, "cannot specify on a variable declaration", "offset", "");
+ // "The align qualifier can only be used on blocks or block members..."
+ if (qualifier.hasAlign())
+ error(loc, "cannot specify on a variable declaration", "align", "");
+ if (qualifier.layoutPushConstant)
+ error(loc, "can only specify on a uniform block", "push_constant", "");
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV)
+ error(loc, "can only specify on a buffer block", "shaderRecordNV", "");
+#endif
+ }
+ break;
+ default:
+ // these were already filtered by layoutTypeCheck() (or its callees)
+ break;
+ }
+ }
+}
+
+// "For some blocks declared as arrays, the location can only be applied at the block level:
+// When a block is declared as an array where additional locations are needed for each member
+// for each block array element, it is a compile-time error to specify locations on the block
+// members. That is, when locations would be under specified by applying them on block members,
+// they are not allowed on block members. For arrayed interfaces (those generally having an
+// extra level of arrayness due to interface expansion), the outer array is stripped before
+// applying this rule."
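+//
+// For example (illustrative GLSL; member locations would be under-specified):
+//     layout(location = 1) out Block { vec4 a; } inst[4];   // OK: block-level location
+//     out Block2 { layout(location = 2) vec4 b; } inst2[4]; // error under this rule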
+void TParseContext::layoutMemberLocationArrayCheck(const TSourceLoc& loc, bool memberWithLocation,
+ TArraySizes* arraySizes)
+{
+ if (memberWithLocation && arraySizes != nullptr) {
+ if (arraySizes->getNumDims() > (currentBlockQualifier.isArrayedIo(language) ? 1 : 0))
+ error(loc, "cannot use in a block array where new locations are needed for each block element",
+ "location", "");
+ }
+}
+
+// Do layout error checking with respect to a type.
+void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
+{
+ const TQualifier& qualifier = type.getQualifier();
+
+ // first, intra-layout qualifier-only error checking
+ layoutQualifierCheck(loc, qualifier);
+
+ // now, error checking combining type and qualifier
+
+ if (qualifier.hasAnyLocation()) {
+ if (qualifier.hasLocation()) {
+ if (qualifier.storage == EvqVaryingOut && language == EShLangFragment) {
+ if (qualifier.layoutLocation >= (unsigned int)resources.maxDrawBuffers)
+ error(loc, "too large for fragment output", "location", "");
+ }
+ }
+ if (qualifier.hasComponent()) {
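+            // e.g. (illustrative GLSL): layout(location = 0, component = 2) in vec3 v;
+            // would occupy components 2..4, violating the rules checked below.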
+ // "It is a compile-time error if this sequence of components gets larger than 3."
+ if (qualifier.layoutComponent + type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1) > 4)
+ error(loc, "type overflows the available 4 components", "component", "");
+
+ // "It is a compile-time error to apply the component qualifier to a matrix, a structure, a block, or an array containing any of these."
+ if (type.isMatrix() || type.getBasicType() == EbtBlock || type.getBasicType() == EbtStruct)
+ error(loc, "cannot apply to a matrix, structure, or block", "component", "");
+
+ // " It is a compile-time error to use component 1 or 3 as the beginning of a double or dvec2."
+ if (type.getBasicType() == EbtDouble)
+ if (qualifier.layoutComponent & 1)
+ error(loc, "doubles cannot start on an odd-numbered component", "component", "");
+ }
+
+ switch (qualifier.storage) {
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ if (type.getBasicType() == EbtBlock)
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "location qualifier on in/out block");
+#ifdef NV_EXTENSIONS
+ if (type.getQualifier().isTaskMemory())
+ error(loc, "cannot apply to taskNV in/out blocks", "location", "");
+#endif
+ break;
+ case EvqUniform:
+ case EvqBuffer:
+ if (type.getBasicType() == EbtBlock)
+ error(loc, "cannot apply to uniform or buffer block", "location", "");
+ break;
+#ifdef NV_EXTENSIONS
+ case EvqPayloadNV:
+ case EvqPayloadInNV:
+ case EvqHitAttrNV:
+ case EvqCallableDataNV:
+ case EvqCallableDataInNV:
+ break;
+#endif
+ default:
+ error(loc, "can only apply to uniform, buffer, in, or out storage qualifiers", "location", "");
+ break;
+ }
+
+ bool typeCollision;
+ int repeated = intermediate.addUsedLocation(qualifier, type, typeCollision);
+ if (repeated >= 0 && ! typeCollision)
+ error(loc, "overlapping use of location", "location", "%d", repeated);
+ // "fragment-shader outputs ... if two variables are placed within the same
+ // location, they must have the same underlying type (floating-point or integer)"
+ if (typeCollision && language == EShLangFragment && qualifier.isPipeOutput())
+ error(loc, "fragment outputs sharing the same location must be the same basic type", "location", "%d", repeated);
+ }
+
+ if (qualifier.hasXfbOffset() && qualifier.hasXfbBuffer()) {
+ int repeated = intermediate.addXfbBufferOffset(type);
+ if (repeated >= 0)
+ error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer);
+
+ // "The offset must be a multiple of the size of the first component of the first
+ // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate
+ // containing a double or 64-bit integer, the offset must also be a multiple of 8..."
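+        // e.g. (illustrative GLSL): layout(xfb_buffer = 0, xfb_offset = 12) out double d;
+        // is an error, since 12 is not a multiple of 8.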
+ if ((type.containsBasicType(EbtDouble) || type.containsBasicType(EbtInt64) || type.containsBasicType(EbtUint64)) &&
+ ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 8))
+ error(loc, "type contains double or 64-bit integer; xfb_offset must be a multiple of 8", "xfb_offset", "");
+#ifdef AMD_EXTENSIONS
+ else if ((type.containsBasicType(EbtBool) || type.containsBasicType(EbtFloat) ||
+ type.containsBasicType(EbtInt) || type.containsBasicType(EbtUint)) &&
+ ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4))
+ error(loc, "must be a multiple of size of first component", "xfb_offset", "");
+ // ..., if applied to an aggregate containing a half float or 16-bit integer, the offset must also be a multiple of 2..."
+ else if ((type.containsBasicType(EbtFloat16) || type.containsBasicType(EbtInt16) || type.containsBasicType(EbtUint16)) &&
+ !IsMultipleOfPow2(qualifier.layoutXfbOffset, 2))
+ error(loc, "type contains half float or 16-bit integer; xfb_offset must be a multiple of 2", "xfb_offset", "");
+#else
+ else if (! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4))
+ error(loc, "must be a multiple of size of first component", "xfb_offset", "");
+#endif
+ }
+
+ if (qualifier.hasXfbStride() && qualifier.hasXfbBuffer()) {
+ if (! intermediate.setXfbBufferStride(qualifier.layoutXfbBuffer, qualifier.layoutXfbStride))
+ error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer);
+ }
+
+ if (qualifier.hasBinding()) {
+ // Binding checking, from the spec:
+ //
+ // "If the binding point for any uniform or shader storage block instance is less than zero, or greater than or
+ // equal to the implementation-dependent maximum number of uniform buffer bindings, a compile-time
+ // error will occur. When the binding identifier is used with a uniform or shader storage block instanced as
+ // an array of size N, all elements of the array from binding through binding + N - 1 must be within this
+ // range."
+ //
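+        // e.g. (illustrative GLSL): layout(binding = 3) uniform sampler2D s[4];
+        // occupies bindings 3 through 6, all of which must be in range.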
+ if (! type.isOpaque() && type.getBasicType() != EbtBlock)
+ error(loc, "requires block, or sampler/image, or atomic-counter type", "binding", "");
+ if (type.getBasicType() == EbtSampler) {
+ int lastBinding = qualifier.layoutBinding;
+ if (type.isArray()) {
+ if (spvVersion.vulkan > 0)
+ lastBinding += 1;
+ else {
+ if (type.isSizedArray())
+ lastBinding += type.getCumulativeArraySize();
+ else {
+ lastBinding += 1;
+ if (spvVersion.vulkan == 0)
+ warn(loc, "assuming binding count of one for compile-time checking of binding numbers for unsized array", "[]", "");
+ }
+ }
+ }
+ if (spvVersion.vulkan == 0 && lastBinding >= resources.maxCombinedTextureImageUnits)
+ error(loc, "sampler binding not less than gl_MaxCombinedTextureImageUnits", "binding", type.isArray() ? "(using array)" : "");
+ }
+ if (type.getBasicType() == EbtAtomicUint) {
+ if (qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) {
+ error(loc, "atomic_uint binding is too large; see gl_MaxAtomicCounterBindings", "binding", "");
+ return;
+ }
+ }
+ } else if (!intermediate.getAutoMapBindings()) {
+ // some types require bindings
+
+ // atomic_uint
+ if (type.getBasicType() == EbtAtomicUint)
+ error(loc, "layout(binding=X) is required", "atomic_uint", "");
+
+ // SPIR-V
+ if (spvVersion.spv > 0) {
+ if (qualifier.isUniformOrBuffer()) {
+ if (type.getBasicType() == EbtBlock && !qualifier.layoutPushConstant &&
+#ifdef NV_EXTENSIONS
+ !qualifier.layoutShaderRecordNV &&
+#endif
+ !qualifier.layoutAttachment &&
+ !qualifier.layoutBufferReference)
+ error(loc, "uniform/buffer blocks require layout(binding=X)", "binding", "");
+ else if (spvVersion.vulkan > 0 && type.getBasicType() == EbtSampler)
+ error(loc, "sampler/texture/image requires layout(binding=X)", "binding", "");
+ }
+ }
+ }
+
+ // some things can't have arrays of arrays
+ if (type.isArrayOfArrays()) {
+ if (spvVersion.vulkan > 0) {
+ if (type.isOpaque() || (type.getQualifier().isUniformOrBuffer() && type.getBasicType() == EbtBlock))
+ warn(loc, "Generating SPIR-V array-of-arrays, but Vulkan only supports single array level for this resource", "[][]", "");
+ }
+ }
+
+ // "The offset qualifier can only be used on block members of blocks..."
+ if (qualifier.hasOffset()) {
+ if (type.getBasicType() == EbtBlock)
+ error(loc, "only applies to block members, not blocks", "offset", "");
+ }
+
+ // Image format
+ if (qualifier.hasFormat()) {
+ if (! type.isImage())
+ error(loc, "only apply to images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ else {
+ if (type.getSampler().type == EbtFloat && qualifier.layoutFormat > ElfFloatGuard)
+ error(loc, "does not apply to floating point images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ if (type.getSampler().type == EbtInt && (qualifier.layoutFormat < ElfFloatGuard || qualifier.layoutFormat > ElfIntGuard))
+ error(loc, "does not apply to signed integer images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ if (type.getSampler().type == EbtUint && qualifier.layoutFormat < ElfIntGuard)
+ error(loc, "does not apply to unsigned integer images", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+
+ if (profile == EEsProfile) {
+ // "Except for image variables qualified with the format qualifiers r32f, r32i, and r32ui, image variables must
+ // specify either memory qualifier readonly or the memory qualifier writeonly."
+ if (! (qualifier.layoutFormat == ElfR32f || qualifier.layoutFormat == ElfR32i || qualifier.layoutFormat == ElfR32ui)) {
+ if (! qualifier.readonly && ! qualifier.writeonly)
+ error(loc, "format requires readonly or writeonly memory qualifier", TQualifier::getLayoutFormatString(qualifier.layoutFormat), "");
+ }
+ }
+ }
+ } else if (type.isImage() && ! qualifier.writeonly) {
+ const char *explanation = "image variables not declared 'writeonly' and without a format layout qualifier";
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, explanation);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 0, E_GL_EXT_shader_image_load_formatted, explanation);
+ }
+
+ if (qualifier.layoutPushConstant && type.getBasicType() != EbtBlock)
+ error(loc, "can only be used with a block", "push_constant", "");
+
+ if (qualifier.layoutBufferReference && type.getBasicType() != EbtBlock)
+ error(loc, "can only be used with a block", "buffer_reference", "");
+
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV && type.getBasicType() != EbtBlock)
+ error(loc, "can only be used with a block", "shaderRecordNV", "");
+#endif
+
+ // input attachment
+ if (type.isSubpass()) {
+ if (! qualifier.hasAttachment())
+ error(loc, "requires an input_attachment_index layout qualifier", "subpass", "");
+ } else {
+ if (qualifier.hasAttachment())
+ error(loc, "can only be used with a subpass", "input_attachment_index", "");
+ }
+
+ // specialization-constant id
+ if (qualifier.hasSpecConstantId()) {
+ if (type.getQualifier().storage != EvqConst)
+ error(loc, "can only be applied to 'const'-qualified scalar", "constant_id", "");
+ if (! type.isScalar())
+ error(loc, "can only be applied to a scalar", "constant_id", "");
+ switch (type.getBasicType())
+ {
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtBool:
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ break;
+ default:
+ error(loc, "cannot be applied to this type", "constant_id", "");
+ break;
+ }
+ }
+}
+
+// Do layout error checking that can be done within a layout qualifier proper, not needing to know
+// if there are blocks, atomic counters, variables, etc.
+void TParseContext::layoutQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ if (qualifier.storage == EvqShared && qualifier.hasLayout())
+ error(loc, "cannot apply layout qualifiers to a shared variable", "shared", "");
+
+ // "It is a compile-time error to use *component* without also specifying the location qualifier (order does not matter)."
+ if (qualifier.hasComponent() && ! qualifier.hasLocation())
+ error(loc, "must specify 'location' to use 'component'", "component", "");
+
+ if (qualifier.hasAnyLocation()) {
+
+ // "As with input layout qualifiers, all shaders except compute shaders
+ // allow *location* layout qualifiers on output variable declarations,
+ // output block declarations, and output block member declarations."
+
+ switch (qualifier.storage) {
+ case EvqVaryingIn:
+ {
+ const char* feature = "location qualifier on input";
+ if (profile == EEsProfile && version < 310)
+ requireStage(loc, EShLangVertex, feature);
+ else
+ requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature);
+ if (language == EShLangVertex) {
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ~EEsProfile, 330, 2, exts, feature);
+ profileRequires(loc, EEsProfile, 300, nullptr, feature);
+ } else {
+ profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ }
+ break;
+ }
+ case EvqVaryingOut:
+ {
+ const char* feature = "location qualifier on output";
+ if (profile == EEsProfile && version < 310)
+ requireStage(loc, EShLangFragment, feature);
+ else
+ requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature);
+ if (language == EShLangFragment) {
+ const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location };
+ profileRequires(loc, ~EEsProfile, 330, 2, exts, feature);
+ profileRequires(loc, EEsProfile, 300, nullptr, feature);
+ } else {
+ profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ }
+ break;
+ }
+ case EvqUniform:
+ case EvqBuffer:
+ {
+ const char* feature = "location qualifier on uniform or buffer";
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, feature);
+ profileRequires(loc, EEsProfile, 310, nullptr, feature);
+ break;
+ }
+ default:
+ break;
+ }
+ if (qualifier.hasIndex()) {
+ if (qualifier.storage != EvqVaryingOut)
+ error(loc, "can only be used on an output", "index", "");
+ if (! qualifier.hasLocation())
+ error(loc, "can only be used with an explicit location", "index", "");
+ }
+ }
+
+ if (qualifier.hasBinding()) {
+ if (! qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory())
+ error(loc, "requires uniform or buffer storage qualifier", "binding", "");
+ }
+ if (qualifier.hasStream()) {
+ if (!qualifier.isPipeOutput())
+ error(loc, "can only be used on an output", "stream", "");
+ }
+ if (qualifier.hasXfb()) {
+ if (!qualifier.isPipeOutput())
+ error(loc, "can only be used on an output", "xfb layout qualifier", "");
+ }
+ if (qualifier.hasUniformLayout()) {
+ if (! qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) {
+ if (qualifier.hasMatrix() || qualifier.hasPacking())
+ error(loc, "matrix or packing qualifiers can only be used on a uniform or buffer", "layout", "");
+ if (qualifier.hasOffset() || qualifier.hasAlign())
+ error(loc, "offset/align can only be used on a uniform or buffer", "layout", "");
+ }
+ }
+ if (qualifier.layoutPushConstant) {
+ if (qualifier.storage != EvqUniform)
+ error(loc, "can only be used with a uniform", "push_constant", "");
+ if (qualifier.hasSet())
+ error(loc, "cannot be used with push_constant", "set", "");
+ }
+ if (qualifier.layoutBufferReference) {
+ if (qualifier.storage != EvqBuffer)
+ error(loc, "can only be used with buffer", "buffer_reference", "");
+ }
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV) {
+ if (qualifier.storage != EvqBuffer)
+ error(loc, "can only be used with a buffer", "shaderRecordNV", "");
+ if (qualifier.hasBinding())
+ error(loc, "cannot be used with shaderRecordNV", "binding", "");
+ if (qualifier.hasSet())
+ error(loc, "cannot be used with shaderRecordNV", "set", "");
+
+ }
+ if (qualifier.storage == EvqHitAttrNV && qualifier.hasLayout()) {
+ error(loc, "cannot apply layout qualifiers to hitAttributeNV variable", "hitAttributeNV", "");
+ }
+#endif
+}
+
+// For places that can't have shader-level layout qualifiers
+void TParseContext::checkNoShaderLayouts(const TSourceLoc& loc, const TShaderQualifiers& shaderQualifiers)
+{
+ const char* message = "can only apply to a standalone qualifier";
+
+ if (shaderQualifiers.geometry != ElgNone)
+ error(loc, message, TQualifier::getGeometryString(shaderQualifiers.geometry), "");
+ if (shaderQualifiers.spacing != EvsNone)
+ error(loc, message, TQualifier::getVertexSpacingString(shaderQualifiers.spacing), "");
+ if (shaderQualifiers.order != EvoNone)
+ error(loc, message, TQualifier::getVertexOrderString(shaderQualifiers.order), "");
+ if (shaderQualifiers.pointMode)
+ error(loc, message, "point_mode", "");
+ if (shaderQualifiers.invocations != TQualifier::layoutNotSet)
+ error(loc, message, "invocations", "");
+ if (shaderQualifiers.earlyFragmentTests)
+ error(loc, message, "early_fragment_tests", "");
+ if (shaderQualifiers.postDepthCoverage)
+ error(loc, message, "post_depth_coverage", "");
+ for (int i = 0; i < 3; ++i) {
+ if (shaderQualifiers.localSize[i] > 1)
+ error(loc, message, "local_size", "");
+ if (shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet)
+ error(loc, message, "local_size id", "");
+ }
+ if (shaderQualifiers.vertices != TQualifier::layoutNotSet) {
+ if (language == EShLangGeometry
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ )
+ error(loc, message, "max_vertices", "");
+ else if (language == EShLangTessControl)
+ error(loc, message, "vertices", "");
+ else
+ assert(0);
+ }
+#ifdef NV_EXTENSIONS
+ if (shaderQualifiers.primitives != TQualifier::layoutNotSet) {
+ if (language == EShLangMeshNV)
+ error(loc, message, "max_primitives", "");
+ else
+ assert(0);
+ }
+#endif
+ if (shaderQualifiers.blendEquation)
+ error(loc, message, "blend equation", "");
+ if (shaderQualifiers.numViews != TQualifier::layoutNotSet)
+ error(loc, message, "num_views", "");
+}
+
+// Correct and/or advance an object's offset layout qualifier.
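+//
+// For example (illustrative GLSL; each atomic_uint advances the default
+// offset for its binding by 4):
+//     layout(binding = 0) uniform atomic_uint a;  // gets offset 0
+//     layout(binding = 0) uniform atomic_uint b;  // gets offset 4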
+void TParseContext::fixOffset(const TSourceLoc& loc, TSymbol& symbol)
+{
+ const TQualifier& qualifier = symbol.getType().getQualifier();
+ if (symbol.getType().getBasicType() == EbtAtomicUint) {
+ if (qualifier.hasBinding() && (int)qualifier.layoutBinding < resources.maxAtomicCounterBindings) {
+
+ // Set the offset
+ int offset;
+ if (qualifier.hasOffset())
+ offset = qualifier.layoutOffset;
+ else
+ offset = atomicUintOffsets[qualifier.layoutBinding];
+ symbol.getWritableType().getQualifier().layoutOffset = offset;
+
+ // Check for overlap
+ int numOffsets = 4;
+ if (symbol.getType().isArray()) {
+ if (symbol.getType().isSizedArray() && !symbol.getType().getArraySizes()->isInnerUnsized())
+ numOffsets *= symbol.getType().getCumulativeArraySize();
+ else {
+ // "It is a compile-time error to declare an unsized array of atomic_uint."
+ error(loc, "array must be explicitly sized", "atomic_uint", "");
+ }
+ }
+ int repeated = intermediate.addUsedOffsets(qualifier.layoutBinding, offset, numOffsets);
+ if (repeated >= 0)
+ error(loc, "atomic counters sharing the same offset:", "offset", "%d", repeated);
+
+ // Bump the default offset
+ atomicUintOffsets[qualifier.layoutBinding] = offset + numOffsets;
+ }
+ }
+}
+
+//
+// Look up a function name in the symbol table, and make sure it is a function.
+//
+// Return the function symbol if found, otherwise nullptr.
+//
+const TFunction* TParseContext::findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ const TFunction* function = nullptr;
+
+ if (symbolTable.isFunctionNameVariable(call.getName())) {
+ error(loc, "can't use function syntax on variable", call.getName().c_str(), "");
+ return nullptr;
+ }
+
+ bool explicitTypesEnabled = extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
+ extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64);
+
+ if (profile == EEsProfile || version < 120)
+ function = findFunctionExact(loc, call, builtIn);
+ else if (version < 400)
+ function = findFunction120(loc, call, builtIn);
+ else if (explicitTypesEnabled)
+ function = findFunctionExplicitTypes(loc, call, builtIn);
+ else
+ function = findFunction400(loc, call, builtIn);
+
+ return function;
+}
+
+// Function finding algorithm for ES and desktop 110.
+const TFunction* TParseContext::findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol == nullptr) {
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+ return nullptr;
+ }
+
+ return symbol->getAsFunction();
+}
+
+// Function finding algorithm for desktop versions 120 through 330.
+const TFunction* TParseContext::findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // exact match not found, look through a list of overloaded functions of the same name
+
+ // "If no exact match is found, then [implicit conversions] will be applied to find a match. Mismatched types
+ // on input parameters (in or inout or default) must have a conversion from the calling argument type to the
+ // formal parameter type. Mismatched types on output parameters (out or inout) must have a conversion
+ // from the formal parameter type to the calling argument type. When argument conversions are used to find
+ // a match, it is a semantic error if there are multiple ways to apply these conversions to make the call match
+ // more than one function."
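+    //
+    // e.g. (illustrative GLSL, under these pre-400 rules): with both
+    // void f(float) and void f(uint) declared, calling f(1) can convert the
+    // int argument either way, so the call is reported as ambiguous.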
+
+ const TFunction* candidate = nullptr;
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+ const TFunction& function = *(*it);
+
+ // to even be a potential match, number of arguments has to match
+ if (call.getParamCount() != function.getParamCount())
+ continue;
+
+ bool possibleMatch = true;
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ // same types is easy
+ if (*function[i].type == *call[i].type)
+ continue;
+
+ // We have a mismatch in type, see if it is implicitly convertible
+
+ if (function[i].type->isArray() || call[i].type->isArray() ||
+ ! function[i].type->sameElementShape(*call[i].type))
+ possibleMatch = false;
+ else {
+ // do direction-specific checks for conversion of basic type
+ if (function[i].type->getQualifier().isParamInput()) {
+ if (! intermediate.canImplicitlyPromote(call[i].type->getBasicType(), function[i].type->getBasicType()))
+ possibleMatch = false;
+ }
+ if (function[i].type->getQualifier().isParamOutput()) {
+ if (! intermediate.canImplicitlyPromote(function[i].type->getBasicType(), call[i].type->getBasicType()))
+ possibleMatch = false;
+ }
+ }
+ if (! possibleMatch)
+ break;
+ }
+ if (possibleMatch) {
+ if (candidate) {
+ // our second match, meaning ambiguity
+ error(loc, "ambiguous function signature match: multiple signatures match under implicit type conversion", call.getName().c_str(), "");
+ } else
+ candidate = &function;
+ }
+ }
+
+ if (candidate == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+ return candidate;
+}
+
+// Function finding algorithm for desktop version 400 and above.
+//
+// "When function calls are resolved, an exact type match for all the arguments
+// is sought. If an exact match is found, all other functions are ignored, and
+// the exact match is used. If no exact match is found, then the implicit
+// conversions in section 4.1.10 Implicit Conversions will be applied to find
+// a match. Mismatched types on input parameters (in or inout or default) must
+// have a conversion from the calling argument type to the formal parameter type.
+// Mismatched types on output parameters (out or inout) must have a conversion
+// from the formal parameter type to the calling argument type.
+//
+// "If implicit conversions can be used to find more than one matching function,
+// a single best-matching function is sought. To determine a best match, the
+// conversions between calling argument and formal parameter types are compared
+// for each function argument and pair of matching functions. After these
+// comparisons are performed, each pair of matching functions are compared.
+// A function declaration A is considered a better match than function
+// declaration B if
+//
+// * for at least one function argument, the conversion for that argument in A
+// is better than the corresponding conversion in B; and
+// * there is no function argument for which the conversion in B is better than
+// the corresponding conversion in A.
+//
+// "If a single function declaration is considered a better match than every
+// other matching function declaration, it will be used. Otherwise, a
+// compile-time semantic error for an ambiguous overloaded function call occurs.
+//
+// "To determine whether the conversion for a single argument in one match is
+// better than that for another match, the following rules are applied, in order:
+//
+// 1. An exact match is better than a match involving any implicit conversion.
+// 2. A match involving an implicit conversion from float to double is better
+// than a match involving any other implicit conversion.
+// 3. A match involving an implicit conversion from either int or uint to float
+// is better than a match involving an implicit conversion from either int
+// or uint to double.
+//
+// "If none of the rules above apply to a particular pair of conversions, neither
+// conversion is considered better than the other."
+//
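+// For example (illustrative GLSL, per rule 3 above): with both void f(float)
+// and void f(double) declared, calling f(1) selects f(float), because
+// int -> float is a better conversion than int -> double.
+//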
+const TFunction* TParseContext::findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // no exact match, use the generic selector, parameterized by the GLSL rules
+
+ // create list of candidates to send
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ // can 'from' convert to 'to'?
+ const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+ if (from == to)
+ return true;
+ if (from.coopMatParameterOK(to))
+ return true;
+ // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+ if (builtIn && from.isArray() && to.isUnsizedArray()) {
+ TType fromElementType(from, 0);
+ TType toElementType(to, 0);
+ if (fromElementType == toElementType)
+ return true;
+ }
+ if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+ return false;
+ return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+ };
+
+ // Is 'to2' a better conversion than 'to1'?
+ // Ties should not be considered as better.
+ // Assumes 'convertible' already said true.
+ const auto better = [](const TType& from, const TType& to1, const TType& to2) -> bool {
+ // 1. exact match
+ if (from == to2)
+ return from != to1;
+ if (from == to1)
+ return false;
+
+ // 2. float -> double is better
+ if (from.getBasicType() == EbtFloat) {
+ if (to2.getBasicType() == EbtDouble && to1.getBasicType() != EbtDouble)
+ return true;
+ }
+
+ // 3. -> float is better than -> double
+ return to2.getBasicType() == EbtFloat && to1.getBasicType() == EbtDouble;
+ };
+
+ // for ambiguity reporting
+ bool tie = false;
+
+ // send to the generic selector
+ const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
+
+ if (bestMatch == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+ else if (tie)
+ error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
+
+ return bestMatch;
+}
+
+// "To determine whether the conversion for a single argument in one match
+// is better than that for another match, the conversion is assigned one of
+// the three ranks ordered from best to worst:
+// 1. Exact match: no conversion.
+// 2. Promotion: integral or floating-point promotion.
+// 3. Conversion: integral conversion, floating-point conversion,
+// floating-integral conversion.
+// A conversion C1 is better than a conversion C2 if the rank of C1 is
+// better than the rank of C2."
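+//
+// For example (illustrative GLSL with explicit arithmetic types): with both
+// void f(int32_t) and void f(float32_t) declared, calling f(int16_t(1))
+// selects f(int32_t): int16_t -> int32_t is a promotion, while
+// int16_t -> float32_t is only a conversion.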
+const TFunction* TParseContext::findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // no exact match, use the generic selector, parameterized by the GLSL rules
+
+ // create list of candidates to send
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ // can 'from' convert to 'to'?
+ const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+ if (from == to)
+ return true;
+ if (from.coopMatParameterOK(to))
+ return true;
+ // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+ if (builtIn && from.isArray() && to.isUnsizedArray()) {
+ TType fromElementType(from, 0);
+ TType toElementType(to, 0);
+ if (fromElementType == toElementType)
+ return true;
+ }
+ if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+ return false;
+ return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+ };
+
+ // Is 'to2' a better conversion than 'to1'?
+ // Ties should not be considered as better.
+ // Assumes 'convertible' already said true.
+ const auto better = [this](const TType& from, const TType& to1, const TType& to2) -> bool {
+ // 1. exact match
+ if (from == to2)
+ return from != to1;
+ if (from == to1)
+ return false;
+
+ // 2. Promotion (integral, floating-point) is better
+ TBasicType from_type = from.getBasicType();
+ TBasicType to1_type = to1.getBasicType();
+ TBasicType to2_type = to2.getBasicType();
+ bool isPromotion1 = (intermediate.isIntegralPromotion(from_type, to1_type) ||
+ intermediate.isFPPromotion(from_type, to1_type));
+ bool isPromotion2 = (intermediate.isIntegralPromotion(from_type, to2_type) ||
+ intermediate.isFPPromotion(from_type, to2_type));
+ if (isPromotion2)
+ return !isPromotion1;
+        if (isPromotion1)
+            return false;
+
+        // 3. Conversion (integral, floating-point, floating-integral)
+ bool isConversion1 = (intermediate.isIntegralConversion(from_type, to1_type) ||
+ intermediate.isFPConversion(from_type, to1_type) ||
+ intermediate.isFPIntegralConversion(from_type, to1_type));
+ bool isConversion2 = (intermediate.isIntegralConversion(from_type, to2_type) ||
+ intermediate.isFPConversion(from_type, to2_type) ||
+ intermediate.isFPIntegralConversion(from_type, to2_type));
+
+ return isConversion2 && !isConversion1;
+ };
+
+ // for ambiguity reporting
+ bool tie = false;
+
+ // send to the generic selector
+ const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
+
+ if (bestMatch == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+ else if (tie)
+ error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
+
+ return bestMatch;
+}
+
+// When a declaration includes a type but not a variable name, it can be
+// used to establish defaults.
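+//
+// For example (illustrative GLSL; sets the default offset for binding 0):
+//     layout(binding = 0, offset = 8) uniform atomic_uint;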
+void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType& publicType)
+{
+ if (publicType.basicType == EbtAtomicUint && publicType.qualifier.hasBinding() && publicType.qualifier.hasOffset()) {
+ if (publicType.qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) {
+ error(loc, "atomic_uint binding is too large", "binding", "");
+ return;
+ }
+ atomicUintOffsets[publicType.qualifier.layoutBinding] = publicType.qualifier.layoutOffset;
+ return;
+ }
+
+ if (publicType.qualifier.hasLayout() && !publicType.qualifier.layoutBufferReference)
+ warn(loc, "useless application of layout qualifier", "layout", "");
+}
+
+//
+// Do everything necessary to handle a variable (non-block) declaration.
+// Either redeclaring a variable, or making a new one, updating the symbol
+// table, and all error checking.
+//
+// Returns a subtree node that computes an initializer, if needed.
+// Returns nullptr if there is no code to execute for initialization.
+//
+// 'publicType' is the type part of the declaration (to the left)
+// 'arraySizes' is the arrayness tagged on the identifier (to the right)
+//
+TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& identifier, const TPublicType& publicType,
+ TArraySizes* arraySizes, TIntermTyped* initializer)
+{
+ // Make a fresh type that combines the characteristics from the individual
+ // identifier syntax and the declaration-type syntax.
+ TType type(publicType);
+ type.transferArraySizes(arraySizes);
+ type.copyArrayInnerSizes(publicType.arraySizes);
+ arrayOfArrayVersionCheck(loc, type.getArraySizes());
+
+ if (type.isCoopMat()) {
+ intermediate.setUseVulkanMemoryModel();
+ intermediate.setUseStorageBuffer();
+
+ if (!publicType.typeParameters || publicType.typeParameters->getNumDims() != 4) {
+ error(loc, "expected four type parameters", identifier.c_str(), "");
+ }
+ if (publicType.typeParameters &&
+ publicType.typeParameters->getDimSize(0) != 16 &&
+ publicType.typeParameters->getDimSize(0) != 32 &&
+ publicType.typeParameters->getDimSize(0) != 64) {
+ error(loc, "expected 16, 32, or 64 bits for first type parameter", identifier.c_str(), "");
+ }
+ } else {
+ if (publicType.typeParameters && publicType.typeParameters->getNumDims() != 0) {
+ error(loc, "unexpected type parameters", identifier.c_str(), "");
+ }
+ }
+
+ if (voidErrorCheck(loc, identifier, type.getBasicType()))
+ return nullptr;
+
+ if (initializer)
+ rValueErrorCheck(loc, "initializer", initializer);
+ else
+ nonInitConstCheck(loc, identifier, type);
+
+ samplerCheck(loc, type, identifier, initializer);
+ atomicUintCheck(loc, type, identifier);
+ transparentOpaqueCheck(loc, type, identifier);
+#ifdef NV_EXTENSIONS
+ accStructNVCheck(loc, type, identifier);
+#endif
+ if (type.getQualifier().storage == EvqConst && type.containsBasicType(EbtReference)) {
+ error(loc, "variables with reference type can't have qualifier 'const'", "qualifier", "");
+ }
+
+ if (type.getQualifier().storage != EvqUniform && type.getQualifier().storage != EvqBuffer) {
+ if (type.containsBasicType(EbtFloat16))
+ requireFloat16Arithmetic(loc, "qualifier", "float16 types can only be in uniform block or buffer storage");
+ if (type.contains16BitInt())
+ requireInt16Arithmetic(loc, "qualifier", "(u)int16 types can only be in uniform block or buffer storage");
+ if (type.contains8BitInt())
+ requireInt8Arithmetic(loc, "qualifier", "(u)int8 types can only be in uniform block or buffer storage");
+ }
+
+ if (type.getQualifier().storage == EvqShared &&
+ type.containsCoopMat())
+        error(loc, "Cooperative matrix types must not be used in shared memory", "qualifier", "");
+
+ if (identifier != "gl_FragCoord" && (publicType.shaderQualifiers.originUpperLeft || publicType.shaderQualifiers.pixelCenterInteger))
+        error(loc, "can only apply origin_upper_left and pixel_center_integer to gl_FragCoord", "layout qualifier", "");
+ if (identifier != "gl_FragDepth" && publicType.shaderQualifiers.layoutDepth != EldNone)
+ error(loc, "can only apply depth layout to gl_FragDepth", "layout qualifier", "");
+
+ // Check for redeclaration of built-ins and/or attempting to declare a reserved name
+ TSymbol* symbol = redeclareBuiltinVariable(loc, identifier, type.getQualifier(), publicType.shaderQualifiers);
+ if (symbol == nullptr)
+ reservedErrorCheck(loc, identifier);
+
+ inheritGlobalDefaults(type.getQualifier());
+
+ // Declare the variable
+ if (type.isArray()) {
+ // Check that implicit sizing is only where allowed.
+ arraySizesCheck(loc, type.getQualifier(), type.getArraySizes(), initializer, false);
+
+ if (! arrayQualifierError(loc, type.getQualifier()) && ! arrayError(loc, type))
+ declareArray(loc, identifier, type, symbol);
+
+ if (initializer) {
+ profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "initializer");
+ profileRequires(loc, EEsProfile, 300, nullptr, "initializer");
+ }
+ } else {
+ // non-array case
+ if (symbol == nullptr)
+ symbol = declareNonArray(loc, identifier, type);
+ else if (type != symbol->getType())
+ error(loc, "cannot change the type of", "redeclaration", symbol->getName().c_str());
+ }
+
+ if (symbol == nullptr)
+ return nullptr;
+
+ // Deal with initializer
+ TIntermNode* initNode = nullptr;
+ if (symbol != nullptr && initializer) {
+ TVariable* variable = symbol->getAsVariable();
+ if (! variable) {
+ error(loc, "initializer requires a variable, not a member", identifier.c_str(), "");
+ return nullptr;
+ }
+ initNode = executeInitializer(loc, initializer, variable);
+ }
+
+ // look for errors in layout qualifier use
+ layoutObjectCheck(loc, *symbol);
+
+ // fix up
+ fixOffset(loc, *symbol);
+
+ return initNode;
+}
+
+// Pick up the applicable global output defaults, copying them into dst.
+void TParseContext::inheritGlobalDefaults(TQualifier& dst) const
+{
+ if (dst.storage == EvqVaryingOut) {
+ if (! dst.hasStream() && language == EShLangGeometry)
+ dst.layoutStream = globalOutputDefaults.layoutStream;
+ if (! dst.hasXfbBuffer())
+ dst.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
+ }
+}
+
+//
+// Make an internal-only variable whose name is for debug purposes only
+// and won't be searched for. Callers will only use the return value to
+// access the variable, never the name to look it up. It is okay if the
+// name is the same as other names; there won't be any conflict.
+//
+TVariable* TParseContext::makeInternalVariable(const char* name, const TType& type) const
+{
+ TString* nameString = NewPoolTString(name);
+ TVariable* variable = new TVariable(nameString, type);
+ symbolTable.makeInternalVariable(*variable);
+
+ return variable;
+}
+
+//
+// Declare a non-array variable; the main point being that no redeclaration
+// for resizing is allowed.
+//
+// Return the successfully declared variable.
+//
+TVariable* TParseContext::declareNonArray(const TSourceLoc& loc, const TString& identifier, const TType& type)
+{
+ // make a new variable
+ TVariable* variable = new TVariable(&identifier, type);
+
+ ioArrayCheck(loc, type, identifier);
+
+ // add variable to symbol table
+ if (symbolTable.insert(*variable)) {
+ if (symbolTable.atGlobalLevel())
+ trackLinkage(*variable);
+ return variable;
+ }
+
+ error(loc, "redefinition", variable->getName().c_str(), "");
+ return nullptr;
+}
+
+//
+// Handle all types of initializers from the grammar.
+//
+// Returning nullptr just means there is no code to execute to handle the
+// initializer, which will, for example, be the case for constant initializers.
+//
+TIntermNode* TParseContext::executeInitializer(const TSourceLoc& loc, TIntermTyped* initializer, TVariable* variable)
+{
+ //
+ // Identifier must be of type constant, a global, or a temporary, and
+ // starting at version 120, desktop allows uniforms to have initializers.
+ //
+ TStorageQualifier qualifier = variable->getType().getQualifier().storage;
+ if (! (qualifier == EvqTemporary || qualifier == EvqGlobal || qualifier == EvqConst ||
+ (qualifier == EvqUniform && profile != EEsProfile && version >= 120))) {
+ error(loc, " cannot initialize this type of qualifier ", variable->getType().getStorageQualifierString(), "");
+ return nullptr;
+ }
+ arrayObjectCheck(loc, variable->getType(), "array initializer");
+
+ //
+ // If the initializer was from braces { ... }, we convert the whole subtree to a
+ // constructor-style subtree, allowing the rest of the code to operate
+ // identically for both kinds of initializers.
+ //
+ // Type can't be deduced from the initializer list, so a skeletal type to
+ // follow has to be passed in. Constness and specialization-constness
+ // should be deduced bottom up, not dictated by the skeletal type.
+ //
+ TType skeletalType;
+ skeletalType.shallowCopy(variable->getType());
+ skeletalType.getQualifier().makeTemporary();
+ initializer = convertInitializerList(loc, skeletalType, initializer);
+ if (! initializer) {
+ // error recovery; don't leave const without constant values
+ if (qualifier == EvqConst)
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+
+ // Fix outer arrayness if variable is unsized, getting size from the initializer
+ if (initializer->getType().isSizedArray() && variable->getType().isUnsizedArray())
+ variable->getWritableType().changeOuterArraySize(initializer->getType().getOuterArraySize());
+
+ // Inner arrayness can also get set by an initializer
+ if (initializer->getType().isArrayOfArrays() && variable->getType().isArrayOfArrays() &&
+ initializer->getType().getArraySizes()->getNumDims() ==
+ variable->getType().getArraySizes()->getNumDims()) {
+ // adopt unsized sizes from the initializer's sizes
+ for (int d = 1; d < variable->getType().getArraySizes()->getNumDims(); ++d) {
+ if (variable->getType().getArraySizes()->getDimSize(d) == UnsizedArraySize) {
+ variable->getWritableType().getArraySizes()->setDimSize(d,
+ initializer->getType().getArraySizes()->getDimSize(d));
+ }
+ }
+ }
+
+ // Uniforms require a compile-time constant initializer
+ if (qualifier == EvqUniform && ! initializer->getType().getQualifier().isFrontEndConstant()) {
+ error(loc, "uniform initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str());
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+ // Global consts require a constant initializer (specialization constant is okay)
+ if (qualifier == EvqConst && symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) {
+ error(loc, "global const initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str());
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+
+ // Const variables require a constant initializer, depending on version
+ if (qualifier == EvqConst) {
+ if (! initializer->getType().getQualifier().isConstant()) {
+ const char* initFeature = "non-constant initializer";
+ requireProfile(loc, ~EEsProfile, initFeature);
+ profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ variable->getWritableType().getQualifier().storage = EvqConstReadOnly;
+ qualifier = EvqConstReadOnly;
+ }
+ } else {
+ // Non-const global variables in ES need a const initializer.
+ //
+ // "In declarations of global variables with no storage qualifier or with a const
+ // qualifier any initializer must be a constant expression."
+ if (symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) {
+ const char* initFeature = "non-constant global initializer (needs GL_EXT_shader_non_constant_global_initializers)";
+ if (profile == EEsProfile) {
+ if (relaxedErrors() && ! extensionTurnedOn(E_GL_EXT_shader_non_constant_global_initializers))
+ warn(loc, "not allowed in this version", initFeature, "");
+ else
+ profileRequires(loc, EEsProfile, 0, E_GL_EXT_shader_non_constant_global_initializers, initFeature);
+ }
+ }
+ }
+
+ if (qualifier == EvqConst || qualifier == EvqUniform) {
+ // Compile-time tagging of the variable with its constant value...
+
+ initializer = intermediate.addConversion(EOpAssign, variable->getType(), initializer);
+ if (! initializer || ! initializer->getType().getQualifier().isConstant() || variable->getType() != initializer->getType()) {
+ error(loc, "non-matching or non-convertible constant type for const initializer",
+ variable->getType().getStorageQualifierString(), "");
+ variable->getWritableType().getQualifier().makeTemporary();
+ return nullptr;
+ }
+
+ // We either have a folded constant in getAsConstantUnion, or we have to use
+ // the initializer's subtree in the AST to represent the computation of a
+ // specialization constant.
+ assert(initializer->getAsConstantUnion() || initializer->getType().getQualifier().isSpecConstant());
+ if (initializer->getAsConstantUnion())
+ variable->setConstArray(initializer->getAsConstantUnion()->getConstArray());
+ else {
+ // It's a specialization constant.
+ variable->getWritableType().getQualifier().makeSpecConstant();
+
+ // Keep the subtree that computes the specialization constant with the variable.
+ // Later, a symbol node will adopt the subtree from the variable.
+ variable->setConstSubtree(initializer);
+ }
+ } else {
+ // normal assigning of a value to a variable...
+ specializationCheck(loc, initializer->getType(), "initializer");
+ TIntermSymbol* intermSymbol = intermediate.addSymbol(*variable, loc);
+ TIntermTyped* initNode = intermediate.addAssign(EOpAssign, intermSymbol, initializer, loc);
+ if (! initNode)
+ assignError(loc, "=", intermSymbol->getCompleteString(), initializer->getCompleteString());
+
+ return initNode;
+ }
+
+ return nullptr;
+}
+
+//
+// Reprocess any initializer-list (the "{ ... }" syntax) parts of the
+// initializer.
+//
+// Need to hierarchically assign correct types and implicit
+// conversions. Will do this mimicking the same process used for
+// creating a constructor-style initializer, ensuring we get the
+// same form. However, it has to walk the 'type' passed in, in parallel,
+// as the type cannot be deduced from an initializer list.
+//
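+// For example (an illustrative GLSL initializer list):
+//     float a[2] = { 1.0, 2.0 };
+// is rebuilt as if the constructor form had been written:
+//     float a[2] = float[2](1.0, 2.0);
+//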
+TIntermTyped* TParseContext::convertInitializerList(const TSourceLoc& loc, const TType& type, TIntermTyped* initializer)
+{
+ // Will operate recursively. Once a subtree is found that is constructor style,
+ // everything below it is already good: Only the "top part" of the initializer
+ // can be an initializer list, where "top part" can extend for several (or all) levels.
+
+ // see if we have bottomed out in the tree within the initializer-list part
+ TIntermAggregate* initList = initializer->getAsAggregate();
+ if (! initList || initList->getOp() != EOpNull)
+ return initializer;
+
+ // Of the initializer-list set of nodes, need to process bottom up,
+ // so recurse deep, then process on the way up.
+
+ // Go down the tree here...
+ if (type.isArray()) {
+ // The type's array might be unsized, which could be okay, so base sizes on the size of the aggregate.
+ // Later on, initializer execution code will deal with array size logic.
+ TType arrayType;
+ arrayType.shallowCopy(type); // sharing struct stuff is fine
+ arrayType.copyArraySizes(*type.getArraySizes()); // but get a fresh copy of the array information, to edit below
+
+ // edit array sizes to fill in unsized dimensions
+ arrayType.changeOuterArraySize((int)initList->getSequence().size());
+ TIntermTyped* firstInit = initList->getSequence()[0]->getAsTyped();
+ if (arrayType.isArrayOfArrays() && firstInit->getType().isArray() &&
+ arrayType.getArraySizes()->getNumDims() == firstInit->getType().getArraySizes()->getNumDims() + 1) {
+ for (int d = 1; d < arrayType.getArraySizes()->getNumDims(); ++d) {
+ if (arrayType.getArraySizes()->getDimSize(d) == UnsizedArraySize)
+ arrayType.getArraySizes()->setDimSize(d, firstInit->getType().getArraySizes()->getDimSize(d - 1));
+ }
+ }
+
+ TType elementType(arrayType, 0); // dereferenced type
+ for (size_t i = 0; i < initList->getSequence().size(); ++i) {
+ initList->getSequence()[i] = convertInitializerList(loc, elementType, initList->getSequence()[i]->getAsTyped());
+ if (initList->getSequence()[i] == nullptr)
+ return nullptr;
+ }
+
+ return addConstructor(loc, initList, arrayType);
+ } else if (type.isStruct()) {
+ if (type.getStruct()->size() != initList->getSequence().size()) {
+ error(loc, "wrong number of structure members", "initializer list", "");
+ return nullptr;
+ }
+ for (size_t i = 0; i < type.getStruct()->size(); ++i) {
+ initList->getSequence()[i] = convertInitializerList(loc, *(*type.getStruct())[i].type, initList->getSequence()[i]->getAsTyped());
+ if (initList->getSequence()[i] == nullptr)
+ return nullptr;
+ }
+ } else if (type.isMatrix()) {
+ if (type.getMatrixCols() != (int)initList->getSequence().size()) {
+ error(loc, "wrong number of matrix columns:", "initializer list", type.getCompleteString().c_str());
+ return nullptr;
+ }
+ TType vectorType(type, 0); // dereferenced type
+ for (int i = 0; i < type.getMatrixCols(); ++i) {
+ initList->getSequence()[i] = convertInitializerList(loc, vectorType, initList->getSequence()[i]->getAsTyped());
+ if (initList->getSequence()[i] == nullptr)
+ return nullptr;
+ }
+ } else if (type.isVector()) {
+ if (type.getVectorSize() != (int)initList->getSequence().size()) {
+ error(loc, "wrong vector size (or rows in a matrix column):", "initializer list", type.getCompleteString().c_str());
+ return nullptr;
+ }
+ } else {
+ error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString().c_str());
+ return nullptr;
+ }
+
+ // Now that the subtree is processed, process this node as if the
+ // initializer list is a set of arguments to a constructor.
+ TIntermNode* emulatedConstructorArguments;
+ if (initList->getSequence().size() == 1)
+ emulatedConstructorArguments = initList->getSequence()[0];
+ else
+ emulatedConstructorArguments = initList;
+ return addConstructor(loc, emulatedConstructorArguments, type);
+}
+
+//
+// Test for the correctness of the parameters passed to various constructor functions
+// and also convert them to the right data type, if allowed and required.
+//
+// 'node' is what to construct from.
+// 'type' is what type to construct.
+//
+// Returns nullptr for an error or the constructed node (aggregate or typed) for no error.
+//
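+// For example (illustrative GLSL): vec2(1, 2) is handled here; each int
+// argument is converted to float before the vec2 is built.
+//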
+TIntermTyped* TParseContext::addConstructor(const TSourceLoc& loc, TIntermNode* node, const TType& type)
+{
+ if (node == nullptr || node->getAsTyped() == nullptr)
+ return nullptr;
+ rValueErrorCheck(loc, "constructor", node->getAsTyped());
+
+ TIntermAggregate* aggrNode = node->getAsAggregate();
+ TOperator op = intermediate.mapTypeToConstructorOp(type);
+
+ // Combined texture-sampler constructors are completely semantic checked
+ // in constructorTextureSamplerError()
+ if (op == EOpConstructTextureSampler) {
+ if (aggrNode->getSequence()[1]->getAsTyped()->getType().getSampler().shadow) {
+ // Transfer depth into the texture (SPIR-V image) type, as a hint
+ // for tools to know this texture/image is a depth image.
+ aggrNode->getSequence()[0]->getAsTyped()->getWritableType().getSampler().shadow = true;
+ }
+ return intermediate.setAggregateOperator(aggrNode, op, type, loc);
+ }
+
+ TTypeList::const_iterator memberTypes;
+ if (op == EOpConstructStruct)
+ memberTypes = type.getStruct()->begin();
+
+ TType elementType;
+ if (type.isArray()) {
+ TType dereferenced(type, 0);
+ elementType.shallowCopy(dereferenced);
+ } else
+ elementType.shallowCopy(type);
+
+ bool singleArg;
+ if (aggrNode) {
+ if (aggrNode->getOp() != EOpNull)
+ singleArg = true;
+ else
+ singleArg = false;
+ } else
+ singleArg = true;
+
+ TIntermTyped *newNode;
+ if (singleArg) {
+        // If a structure constructor or array constructor is being called
+        // with only one parameter, constructAggregate only needs to be called once.
+ if (type.isArray())
+ newNode = constructAggregate(node, elementType, 1, node->getLoc());
+ else if (op == EOpConstructStruct)
+ newNode = constructAggregate(node, *(*memberTypes).type, 1, node->getLoc());
+ else
+ newNode = constructBuiltIn(type, op, node->getAsTyped(), node->getLoc(), false);
+
+ if (newNode && (type.isArray() || op == EOpConstructStruct))
+ newNode = intermediate.setAggregateOperator(newNode, EOpConstructStruct, type, loc);
+
+ return newNode;
+ }
+
+ //
+ // Handle list of arguments.
+ //
+ TIntermSequence &sequenceVector = aggrNode->getSequence(); // Stores the information about the parameter to the constructor
+ // if the structure constructor contains more than one parameter, then construct
+ // each parameter
+
+ int paramCount = 0; // keeps track of the constructor parameter number being checked
+
+ // for each parameter to the constructor call, check to see if the right type is passed or convert them
+ // to the right type if possible (and allowed).
+ // for structure constructors, just check if the right type is passed, no conversion is allowed.
+ for (TIntermSequence::iterator p = sequenceVector.begin();
+ p != sequenceVector.end(); p++, paramCount++) {
+ if (type.isArray())
+ newNode = constructAggregate(*p, elementType, paramCount+1, node->getLoc());
+ else if (op == EOpConstructStruct)
+ newNode = constructAggregate(*p, *(memberTypes[paramCount]).type, paramCount+1, node->getLoc());
+ else
+ newNode = constructBuiltIn(type, op, (*p)->getAsTyped(), node->getLoc(), true);
+
+ if (newNode)
+ *p = newNode;
+ else
+ return nullptr;
+ }
+
+ return intermediate.setAggregateOperator(aggrNode, op, type, loc);
+}
+
+// Function for constructor implementation. Calls addUnaryMath with appropriate EOp value
+// for the parameter to the constructor (passed to this function). Essentially, it converts
+// the parameter types correctly. If a constructor expects an int (like ivec2) and is passed a
+// float, then float is converted to int.
+//
+// Returns nullptr for an error or the constructed node.
+//
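+// For example (illustrative GLSL): ivec2(vec2(0.5)) converts each float
+// component to int using the EOpConstructInt basic op selected below.
+//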
+TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, TIntermTyped* node, const TSourceLoc& loc,
+ bool subset)
+{
+    // If we are changing a matrix both in basic-type domain and to a non-matrix,
+    // do the shape change first (by default, below, basic type is changed before shape).
+    // This avoids requesting a matrix of a new type that is going to be discarded anyway.
+    // TODO: This could be generalized to more type combinations, but that would require
+    // more extensive testing and a full algorithm rework. For now, the need to do two changes
+    // makes the recursive call work, and avoids the most egregious case of creating integer matrices.
+ if (node->getType().isMatrix() && (type.isScalar() || type.isVector()) &&
+ type.isFloatingDomain() != node->getType().isFloatingDomain()) {
+ TType transitionType(node->getBasicType(), glslang::EvqTemporary, type.getVectorSize(), 0, 0, node->isVector());
+ TOperator transitionOp = intermediate.mapTypeToConstructorOp(transitionType);
+ node = constructBuiltIn(transitionType, transitionOp, node, loc, false);
+ }
+
+ TIntermTyped* newNode;
+ TOperator basicOp;
+
+ //
+ // First, convert types as needed.
+ //
+ switch (op) {
+ case EOpConstructVec2:
+ case EOpConstructVec3:
+ case EOpConstructVec4:
+ case EOpConstructMat2x2:
+ case EOpConstructMat2x3:
+ case EOpConstructMat2x4:
+ case EOpConstructMat3x2:
+ case EOpConstructMat3x3:
+ case EOpConstructMat3x4:
+ case EOpConstructMat4x2:
+ case EOpConstructMat4x3:
+ case EOpConstructMat4x4:
+ case EOpConstructFloat:
+ basicOp = EOpConstructFloat;
+ break;
+
+ case EOpConstructDVec2:
+ case EOpConstructDVec3:
+ case EOpConstructDVec4:
+ case EOpConstructDMat2x2:
+ case EOpConstructDMat2x3:
+ case EOpConstructDMat2x4:
+ case EOpConstructDMat3x2:
+ case EOpConstructDMat3x3:
+ case EOpConstructDMat3x4:
+ case EOpConstructDMat4x2:
+ case EOpConstructDMat4x3:
+ case EOpConstructDMat4x4:
+ case EOpConstructDouble:
+ basicOp = EOpConstructDouble;
+ break;
+
+ case EOpConstructF16Vec2:
+ case EOpConstructF16Vec3:
+ case EOpConstructF16Vec4:
+ case EOpConstructF16Mat2x2:
+ case EOpConstructF16Mat2x3:
+ case EOpConstructF16Mat2x4:
+ case EOpConstructF16Mat3x2:
+ case EOpConstructF16Mat3x3:
+ case EOpConstructF16Mat3x4:
+ case EOpConstructF16Mat4x2:
+ case EOpConstructF16Mat4x3:
+ case EOpConstructF16Mat4x4:
+ case EOpConstructFloat16:
+ basicOp = EOpConstructFloat16;
+ break;
+
+ case EOpConstructI8Vec2:
+ case EOpConstructI8Vec3:
+ case EOpConstructI8Vec4:
+ case EOpConstructInt8:
+ basicOp = EOpConstructInt8;
+ break;
+
+ case EOpConstructU8Vec2:
+ case EOpConstructU8Vec3:
+ case EOpConstructU8Vec4:
+ case EOpConstructUint8:
+ basicOp = EOpConstructUint8;
+ break;
+
+ case EOpConstructI16Vec2:
+ case EOpConstructI16Vec3:
+ case EOpConstructI16Vec4:
+ case EOpConstructInt16:
+ basicOp = EOpConstructInt16;
+ break;
+
+ case EOpConstructU16Vec2:
+ case EOpConstructU16Vec3:
+ case EOpConstructU16Vec4:
+ case EOpConstructUint16:
+ basicOp = EOpConstructUint16;
+ break;
+
+ case EOpConstructIVec2:
+ case EOpConstructIVec3:
+ case EOpConstructIVec4:
+ case EOpConstructInt:
+ basicOp = EOpConstructInt;
+ break;
+
+ case EOpConstructUVec2:
+ case EOpConstructUVec3:
+ case EOpConstructUVec4:
+ case EOpConstructUint:
+ basicOp = EOpConstructUint;
+ break;
+
+ case EOpConstructI64Vec2:
+ case EOpConstructI64Vec3:
+ case EOpConstructI64Vec4:
+ case EOpConstructInt64:
+ basicOp = EOpConstructInt64;
+ break;
+
+ case EOpConstructUint64:
+ if (type.isScalar() && node->getType().getBasicType() == EbtReference) {
+ TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUint64, true, node, type);
+ return newNode;
+ }
+ // fall through
+ case EOpConstructU64Vec2:
+ case EOpConstructU64Vec3:
+ case EOpConstructU64Vec4:
+ basicOp = EOpConstructUint64;
+ break;
+
+ case EOpConstructBVec2:
+ case EOpConstructBVec3:
+ case EOpConstructBVec4:
+ case EOpConstructBool:
+ basicOp = EOpConstructBool;
+ break;
+
+ case EOpConstructNonuniform:
+ node->getWritableType().getQualifier().nonUniform = true;
+ return node;
+
+ case EOpConstructReference:
+ // construct reference from reference
+ if (node->getType().getBasicType() == EbtReference) {
+ newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConstructReference, true, node, type);
+ return newNode;
+ // construct reference from uint64
+ } else if (node->getType().isScalar() && node->getType().getBasicType() == EbtUint64) {
+ TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToPtr, true, node, type);
+ return newNode;
+ } else {
+ return nullptr;
+ }
+
+ case EOpConstructCooperativeMatrix:
+ if (!node->getType().isCoopMat()) {
+ if (type.getBasicType() != node->getType().getBasicType()) {
+ node = intermediate.addConversion(type.getBasicType(), node);
+ }
+ node = intermediate.setAggregateOperator(node, EOpConstructCooperativeMatrix, type, node->getLoc());
+ } else {
+ switch (type.getBasicType()) {
+ default:
+ assert(0);
+ break;
+ case EbtFloat:
+ assert(node->getType().getBasicType() == EbtFloat16);
+ node = intermediate.addUnaryNode(EOpConvFloat16ToFloat, node, node->getLoc(), type);
+ break;
+ case EbtFloat16:
+ assert(node->getType().getBasicType() == EbtFloat);
+ node = intermediate.addUnaryNode(EOpConvFloatToFloat16, node, node->getLoc(), type);
+ break;
+ }
+ // If it's a (non-specialization) constant, it must be folded.
+ if (node->getAsUnaryNode()->getOperand()->getAsConstantUnion())
+ return node->getAsUnaryNode()->getOperand()->getAsConstantUnion()->fold(op, node->getType());
+ }
+
+ return node;
+
+ default:
+ error(loc, "unsupported construction", "", "");
+
+ return nullptr;
+ }
+ newNode = intermediate.addUnaryMath(basicOp, node, node->getLoc());
+ if (newNode == nullptr) {
+ error(loc, "can't convert", "constructor", "");
+ return nullptr;
+ }
+
+ //
+ // Now, if there still isn't an operation to do the construction, and we need one, add one.
+ //
+
+ // Otherwise, skip out early.
+ if (subset || (newNode != node && newNode->getType() == type))
+ return newNode;
+
+ // setAggregateOperator will insert a new node for the constructor, as needed.
+ return intermediate.setAggregateOperator(newNode, op, type, loc);
+}
+
+// This function checks the types of the parameters to a structure or array constructor.
+// It raises an error message if an expected type does not match the parameter passed
+// to the constructor.
+//
+// Returns nullptr for an error, or the (possibly converted) node otherwise.
+//
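+// For example (hypothetical GLSL), given 'struct S { vec2 v; };', the call
+// 'S(1.0)' is rejected here, since a lone float cannot be converted to the
+// vec2 member.
+//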
+TIntermTyped* TParseContext::constructAggregate(TIntermNode* node, const TType& type, int paramCount, const TSourceLoc& loc)
+{
+ TIntermTyped* converted = intermediate.addConversion(EOpConstructStruct, type, node->getAsTyped());
+ if (! converted || converted->getType() != type) {
+ error(loc, "", "constructor", "cannot convert parameter %d from '%s' to '%s'", paramCount,
+ node->getAsTyped()->getType().getCompleteString().c_str(), type.getCompleteString().c_str());
+
+ return nullptr;
+ }
+
+ return converted;
+}
+
+//
+// Do everything needed to add an interface block.
+//
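+// For example, a typical declaration handled here (hypothetical GLSL):
+//
+//     layout(std140, binding = 0) uniform MyBlock {
+//         mat4 mvp;
+//         vec4 color;
+//     } ubo;
+//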
+void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, const TString* instanceName,
+ TArraySizes* arraySizes)
+{
+ blockStageIoCheck(loc, currentBlockQualifier);
+ blockQualifierCheck(loc, currentBlockQualifier, instanceName != nullptr);
+ if (arraySizes != nullptr) {
+ arraySizesCheck(loc, currentBlockQualifier, arraySizes, nullptr, false);
+ arrayOfArrayVersionCheck(loc, arraySizes);
+ if (arraySizes->getNumDims() > 1)
+ requireProfile(loc, ~EEsProfile, "array-of-array of block");
+ }
+
+ // fix and check for member storage qualifiers and types that don't belong within a block
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TType& memberType = *typeList[member].type;
+ TQualifier& memberQualifier = memberType.getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ globalQualifierFixCheck(memberLoc, memberQualifier);
+ if (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal && memberQualifier.storage != currentBlockQualifier.storage)
+ error(memberLoc, "member storage qualifier cannot contradict block storage qualifier", memberType.getFieldName().c_str(), "");
+ memberQualifier.storage = currentBlockQualifier.storage;
+#ifdef NV_EXTENSIONS
+ if (currentBlockQualifier.perPrimitiveNV)
+ memberQualifier.perPrimitiveNV = currentBlockQualifier.perPrimitiveNV;
+ if (currentBlockQualifier.perViewNV)
+ memberQualifier.perViewNV = currentBlockQualifier.perViewNV;
+ if (currentBlockQualifier.perTaskNV)
+ memberQualifier.perTaskNV = currentBlockQualifier.perTaskNV;
+#endif
+ if ((currentBlockQualifier.storage == EvqUniform || currentBlockQualifier.storage == EvqBuffer) && (memberQualifier.isInterpolation() || memberQualifier.isAuxiliary()))
+ error(memberLoc, "member of uniform or buffer block cannot have an auxiliary or interpolation qualifier", memberType.getFieldName().c_str(), "");
+ if (memberType.isArray())
+ arraySizesCheck(memberLoc, currentBlockQualifier, memberType.getArraySizes(), nullptr, member == typeList.size() - 1);
+ if (memberQualifier.hasOffset()) {
+ if (spvVersion.spv == 0) {
+ requireProfile(memberLoc, ~EEsProfile, "offset on block member");
+ profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "offset on block member");
+ }
+ }
+
+ if (memberType.containsOpaque())
+ error(memberLoc, "member of block cannot be or contain a sampler, image, or atomic_uint type", typeList[member].type->getFieldName().c_str(), "");
+
+ if (memberType.containsCoopMat())
+ error(memberLoc, "member of block cannot be or contain a cooperative matrix type", typeList[member].type->getFieldName().c_str(), "");
+ }
+
+ // This might be a redeclaration of a built-in block. If so, redeclareBuiltinBlock() will
+ // do all the rest.
+ if (! symbolTable.atBuiltInLevel() && builtInName(*blockName)) {
+ redeclareBuiltinBlock(loc, typeList, *blockName, instanceName, arraySizes);
+ return;
+ }
+
+ // Not a redeclaration of a built-in; check that all names are user names.
+ reservedErrorCheck(loc, *blockName);
+ if (instanceName)
+ reservedErrorCheck(loc, *instanceName);
+ for (unsigned int member = 0; member < typeList.size(); ++member)
+ reservedErrorCheck(typeList[member].loc, typeList[member].type->getFieldName());
+
+ // Make default block qualification, and adjust the member qualifications
+
+ TQualifier defaultQualification;
+ switch (currentBlockQualifier.storage) {
+ case EvqUniform: defaultQualification = globalUniformDefaults; break;
+ case EvqBuffer: defaultQualification = globalBufferDefaults; break;
+ case EvqVaryingIn: defaultQualification = globalInputDefaults; break;
+ case EvqVaryingOut: defaultQualification = globalOutputDefaults; break;
+ default: defaultQualification.clear(); break;
+ }
+
+ // Special case for "push_constant uniform", which has a default of std430,
+ // contrary to normal uniform defaults, and can't have a default tracked for it.
+ if ((currentBlockQualifier.layoutPushConstant && !currentBlockQualifier.hasPacking())
+#ifdef NV_EXTENSIONS
+ || (currentBlockQualifier.layoutShaderRecordNV && !currentBlockQualifier.hasPacking())
+#endif
+ )
+ currentBlockQualifier.layoutPacking = ElpStd430;
+
+#ifdef NV_EXTENSIONS
+    // Special case for "taskNV in/out", which also has a default of std430.
+ if (currentBlockQualifier.perTaskNV && !currentBlockQualifier.hasPacking())
+ currentBlockQualifier.layoutPacking = ElpStd430;
+#endif
+
+ // fix and check for member layout qualifiers
+
+ mergeObjectLayoutQualifiers(defaultQualification, currentBlockQualifier, true);
+
+ // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts."
+ if (currentBlockQualifier.hasAlign()) {
+ if (defaultQualification.layoutPacking != ElpStd140 &&
+ defaultQualification.layoutPacking != ElpStd430 &&
+ defaultQualification.layoutPacking != ElpScalar) {
+ error(loc, "can only be used with std140, std430, or scalar layout packing", "align", "");
+ defaultQualification.layoutAlign = -1;
+ }
+ }
+
+ bool memberWithLocation = false;
+ bool memberWithoutLocation = false;
+#ifdef NV_EXTENSIONS
+ bool memberWithPerViewQualifier = false;
+#endif
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ if (memberQualifier.hasStream()) {
+ if (defaultQualification.layoutStream != memberQualifier.layoutStream)
+ error(memberLoc, "member cannot contradict block", "stream", "");
+ }
+
+ // "This includes a block's inheritance of the
+ // current global default buffer, a block member's inheritance of the block's
+ // buffer, and the requirement that any *xfb_buffer* declared on a block
+ // member must match the buffer inherited from the block."
+ if (memberQualifier.hasXfbBuffer()) {
+ if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer)
+ error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", "");
+ }
+
+ if (memberQualifier.hasPacking())
+ error(memberLoc, "member of block cannot have a packing layout qualifier", typeList[member].type->getFieldName().c_str(), "");
+ if (memberQualifier.hasLocation()) {
+ const char* feature = "location on block member";
+ switch (currentBlockQualifier.storage) {
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ requireProfile(memberLoc, ECoreProfile | ECompatibilityProfile | EEsProfile, feature);
+ profileRequires(memberLoc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature);
+ profileRequires(memberLoc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature);
+ memberWithLocation = true;
+ break;
+ default:
+ error(memberLoc, "can only use in an in/out block", feature, "");
+ break;
+ }
+ } else
+ memberWithoutLocation = true;
+
+ // "The offset qualifier can only be used on block members of blocks declared with std140 or std430 layouts."
+ // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts."
+ if (memberQualifier.hasAlign() || memberQualifier.hasOffset()) {
+ if (defaultQualification.layoutPacking != ElpStd140 &&
+ defaultQualification.layoutPacking != ElpStd430 &&
+ defaultQualification.layoutPacking != ElpScalar)
+ error(memberLoc, "can only be used with std140, std430, or scalar layout packing", "offset/align", "");
+ }
+
+#ifdef NV_EXTENSIONS
+ if (memberQualifier.isPerView()) {
+ memberWithPerViewQualifier = true;
+ }
+#endif
+
+ TQualifier newMemberQualification = defaultQualification;
+ mergeQualifiers(memberLoc, newMemberQualification, memberQualifier, false);
+ memberQualifier = newMemberQualification;
+ }
+
+ layoutMemberLocationArrayCheck(loc, memberWithLocation, arraySizes);
+
+    // Ensure that the block has an XfbBuffer assigned. This is needed
+    // because if the block has an XfbOffset assigned, it is assumed to have
+    // implicitly inherited the current global XfbBuffer, and because its
+    // members need to be assigned an XfbOffset if they lack one.
+ if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) {
+ if (!currentBlockQualifier.hasXfbBuffer() && currentBlockQualifier.hasXfbOffset())
+ currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer;
+ }
+
+ // Process the members
+ fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation);
+ fixXfbOffsets(currentBlockQualifier, typeList);
+ fixBlockUniformOffsets(currentBlockQualifier, typeList);
+ for (unsigned int member = 0; member < typeList.size(); ++member)
+ layoutTypeCheck(typeList[member].loc, *typeList[member].type);
+
+#ifdef NV_EXTENSIONS
+ if (memberWithPerViewQualifier) {
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ resizeMeshViewDimension(typeList[member].loc, *typeList[member].type);
+ }
+ }
+#endif
+
+ // reverse merge, so that currentBlockQualifier now has all layout information
+ // (can't use defaultQualification directly, it's missing other non-layout-default-class qualifiers)
+ mergeObjectLayoutQualifiers(currentBlockQualifier, defaultQualification, true);
+
+ //
+ // Build and add the interface block as a new type named 'blockName'
+ //
+
+ TType blockType(&typeList, *blockName, currentBlockQualifier);
+ if (arraySizes != nullptr)
+ blockType.transferArraySizes(arraySizes);
+ else
+ ioArrayCheck(loc, blockType, instanceName ? *instanceName : *blockName);
+
+ if (currentBlockQualifier.layoutBufferReference) {
+
+ if (currentBlockQualifier.storage != EvqBuffer)
+ error(loc, "can only be used with buffer", "buffer_reference", "");
+
+ // Create the block reference type. If it was forward-declared, detect that
+ // as a referent struct type with no members. Replace the referent type with
+ // blockType.
+ TType blockNameType(EbtReference, blockType, *blockName);
+ TVariable* blockNameVar = new TVariable(blockName, blockNameType, true);
+ if (! symbolTable.insert(*blockNameVar)) {
+ TSymbol* existingName = symbolTable.find(*blockName);
+ if (existingName->getType().getBasicType() == EbtReference &&
+ existingName->getType().getReferentType()->getStruct() &&
+ existingName->getType().getReferentType()->getStruct()->size() == 0 &&
+ existingName->getType().getQualifier().storage == blockType.getQualifier().storage) {
+ existingName->getType().getReferentType()->deepCopy(blockType);
+ } else {
+ error(loc, "block name cannot be redefined", blockName->c_str(), "");
+ }
+ }
+ if (!instanceName) {
+ return;
+ }
+ } else {
+ //
+ // Don't make a user-defined type out of block name; that will cause an error
+ // if the same block name gets reused in a different interface.
+ //
+ // "Block names have no other use within a shader
+ // beyond interface matching; it is a compile-time error to use a block name at global scope for anything
+ // other than as a block name (e.g., use of a block name for a global variable name or function name is
+ // currently reserved)."
+ //
+ // Use the symbol table to prevent normal reuse of the block's name, as a variable entry,
+ // whose type is EbtBlock, but without all the structure; that will come from the type
+ // the instances point to.
+ //
+ TType blockNameType(EbtBlock, blockType.getQualifier().storage);
+ TVariable* blockNameVar = new TVariable(blockName, blockNameType);
+ if (! symbolTable.insert(*blockNameVar)) {
+ TSymbol* existingName = symbolTable.find(*blockName);
+ if (existingName->getType().getBasicType() == EbtBlock) {
+ if (existingName->getType().getQualifier().storage == blockType.getQualifier().storage) {
+ error(loc, "Cannot reuse block name within the same interface:", blockName->c_str(), blockType.getStorageQualifierString());
+ return;
+ }
+ } else {
+ error(loc, "block name cannot redefine a non-block name", blockName->c_str(), "");
+ return;
+ }
+ }
+ }
+
+ // Add the variable, as anonymous or named instanceName.
+ // Make an anonymous variable if no name was provided.
+ if (! instanceName)
+ instanceName = NewPoolTString("");
+
+ TVariable& variable = *new TVariable(instanceName, blockType);
+ if (! symbolTable.insert(variable)) {
+ if (*instanceName == "")
+ error(loc, "nameless block contains a member that already has a name at global scope", blockName->c_str(), "");
+ else
+ error(loc, "block instance name redefinition", variable.getName().c_str(), "");
+
+ return;
+ }
+
+ // Check for general layout qualifier errors
+ layoutObjectCheck(loc, variable);
+
+ // fix up
+ if (isIoResizeArray(blockType)) {
+ ioArraySymbolResizeList.push_back(&variable);
+ checkIoArraysConsistency(loc, true);
+ } else
+ fixIoArraySize(loc, variable.getWritableType());
+
+ // Save it in the AST for linker use.
+ trackLinkage(variable);
+}
+
+// Do all block-declaration checking regarding the combination of in/out/uniform/buffer
+// with a particular stage.
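+//
+// For example, a 'buffer' block (hypothetical GLSL) requires at least GLSL 430
+// or ES 310, which is checked below:
+//
+//     layout(std430, binding = 1) buffer SSBO { float data[]; };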
+void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ switch (qualifier.storage) {
+ case EvqUniform:
+ profileRequires(loc, EEsProfile, 300, nullptr, "uniform block");
+ profileRequires(loc, ENoProfile, 140, nullptr, "uniform block");
+ if (currentBlockQualifier.layoutPacking == ElpStd430 && ! currentBlockQualifier.layoutPushConstant)
+ requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "std430 requires the buffer storage qualifier");
+ break;
+ case EvqBuffer:
+ requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "buffer block");
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, "buffer block");
+ profileRequires(loc, EEsProfile, 310, nullptr, "buffer block");
+ break;
+ case EvqVaryingIn:
+ profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "input block");
+ // It is a compile-time error to have an input block in a vertex shader or an output block in a fragment shader
+ // "Compute shaders do not permit user-defined input variables..."
+ requireStage(loc, (EShLanguageMask)(EShLangTessControlMask|EShLangTessEvaluationMask|EShLangGeometryMask|EShLangFragmentMask
+#ifdef NV_EXTENSIONS
+ |EShLangMeshNVMask
+#endif
+ ), "input block");
+ if (language == EShLangFragment) {
+ profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "fragment input block");
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangMeshNV && ! qualifier.isTaskMemory()) {
+ error(loc, "input blocks cannot be used in a mesh shader", "out", "");
+ }
+#endif
+ break;
+ case EvqVaryingOut:
+ profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "output block");
+ requireStage(loc, (EShLanguageMask)(EShLangVertexMask|EShLangTessControlMask|EShLangTessEvaluationMask|EShLangGeometryMask
+#ifdef NV_EXTENSIONS
+ |EShLangMeshNVMask|EShLangTaskNVMask
+#endif
+ ), "output block");
+ // ES 310 can have a block before shader_io is turned on, so skip this test for built-ins
+ if (language == EShLangVertex && ! parsingBuiltins) {
+ profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "vertex output block");
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangMeshNV && qualifier.isTaskMemory()) {
+ error(loc, "can only use on input blocks in mesh shader", "taskNV", "");
+ }
+ else if (language == EShLangTaskNV && ! qualifier.isTaskMemory()) {
+ error(loc, "output blocks cannot be used in a task shader", "out", "");
+ }
+#endif
+ break;
+#ifdef NV_EXTENSIONS
+ case EvqPayloadNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangAnyHitNVMask | EShLangClosestHitNVMask | EShLangMissNVMask),
+ "rayPayloadNV block");
+ break;
+ case EvqPayloadInNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangAnyHitNVMask | EShLangClosestHitNVMask | EShLangMissNVMask),
+ "rayPayloadInNV block");
+ break;
+ case EvqHitAttrNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangIntersectNVMask | EShLangAnyHitNVMask | EShLangClosestHitNVMask), "hitAttributeNV block");
+ break;
+ case EvqCallableDataNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "callableDataNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangClosestHitNVMask | EShLangMissNVMask | EShLangCallableNVMask),
+ "callableDataNV block");
+ break;
+ case EvqCallableDataInNV:
+ profileRequires(loc, ~EEsProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV block");
+ requireStage(loc, (EShLanguageMask)(EShLangCallableNVMask), "callableDataInNV block");
+ break;
+#endif
+ default:
+ error(loc, "only uniform, buffer, in, or out blocks are supported", blockName->c_str(), "");
+ break;
+ }
+}
+
+// Do all block-declaration checking regarding its qualifiers.
+void TParseContext::blockQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier, bool /*instanceName*/)
+{
+ // The 4.5 specification says:
+ //
+ // interface-block :
+    //       layout-qualifier(opt) interface-qualifier block-name { member-list } instance-name(opt) ;
+ //
+ // interface-qualifier :
+ // in
+ // out
+ // patch in
+ // patch out
+ // uniform
+ // buffer
+ //
+ // Note however memory qualifiers aren't included, yet the specification also says
+ //
+ // "...memory qualifiers may also be used in the declaration of shader storage blocks..."
+
+ if (qualifier.isInterpolation())
+ error(loc, "cannot use interpolation qualifiers on an interface block", "flat/smooth/noperspective", "");
+ if (qualifier.centroid)
+ error(loc, "cannot use centroid qualifier on an interface block", "centroid", "");
+ if (qualifier.sample)
+ error(loc, "cannot use sample qualifier on an interface block", "sample", "");
+ if (qualifier.invariant)
+ error(loc, "cannot use invariant qualifier on an interface block", "invariant", "");
+ if (qualifier.layoutPushConstant)
+ intermediate.addPushConstantCount();
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV)
+ intermediate.addShaderRecordNVCount();
+ if (qualifier.perTaskNV)
+ intermediate.addTaskNVCount();
+#endif
+}
+
+//
+// "For a block, this process applies to the entire block, or until the first member
+// is reached that has a location layout qualifier. When a block member is declared with a location
+// qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level
+// declaration. Subsequent members are again assigned consecutive locations, based on the newest location,
+// until the next member declared with a location qualifier. The values used for locations do not have to be
+// declared in increasing order."
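+//
+// For example (hypothetical GLSL):
+//
+//     layout(location = 2) out Blk {
+//         vec4 a;                       // assigned location 2
+//         layout(location = 5) vec4 b;  // member qualifier overrides: location 5
+//         vec4 c;                       // consecutive after b: location 6
+//     } blk;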
+void TParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation)
+{
+ // "If a block has no block-level location layout qualifier, it is required that either all or none of its members
+ // have a location layout qualifier, or a compile-time error results."
+ if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation)
+ error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", "");
+ else {
+ if (memberWithLocation) {
+ // remove any block-level location and make it per *every* member
+ int nextLocation = 0; // by the rule above, initial value is not relevant
+ if (qualifier.hasAnyLocation()) {
+ nextLocation = qualifier.layoutLocation;
+ qualifier.layoutLocation = TQualifier::layoutLocationEnd;
+ if (qualifier.hasComponent()) {
+ // "It is a compile-time error to apply the *component* qualifier to a ... block"
+ error(loc, "cannot apply to a block", "component", "");
+ }
+ if (qualifier.hasIndex()) {
+ error(loc, "cannot apply to a block", "index", "");
+ }
+ }
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+ if (! memberQualifier.hasLocation()) {
+ if (nextLocation >= (int)TQualifier::layoutLocationEnd)
+ error(memberLoc, "location is too large", "location", "");
+ memberQualifier.layoutLocation = nextLocation;
+ memberQualifier.layoutComponent = TQualifier::layoutComponentEnd;
+ }
+ nextLocation = memberQualifier.layoutLocation + intermediate.computeTypeLocationSize(
+ *typeList[member].type, language);
+ }
+ }
+ }
+}
+
+void TParseContext::fixXfbOffsets(TQualifier& qualifier, TTypeList& typeList)
+{
+ // "If a block is qualified with xfb_offset, all its
+ // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any
+ // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer
+ // offsets."
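+    //
+    // For example (hypothetical GLSL), in
+    //
+    //     layout(xfb_buffer = 0, xfb_offset = 0) out Blk { float a; float b; } blk;
+    //
+    // 'a' is auto-assigned xfb_offset 0 and 'b' xfb_offset 4.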
+
+ if (! qualifier.hasXfbBuffer() || ! qualifier.hasXfbOffset())
+ return;
+
+ int nextOffset = qualifier.layoutXfbOffset;
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ bool contains64BitType = false;
+#ifdef AMD_EXTENSIONS
+ bool contains32BitType = false;
+ bool contains16BitType = false;
+ int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType, contains32BitType, contains16BitType);
+#else
+ int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType);
+#endif
+ // see if we need to auto-assign an offset to this member
+ if (! memberQualifier.hasXfbOffset()) {
+ // "if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8"
+ if (contains64BitType)
+ RoundToPow2(nextOffset, 8);
+#ifdef AMD_EXTENSIONS
+ else if (contains32BitType)
+ RoundToPow2(nextOffset, 4);
+ else if (contains16BitType)
+ RoundToPow2(nextOffset, 2);
+#endif
+ memberQualifier.layoutXfbOffset = nextOffset;
+ } else
+ nextOffset = memberQualifier.layoutXfbOffset;
+ nextOffset += memberSize;
+ }
+
+ // The above gave all block members an offset, so we can take it off the block now,
+ // which will avoid double counting the offset usage.
+ qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd;
+}
+
+// Calculate and save the offset of each block member, using the recursively
+// defined block offset rules and the user-provided offset and align.
+//
+// Also, compute and save the total size of the block. For the block's size, arrayness
+// is not taken into account, as each element is backed by a separate buffer.
+//
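+// For example (hypothetical GLSL, std140 rules):
+//
+//     layout(std140) uniform Blk {
+//         float f;                     // offset 0 (size 4)
+//         layout(offset = 16) vec3 v;  // explicit offset; vec3 has base alignment 16
+//         float g;                     // next free offset is 28, already 4-aligned
+//     };
+//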
+void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typeList)
+{
+ if (!qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory())
+ return;
+ if (qualifier.layoutPacking != ElpStd140 && qualifier.layoutPacking != ElpStd430 && qualifier.layoutPacking != ElpScalar)
+ return;
+
+ int offset = 0;
+ int memberSize;
+ for (unsigned int member = 0; member < typeList.size(); ++member) {
+ TQualifier& memberQualifier = typeList[member].type->getQualifier();
+ const TSourceLoc& memberLoc = typeList[member].loc;
+
+ // "When align is applied to an array, it effects only the start of the array, not the array's internal stride."
+
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = typeList[member].type->getQualifier().layoutMatrix;
+ int dummyStride;
+ int memberAlignment = intermediate.getMemberAlignment(*typeList[member].type, memberSize, dummyStride, qualifier.layoutPacking,
+ subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor : qualifier.layoutMatrix == ElmRowMajor);
+ if (memberQualifier.hasOffset()) {
+ // "The specified offset must be a multiple
+ // of the base alignment of the type of the block member it qualifies, or a compile-time error results."
+ if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment))
+ error(memberLoc, "must be a multiple of the member's alignment", "offset", "");
+
+ // GLSL: "It is a compile-time error to specify an offset that is smaller than the offset of the previous
+ // member in the block or that lies within the previous member of the block"
+ if (spvVersion.spv == 0) {
+ if (memberQualifier.layoutOffset < offset)
+ error(memberLoc, "cannot lie in previous members", "offset", "");
+
+ // "The offset qualifier forces the qualified member to start at or after the specified
+ // integral-constant expression, which will be its byte offset from the beginning of the buffer.
+ // "The actual offset of a member is computed as
+ // follows: If offset was declared, start with that offset, otherwise start with the next available offset."
+ offset = std::max(offset, memberQualifier.layoutOffset);
+ } else {
+ // TODO: Vulkan: "It is a compile-time error to have any offset, explicit or assigned,
+ // that lies within another member of the block."
+
+ offset = memberQualifier.layoutOffset;
+ }
+ }
+
+ // "The actual alignment of a member will be the greater of the specified align alignment and the standard
+ // (e.g., std140) base alignment for the member's type."
+ if (memberQualifier.hasAlign())
+ memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign);
+
+ // "If the resulting offset is not a multiple of the actual alignment,
+ // increase it to the first offset that is a multiple of
+ // the actual alignment."
+ RoundToPow2(offset, memberAlignment);
+ typeList[member].type->getQualifier().layoutOffset = offset;
+ offset += memberSize;
+ }
+}
+
+// For an identifier that is already declared, add more qualification to it.
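+//
+// For example (hypothetical GLSL):
+//
+//     out vec4 color;
+//     invariant color;   // requalifies the existing 'color' declaration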
+void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier)
+{
+ TSymbol* symbol = symbolTable.find(identifier);
+
+ // A forward declaration of a block reference looks to the grammar like adding
+ // a qualifier to an existing symbol. Detect this and create the block reference
+ // type with an empty type list, which will be filled in later in
+ // TParseContext::declareBlock.
+ if (!symbol && qualifier.layoutBufferReference) {
+ TTypeList typeList;
+        TType blockType(&typeList, identifier, qualifier);
+ TType blockNameType(EbtReference, blockType, identifier);
+ TVariable* blockNameVar = new TVariable(&identifier, blockNameType, true);
+ if (! symbolTable.insert(*blockNameVar)) {
+ error(loc, "block name cannot redefine a non-block name", blockName->c_str(), "");
+ }
+ return;
+ }
+
+ if (! symbol) {
+ error(loc, "identifier not previously declared", identifier.c_str(), "");
+ return;
+ }
+ if (symbol->getAsFunction()) {
+ error(loc, "cannot re-qualify a function name", identifier.c_str(), "");
+ return;
+ }
+
+ if (qualifier.isAuxiliary() ||
+ qualifier.isMemory() ||
+ qualifier.isInterpolation() ||
+ qualifier.hasLayout() ||
+ qualifier.storage != EvqTemporary ||
+ qualifier.precision != EpqNone) {
+ error(loc, "cannot add storage, auxiliary, memory, interpolation, layout, or precision qualifier to an existing variable", identifier.c_str(), "");
+ return;
+ }
+
+ // For read-only built-ins, add a new symbol for holding the modified qualifier.
+ // This will bring up an entire block, if a block type has to be modified (e.g., gl_Position inside a block)
+ if (symbol->isReadOnly())
+ symbol = symbolTable.copyUp(symbol);
+
+ if (qualifier.invariant) {
+ if (intermediate.inIoAccessed(identifier))
+ error(loc, "cannot change qualification after use", "invariant", "");
+ symbol->getWritableType().getQualifier().invariant = true;
+ invariantCheck(loc, symbol->getType().getQualifier());
+ } else if (qualifier.noContraction) {
+ if (intermediate.inIoAccessed(identifier))
+ error(loc, "cannot change qualification after use", "precise", "");
+ symbol->getWritableType().getQualifier().noContraction = true;
+ } else if (qualifier.specConstant) {
+ symbol->getWritableType().getQualifier().makeSpecConstant();
+ if (qualifier.hasSpecConstantId())
+ symbol->getWritableType().getQualifier().layoutSpecConstantId = qualifier.layoutSpecConstantId;
+ } else
+ warn(loc, "unknown requalification", "", "");
+}
+
+void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, TIdentifierList& identifiers)
+{
+ for (unsigned int i = 0; i < identifiers.size(); ++i)
+ addQualifierToExisting(loc, qualifier, *identifiers[i]);
+}
+
+// Make sure 'invariant' isn't being applied to a non-allowed object.
+void TParseContext::invariantCheck(const TSourceLoc& loc, const TQualifier& qualifier)
+{
+ if (! qualifier.invariant)
+ return;
+
+ bool pipeOut = qualifier.isPipeOutput();
+ bool pipeIn = qualifier.isPipeInput();
+ if (version >= 300 || (profile != EEsProfile && version >= 420)) {
+ if (! pipeOut)
+ error(loc, "can only apply to an output", "invariant", "");
+ } else {
+ if ((language == EShLangVertex && pipeIn) || (! pipeOut && ! pipeIn))
+ error(loc, "can only apply to an output, or to an input in a non-vertex stage\n", "invariant", "");
+ }
+}
+
+//
+// Updating default qualifier for the case of a declaration with just a qualifier,
+// no type, block, or identifier.
+//
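+// For example (hypothetical GLSL), a compute-shader declaration such as
+//
+//     layout(local_size_x = 64) in;
+//
+// arrives here as a bare qualifier with no type, block, or identifier.
+//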
+void TParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, const TPublicType& publicType)
+{
+ if (publicType.shaderQualifiers.vertices != TQualifier::layoutNotSet) {
+#ifdef NV_EXTENSIONS
+ assert(language == EShLangTessControl || language == EShLangGeometry || language == EShLangMeshNV);
+#else
+ assert(language == EShLangTessControl || language == EShLangGeometry);
+#endif
+ const char* id = (language == EShLangTessControl) ? "vertices" : "max_vertices";
+
+ if (publicType.qualifier.storage != EvqVaryingOut)
+ error(loc, "can only apply to 'out'", id, "");
+ if (! intermediate.setVertices(publicType.shaderQualifiers.vertices))
+ error(loc, "cannot change previously set layout value", id, "");
+
+ if (language == EShLangTessControl)
+ checkIoArraysConsistency(loc);
+ }
+#ifdef NV_EXTENSIONS
+ if (publicType.shaderQualifiers.primitives != TQualifier::layoutNotSet) {
+ assert(language == EShLangMeshNV);
+ const char* id = "max_primitives";
+
+ if (publicType.qualifier.storage != EvqVaryingOut)
+ error(loc, "can only apply to 'out'", id, "");
+ if (! intermediate.setPrimitives(publicType.shaderQualifiers.primitives))
+ error(loc, "cannot change previously set layout value", id, "");
+ }
+#endif
+ if (publicType.shaderQualifiers.invocations != TQualifier::layoutNotSet) {
+ if (publicType.qualifier.storage != EvqVaryingIn)
+ error(loc, "can only apply to 'in'", "invocations", "");
+ if (! intermediate.setInvocations(publicType.shaderQualifiers.invocations))
+ error(loc, "cannot change previously set layout value", "invocations", "");
+ }
+ if (publicType.shaderQualifiers.geometry != ElgNone) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ switch (publicType.shaderQualifiers.geometry) {
+ case ElgPoints:
+ case ElgLines:
+ case ElgLinesAdjacency:
+ case ElgTriangles:
+ case ElgTrianglesAdjacency:
+ case ElgQuads:
+ case ElgIsolines:
+#ifdef NV_EXTENSIONS
+ if (language == EShLangMeshNV) {
+ error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ }
+#endif
+ if (intermediate.setInputPrimitive(publicType.shaderQualifiers.geometry)) {
+ if (language == EShLangGeometry)
+ checkIoArraysConsistency(loc);
+ } else
+ error(loc, "cannot change previously set input primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ default:
+ error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ }
+ } else if (publicType.qualifier.storage == EvqVaryingOut) {
+ switch (publicType.shaderQualifiers.geometry) {
+#ifdef NV_EXTENSIONS
+ case ElgLines:
+ case ElgTriangles:
+ if (language != EShLangMeshNV) {
+ error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ }
+#endif
+ // Fall through
+ case ElgPoints:
+ case ElgLineStrip:
+ case ElgTriangleStrip:
+ if (! intermediate.setOutputPrimitive(publicType.shaderQualifiers.geometry))
+ error(loc, "cannot change previously set output primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ break;
+ default:
+ error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), "");
+ }
+ } else
+ error(loc, "cannot apply to:", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), GetStorageQualifierString(publicType.qualifier.storage));
+ }
+ if (publicType.shaderQualifiers.spacing != EvsNone) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setVertexSpacing(publicType.shaderQualifiers.spacing))
+ error(loc, "cannot change previously set vertex spacing", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), "");
+ } else
+ error(loc, "can only apply to 'in'", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), "");
+ }
+ if (publicType.shaderQualifiers.order != EvoNone) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setVertexOrder(publicType.shaderQualifiers.order))
+ error(loc, "cannot change previously set vertex order", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), "");
+ } else
+ error(loc, "can only apply to 'in'", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), "");
+ }
+ if (publicType.shaderQualifiers.pointMode) {
+ if (publicType.qualifier.storage == EvqVaryingIn)
+ intermediate.setPointMode();
+ else
+ error(loc, "can only apply to 'in'", "point_mode", "");
+ }
+ for (int i = 0; i < 3; ++i) {
+ if (publicType.shaderQualifiers.localSize[i] > 1) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setLocalSize(i, publicType.shaderQualifiers.localSize[i]))
+ error(loc, "cannot change previously set size", "local_size", "");
+ else {
+ int max = 0;
+ if (language == EShLangCompute) {
+ switch (i) {
+ case 0: max = resources.maxComputeWorkGroupSizeX; break;
+ case 1: max = resources.maxComputeWorkGroupSizeY; break;
+ case 2: max = resources.maxComputeWorkGroupSizeZ; break;
+ default: break;
+ }
+ if (intermediate.getLocalSize(i) > (unsigned int)max)
+ error(loc, "too large; see gl_MaxComputeWorkGroupSize", "local_size", "");
+ }
+#ifdef NV_EXTENSIONS
+ else if (language == EShLangMeshNV) {
+ switch (i) {
+ case 0: max = resources.maxMeshWorkGroupSizeX_NV; break;
+ case 1: max = resources.maxMeshWorkGroupSizeY_NV; break;
+ case 2: max = resources.maxMeshWorkGroupSizeZ_NV; break;
+ default: break;
+ }
+ if (intermediate.getLocalSize(i) > (unsigned int)max)
+ error(loc, "too large; see gl_MaxMeshWorkGroupSizeNV", "local_size", "");
+ }
+ else if (language == EShLangTaskNV) {
+ switch (i) {
+ case 0: max = resources.maxTaskWorkGroupSizeX_NV; break;
+ case 1: max = resources.maxTaskWorkGroupSizeY_NV; break;
+ case 2: max = resources.maxTaskWorkGroupSizeZ_NV; break;
+ default: break;
+ }
+ if (intermediate.getLocalSize(i) > (unsigned int)max)
+ error(loc, "too large; see gl_MaxTaskWorkGroupSizeNV", "local_size", "");
+ }
+#endif
+ else {
+ assert(0);
+ }
+
+ // Fix the existing constant gl_WorkGroupSize with this new information.
+ TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
+ if (workGroupSize != nullptr)
+ workGroupSize->getWritableConstArray()[i].setUConst(intermediate.getLocalSize(i));
+ }
+ } else
+ error(loc, "can only apply to 'in'", "local_size", "");
+ }
+ if (publicType.shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if (! intermediate.setLocalSizeSpecId(i, publicType.shaderQualifiers.localSizeSpecId[i]))
+ error(loc, "cannot change previously set size", "local_size", "");
+ } else
+ error(loc, "can only apply to 'in'", "local_size id", "");
+ // Set the workgroup built-in variable as a specialization constant
+ TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize");
+ if (workGroupSize != nullptr)
+ workGroupSize->getWritableType().getQualifier().specConstant = true;
+ }
+ }
+ if (publicType.shaderQualifiers.earlyFragmentTests) {
+ if (publicType.qualifier.storage == EvqVaryingIn)
+ intermediate.setEarlyFragmentTests();
+ else
+ error(loc, "can only apply to 'in'", "early_fragment_tests", "");
+ }
+ if (publicType.shaderQualifiers.postDepthCoverage) {
+ if (publicType.qualifier.storage == EvqVaryingIn)
+ intermediate.setPostDepthCoverage();
+ else
+ error(loc, "can only apply to 'in'", "post_coverage_coverage", "");
+ }
+ if (publicType.shaderQualifiers.blendEquation) {
+ if (publicType.qualifier.storage != EvqVaryingOut)
+ error(loc, "can only apply to 'out'", "blend equation", "");
+ }
+
+#ifdef NV_EXTENSIONS
+ if (publicType.shaderQualifiers.layoutDerivativeGroupQuads &&
+ publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
+ error(loc, "cannot be both specified", "derivative_group_quadsNV and derivative_group_linearNV", "");
+ }
+
+ if (publicType.shaderQualifiers.layoutDerivativeGroupQuads) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if ((intermediate.getLocalSize(0) & 1) ||
+ (intermediate.getLocalSize(1) & 1))
+ error(loc, "requires local_size_x and local_size_y to be multiple of two", "derivative_group_quadsNV", "");
+ else
+ intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupQuads);
+ }
+ else
+ error(loc, "can only apply to 'in'", "derivative_group_quadsNV", "");
+ }
+ if (publicType.shaderQualifiers.layoutDerivativeGroupLinear) {
+ if (publicType.qualifier.storage == EvqVaryingIn) {
+ if((intermediate.getLocalSize(0) *
+ intermediate.getLocalSize(1) *
+ intermediate.getLocalSize(2)) % 4 != 0)
+ error(loc, "requires total group size to be multiple of four", "derivative_group_linearNV", "");
+ else
+ intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupLinear);
+ }
+ else
+ error(loc, "can only apply to 'in'", "derivative_group_linearNV", "");
+ }
+ // Check mesh out array sizes, once all the necessary out qualifiers are defined.
+ if ((language == EShLangMeshNV) &&
+ (intermediate.getVertices() != TQualifier::layoutNotSet) &&
+ (intermediate.getPrimitives() != TQualifier::layoutNotSet) &&
+ (intermediate.getOutputPrimitive() != ElgNone))
+ {
+ checkIoArraysConsistency(loc);
+ }
+#endif
+ const TQualifier& qualifier = publicType.qualifier;
+
+ if (qualifier.isAuxiliary() ||
+ qualifier.isMemory() ||
+ qualifier.isInterpolation() ||
+ qualifier.precision != EpqNone)
+ error(loc, "cannot use auxiliary, memory, interpolation, or precision qualifier in a default qualifier declaration (declaration with no type)", "qualifier", "");
+ // "The offset qualifier can only be used on block members of blocks..."
+ // "The align qualifier can only be used on blocks or block members..."
+ if (qualifier.hasOffset() ||
+ qualifier.hasAlign())
+ error(loc, "cannot use offset or align qualifiers in a default qualifier declaration (declaration with no type)", "layout qualifier", "");
+
+ layoutQualifierCheck(loc, qualifier);
+
+ switch (qualifier.storage) {
+ case EvqUniform:
+ if (qualifier.hasMatrix())
+ globalUniformDefaults.layoutMatrix = qualifier.layoutMatrix;
+ if (qualifier.hasPacking())
+ globalUniformDefaults.layoutPacking = qualifier.layoutPacking;
+ break;
+ case EvqBuffer:
+ if (qualifier.hasMatrix())
+ globalBufferDefaults.layoutMatrix = qualifier.layoutMatrix;
+ if (qualifier.hasPacking())
+ globalBufferDefaults.layoutPacking = qualifier.layoutPacking;
+ break;
+ case EvqVaryingIn:
+ break;
+ case EvqVaryingOut:
+ if (qualifier.hasStream())
+ globalOutputDefaults.layoutStream = qualifier.layoutStream;
+ if (qualifier.hasXfbBuffer())
+ globalOutputDefaults.layoutXfbBuffer = qualifier.layoutXfbBuffer;
+ if (globalOutputDefaults.hasXfbBuffer() && qualifier.hasXfbStride()) {
+ if (! intermediate.setXfbBufferStride(globalOutputDefaults.layoutXfbBuffer, qualifier.layoutXfbStride))
+ error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer);
+ }
+ break;
+ default:
+ error(loc, "default qualifier requires 'uniform', 'buffer', 'in', or 'out' storage qualification", "", "");
+ return;
+ }
+
+ if (qualifier.hasBinding())
+ error(loc, "cannot declare a default, include a type or full declaration", "binding", "");
+ if (qualifier.hasAnyLocation())
+ error(loc, "cannot declare a default, use a full declaration", "location/component/index", "");
+ if (qualifier.hasXfbOffset())
+ error(loc, "cannot declare a default, use a full declaration", "xfb_offset", "");
+ if (qualifier.layoutPushConstant)
+ error(loc, "cannot declare a default, can only be used on a block", "push_constant", "");
+ if (qualifier.layoutBufferReference)
+ error(loc, "cannot declare a default, can only be used on a block", "buffer_reference", "");
+ if (qualifier.hasSpecConstantId())
+ error(loc, "cannot declare a default, can only be used on a scalar", "constant_id", "");
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutShaderRecordNV)
+ error(loc, "cannot declare a default, can only be used on a block", "shaderRecordNV", "");
+#endif
+}
+
+//
+// Take the sequence of statements that has been built up since the last case/default,
+// put it on the list of top-level nodes for the current (inner-most) switch statement,
+// and follow that by the case/default we are on now. (See switch topology comment on
+// TIntermSwitch.)
+//
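+// For example (hypothetical GLSL), in
+//
+//     switch (x) { case 1: f(); break; case 1: g(); break; }
+//
+// the second 'case 1' is reported below as a duplicated value.
+//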
+void TParseContext::wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode)
+{
+ TIntermSequence* switchSequence = switchSequenceStack.back();
+
+ if (statements) {
+ if (switchSequence->size() == 0)
+ error(statements->getLoc(), "cannot have statements before first case/default label", "switch", "");
+ statements->setOperator(EOpSequence);
+ switchSequence->push_back(statements);
+ }
+ if (branchNode) {
+ // check all previous cases for the same label (or both are 'default')
+ for (unsigned int s = 0; s < switchSequence->size(); ++s) {
+ TIntermBranch* prevBranch = (*switchSequence)[s]->getAsBranchNode();
+ if (prevBranch) {
+ TIntermTyped* prevExpression = prevBranch->getExpression();
+ TIntermTyped* newExpression = branchNode->getAsBranchNode()->getExpression();
+ if (prevExpression == nullptr && newExpression == nullptr)
+ error(branchNode->getLoc(), "duplicate label", "default", "");
+ else if (prevExpression != nullptr &&
+ newExpression != nullptr &&
+ prevExpression->getAsConstantUnion() &&
+ newExpression->getAsConstantUnion() &&
+ prevExpression->getAsConstantUnion()->getConstArray()[0].getIConst() ==
+ newExpression->getAsConstantUnion()->getConstArray()[0].getIConst())
+ error(branchNode->getLoc(), "duplicated value", "case", "");
+ }
+ }
+ switchSequence->push_back(branchNode);
+ }
+}
+
+//
+// Turn the top-level node sequence built up by wrapupSwitchSubsequence()
+// into a switch node.
+//
+TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expression, TIntermAggregate* lastStatements)
+{
+ profileRequires(loc, EEsProfile, 300, nullptr, "switch statements");
+ profileRequires(loc, ENoProfile, 130, nullptr, "switch statements");
+
+ wrapupSwitchSubsequence(lastStatements, nullptr);
+
+ if (expression == nullptr ||
+ (expression->getBasicType() != EbtInt && expression->getBasicType() != EbtUint) ||
+ expression->getType().isArray() || expression->getType().isMatrix() || expression->getType().isVector())
+ error(loc, "condition must be a scalar integer expression", "switch", "");
+
+ // If there is nothing to do, drop the switch but still execute the expression
+ TIntermSequence* switchSequence = switchSequenceStack.back();
+ if (switchSequence->size() == 0)
+ return expression;
+
+ if (lastStatements == nullptr) {
+        // This was originally an ERROR, because early versions of the specification said
+        // "it is an error to have no statement between a label and the end of the switch statement."
+        // The specifications were updated to remove this (it being ill-defined what a "statement" was),
+        // so this became a warning. However, 3.0 tests still check for the error.
+ if (profile == EEsProfile && version <= 300 && ! relaxedErrors())
+ error(loc, "last case/default label not followed by statements", "switch", "");
+ else
+ warn(loc, "last case/default label not followed by statements", "switch", "");
+
+ // emulate a break for error recovery
+ lastStatements = intermediate.makeAggregate(intermediate.addBranch(EOpBreak, loc));
+ lastStatements->setOperator(EOpSequence);
+ switchSequence->push_back(lastStatements);
+ }
+
+ TIntermAggregate* body = new TIntermAggregate(EOpSequence);
+ body->getSequence() = *switchSequenceStack.back();
+ body->setLoc(loc);
+
+ TIntermSwitch* switchNode = new TIntermSwitch(expression, body);
+ switchNode->setLoc(loc);
+
+ return switchNode;
+}
+
+} // end namespace glslang
+
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.h b/src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.h
new file mode 100644
index 0000000..a1ffe64
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/ParseHelper.h
@@ -0,0 +1,510 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// This header defines a two-level parse-helper hierarchy, derived from
+// TParseVersions:
+// - TParseContextBase: sharable across multiple parsers
+// - TParseContext: GLSL specific helper
+//
+
+#ifndef _PARSER_HELPER_INCLUDED_
+#define _PARSER_HELPER_INCLUDED_
+
+#include <cstdarg>
+#include <functional>
+
+#include "parseVersions.h"
+#include "../Include/ShHandle.h"
+#include "SymbolTable.h"
+#include "localintermediate.h"
+#include "Scan.h"
+#include "attribute.h"
+
+namespace glslang {
+
+struct TPragma {
+ TPragma(bool o, bool d) : optimize(o), debug(d) { }
+ bool optimize;
+ bool debug;
+ TPragmaTable pragmaTable;
+};
+
+class TScanContext;
+class TPpContext;
+
+typedef std::set<int> TIdSetType;
+
+//
+// Sharable code (as well as what's in TParseVersions) across
+// parse helpers.
+//
+class TParseContextBase : public TParseVersions {
+public:
+ TParseContextBase(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, int version,
+ EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
+ TInfoSink& infoSink, bool forwardCompatible, EShMessages messages,
+ const TString* entryPoint = nullptr)
+ : TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
+ scopeMangler("::"),
+ symbolTable(symbolTable),
+ statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
+ postEntryPointReturn(false),
+ contextPragma(true, false),
+ parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr),
+ limits(resources.limits),
+ globalUniformBlock(nullptr),
+ globalUniformBinding(TQualifier::layoutBindingEnd),
+ globalUniformSet(TQualifier::layoutSetEnd)
+ {
+ if (entryPoint != nullptr)
+ sourceEntryPointName = *entryPoint;
+ }
+ virtual ~TParseContextBase() { }
+
+ virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+ virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+ virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+ virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...);
+
+ virtual void setLimits(const TBuiltInResource&) = 0;
+
+ void checkIndex(const TSourceLoc&, const TType&, int& index);
+
+ EShLanguage getLanguage() const { return language; }
+ void setScanContext(TScanContext* c) { scanContext = c; }
+ TScanContext* getScanContext() const { return scanContext; }
+ void setPpContext(TPpContext* c) { ppContext = c; }
+ TPpContext* getPpContext() const { return ppContext; }
+
+ virtual void setLineCallback(const std::function<void(int, int, bool, int, const char*)>& func) { lineCallback = func; }
+ virtual void setExtensionCallback(const std::function<void(int, const char*, const char*)>& func) { extensionCallback = func; }
+ virtual void setVersionCallback(const std::function<void(int, int, const char*)>& func) { versionCallback = func; }
+ virtual void setPragmaCallback(const std::function<void(int, const TVector<TString>&)>& func) { pragmaCallback = func; }
+ virtual void setErrorCallback(const std::function<void(int, const char*)>& func) { errorCallback = func; }
+
+ virtual void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) = 0;
+ virtual bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) = 0;
+ virtual bool lineDirectiveShouldSetNextLine() const = 0;
+ virtual void handlePragma(const TSourceLoc&, const TVector<TString>&) = 0;
+
+ virtual bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) = 0;
+
+ virtual void notifyVersion(int line, int version, const char* type_string)
+ {
+ if (versionCallback)
+ versionCallback(line, version, type_string);
+ }
+ virtual void notifyErrorDirective(int line, const char* error_message)
+ {
+ if (errorCallback)
+ errorCallback(line, error_message);
+ }
+ virtual void notifyLineDirective(int curLineNo, int newLineNo, bool hasSource, int sourceNum, const char* sourceName)
+ {
+ if (lineCallback)
+ lineCallback(curLineNo, newLineNo, hasSource, sourceNum, sourceName);
+ }
+ virtual void notifyExtensionDirective(int line, const char* extension, const char* behavior)
+ {
+ if (extensionCallback)
+ extensionCallback(line, extension, behavior);
+ }
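+
+    // Example (illustrative only, not part of the original header): a tool
+    // can observe directives through the hooks above, e.g.
+    //     pc.setVersionCallback([](int line, int version, const char* type_string)
+    //         { /* record the #version directive */ });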
+
+ // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
+ virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
+
+ // Potentially rename shader entry point function
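+    // (Illustrative example: if the client designated "mainVS" as the source
+    // entry point while the real entry point name is "main", a function
+    // named "mainVS" in the shader is renamed to "main" here.)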
+ void renameShaderFunction(TString*& name) const
+ {
+ // Replace the entry point name given in the shader with the real entry point name,
+ // if there is a substitution.
+ if (name != nullptr && *name == sourceEntryPointName && intermediate.getEntryPointName().size() > 0)
+ name = NewPoolTString(intermediate.getEntryPointName().c_str());
+ }
+
+ virtual bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
+ virtual void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
+
+ const char* const scopeMangler;
+
+ // Basic parsing state, easily accessible to the grammar
+
+ TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile
+ int statementNestingLevel; // 0 if outside all flow control or compound statements
+ int loopNestingLevel; // 0 if outside all loops
+ int structNestingLevel; // 0 if outside blocks and structures
+ int controlFlowNestingLevel; // 0 if outside all flow control
+ const TType* currentFunctionType; // the return type of the function that's currently being parsed
+ bool functionReturnsValue; // true if a non-void function has a return
+ // if inside a function, true if the function is the entry point and this is after a return statement
+ bool postEntryPointReturn;
+ // case, node, case, case, node, ...; ensure only one node between cases; stack of them for nesting
+ TList<TIntermSequence*> switchSequenceStack;
+ // the statementNestingLevel the current switch statement is at, which must match the level of its case statements
+ TList<int> switchLevel;
+ struct TPragma contextPragma;
+
+protected:
+ TParseContextBase(TParseContextBase&);
+ TParseContextBase& operator=(TParseContextBase&);
+
+ const bool parsingBuiltins; // true if parsing built-in symbols/functions
+ TVector<TSymbol*> linkageSymbols; // will be transferred to 'linkage', after all editing is done, order preserving
+ TScanContext* scanContext;
+ TPpContext* ppContext;
+ TBuiltInResource resources;
+ TLimits& limits;
+ TString sourceEntryPointName;
+
+ // These, if set, will be called when a line, pragma ... is preprocessed.
+ // They will be called with any parameters to the original directive.
+ std::function<void(int, int, bool, int, const char*)> lineCallback;
+ std::function<void(int, const TVector<TString>&)> pragmaCallback;
+ std::function<void(int, int, const char*)> versionCallback;
+ std::function<void(int, const char*, const char*)> extensionCallback;
+ std::function<void(int, const char*)> errorCallback;
+
+ // see implementation for detail
+ const TFunction* selectFunction(const TVector<const TFunction*>, const TFunction&,
+ std::function<bool(const TType&, const TType&, TOperator, int arg)>,
+ std::function<bool(const TType&, const TType&, const TType&)>,
+ /* output */ bool& tie);
+
+ virtual void parseSwizzleSelector(const TSourceLoc&, const TString&, int size,
+ TSwizzleSelectors<TVectorSelector>&);
+
+ // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
+ TVariable* globalUniformBlock; // the actual block, inserted into the symbol table
+ unsigned int globalUniformBinding; // the block's binding number
+ unsigned int globalUniformSet; // the block's set number
+ int firstNewMember; // the index of the first member not yet inserted into the symbol table
+ // override this to set the language-specific name
+ virtual const char* getGlobalUniformBlockName() const { return ""; }
+ virtual void setUniformBlockDefaults(TType&) const { }
+ virtual void finalizeGlobalUniformBlockLayout(TVariable&) { }
+ virtual void outputMessage(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, TPrefixType prefix,
+ va_list args);
+ virtual void trackLinkage(TSymbol& symbol);
+ virtual void makeEditable(TSymbol*&);
+ virtual TVariable* getEditableVariable(const char* name);
+ virtual void finish();
+};
+
+//
+// Manage the state for when to respect precision qualifiers and when to warn about
+// the defaults being different than might be expected.
+//
+class TPrecisionManager {
+public:
+    TPrecisionManager() : obey(false), warn(false), explicitIntDefault(false), explicitFloatDefault(false) { }
+ virtual ~TPrecisionManager() {}
+
+ void respectPrecisionQualifiers() { obey = true; }
+ bool respectingPrecisionQualifiers() const { return obey; }
+ bool shouldWarnAboutDefaults() const { return warn; }
+ void defaultWarningGiven() { warn = false; }
+ void warnAboutDefaults() { warn = true; }
+ void explicitIntDefaultSeen()
+ {
+ explicitIntDefault = true;
+ if (explicitFloatDefault)
+ warn = false;
+ }
+ void explicitFloatDefaultSeen()
+ {
+ explicitFloatDefault = true;
+ if (explicitIntDefault)
+ warn = false;
+ }
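+
+    // Example (illustrative): after warnAboutDefaults(), the warning stays
+    // pending until both explicitIntDefaultSeen() and explicitFloatDefaultSeen()
+    // have run, or until defaultWarningGiven() clears it.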
+
+protected:
+ bool obey; // respect precision qualifiers
+ bool warn; // need to give a warning about the defaults
+ bool explicitIntDefault; // user set the default for int/uint
+ bool explicitFloatDefault; // user set the default for float
+};
+
+//
+// GLSL-specific parse helper. Should have GLSL in the name, but that's
+// too big of a change for comparing branches at the moment, and perhaps
+// impacts downstream consumers as well.
+//
+class TParseContext : public TParseContextBase {
+public:
+ TParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins, int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&,
+ bool forwardCompatible = false, EShMessages messages = EShMsgDefault,
+ const TString* entryPoint = nullptr);
+ virtual ~TParseContext();
+
+    bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); }
+ void setPrecisionDefaults();
+
+ void setLimits(const TBuiltInResource&) override;
+ bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override;
+ void parserError(const char* s); // for bison's yyerror
+
+ void reservedErrorCheck(const TSourceLoc&, const TString&);
+ void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) override;
+ bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) override;
+ bool lineDirectiveShouldSetNextLine() const override;
+ bool builtInName(const TString&);
+
+ void handlePragma(const TSourceLoc&, const TVector<TString>&) override;
+ TIntermTyped* handleVariable(const TSourceLoc&, TSymbol* symbol, const TString* string);
+ TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
+ void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
+
+ void makeEditable(TSymbol*&) override;
+ bool isIoResizeArray(const TType&) const;
+ void fixIoArraySize(const TSourceLoc&, TType&);
+ void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
+ void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
+ void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
+ int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const;
+ void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&);
+
+ TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
+ TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
+ TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
+ void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, int member, const TString& memberName);
+ TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
+ TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&);
+ TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*);
+ TIntermTyped* handleBuiltInFunctionCall(TSourceLoc, TIntermNode* arguments, const TFunction& function);
+ void computeBuiltinPrecisions(TIntermTyped&, const TFunction&);
+ TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*);
+ void checkLocation(const TSourceLoc&, TOperator);
+ TIntermTyped* handleLengthMethod(const TSourceLoc&, TFunction*, TIntermNode*);
+ void addInputArgumentConversions(const TFunction&, TIntermNode*&) const;
+ TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermAggregate&) const;
+ void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&);
+ void nonOpBuiltInCheck(const TSourceLoc&, const TFunction&, TIntermAggregate&);
+ void userFunctionCallCheck(const TSourceLoc&, TIntermAggregate&);
+ void samplerConstructorLocationCheck(const TSourceLoc&, const char* token, TIntermNode*);
+ TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&);
+ void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier);
+ void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier);
+ void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode);
+
+ void assignError(const TSourceLoc&, const char* op, TString left, TString right);
+ void unaryOpError(const TSourceLoc&, const char* op, TString operand);
+ void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right);
+ void variableCheck(TIntermTyped*& nodePtr);
+ bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
+ void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
+ void constantValueCheck(TIntermTyped* node, const char* token);
+ void integerCheck(const TIntermTyped* node, const char* token);
+ void globalCheck(const TSourceLoc&, const char* token);
+ bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
+ bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
+ void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&, const char *sizeType);
+ bool arrayQualifierError(const TSourceLoc&, const TQualifier&);
+ bool arrayError(const TSourceLoc&, const TType&);
+ void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
+ void structArrayCheck(const TSourceLoc&, const TType& structure);
+ void arraySizesCheck(const TSourceLoc&, const TQualifier&, TArraySizes*, const TIntermTyped* initializer, bool lastMember);
+ void arrayOfArrayVersionCheck(const TSourceLoc&, const TArraySizes*);
+ bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType);
+ void boolCheck(const TSourceLoc&, const TIntermTyped*);
+ void boolCheck(const TSourceLoc&, const TPublicType&);
+ void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer);
+ void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier);
+    void accStructNVCheck(const TSourceLoc&, const TType&, const TString& identifier);
+ void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier);
+ void memberQualifierCheck(glslang::TPublicType&);
+ void globalQualifierFixCheck(const TSourceLoc&, TQualifier&);
+ void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&);
+ bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
+ void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force);
+ void setDefaultPrecision(const TSourceLoc&, TPublicType&, TPrecisionQualifier);
+ int computeSamplerTypeIndex(TSampler&);
+ TPrecisionQualifier getDefaultPrecision(TPublicType&);
+ void precisionQualifierCheck(const TSourceLoc&, TBasicType, TQualifier&);
+ void parameterTypeCheck(const TSourceLoc&, TStorageQualifier qualifier, const TType& type);
+    bool containsFieldWithBasicType(const TType& type, TBasicType basicType);
+ TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&);
+ void redeclareBuiltinBlock(const TSourceLoc&, TTypeList& typeList, const TString& blockName, const TString* instanceName, TArraySizes* arraySizes);
+ void paramCheckFixStorage(const TSourceLoc&, const TStorageQualifier&, TType& type);
+ void paramCheckFix(const TSourceLoc&, const TQualifier&, TType& type);
+ void nestedBlockCheck(const TSourceLoc&);
+ void nestedStructCheck(const TSourceLoc&);
+ void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op);
+ void opaqueCheck(const TSourceLoc&, const TType&, const char* op);
+ void referenceCheck(const TSourceLoc&, const TType&, const char* op);
+ void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op);
+ void specializationCheck(const TSourceLoc&, const TType&, const char* op);
+ void structTypeCheck(const TSourceLoc&, TPublicType&);
+ void inductiveLoopCheck(const TSourceLoc&, TIntermNode* init, TIntermLoop* loop);
+ void arrayLimitCheck(const TSourceLoc&, const TString&, int size);
+ void limitCheck(const TSourceLoc&, int value, const char* limit, const char* feature);
+
+ void inductiveLoopBodyCheck(TIntermNode*, int loopIndexId, TSymbolTable&);
+ void constantIndexExpressionCheck(TIntermNode*);
+
+ void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&);
+ void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&, const TIntermTyped*);
+ void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly);
+ void layoutObjectCheck(const TSourceLoc&, const TSymbol&);
+ void layoutMemberLocationArrayCheck(const TSourceLoc&, bool memberWithLocation, TArraySizes* arraySizes);
+ void layoutTypeCheck(const TSourceLoc&, const TType&);
+ void layoutQualifierCheck(const TSourceLoc&, const TQualifier&);
+ void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&);
+ void fixOffset(const TSourceLoc&, TSymbol&);
+
+ const TFunction* findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ const TFunction* findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
+ void declareTypeDefaults(const TSourceLoc&, const TPublicType&);
+    TIntermNode* declareVariable(const TSourceLoc&, TString& identifier, const TPublicType&, TArraySizes* typeArray = nullptr, TIntermTyped* initializer = nullptr);
+ TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
+ TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
+ TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
+    void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = nullptr, TArraySizes* arraySizes = nullptr);
+ void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
+ void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
+ void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
+ void fixXfbOffsets(TQualifier&, TTypeList&);
+ void fixBlockUniformOffsets(TQualifier&, TTypeList&);
+ void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
+ void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
+ void invariantCheck(const TSourceLoc&, const TQualifier&);
+ void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
+ void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
+ TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
+
+ TAttributeType attributeFromName(const TString& name) const;
+ TAttributes* makeAttributes(const TString& identifier) const;
+ TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const;
+ TAttributes* mergeAttributes(TAttributes*, TAttributes*) const;
+
+ // Determine selection control from attributes
+ void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*);
+ void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
+
+ // Determine loop control from attributes
+ void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
+
+ void resizeMeshViewDimension(const TSourceLoc&, TType&);
+
+protected:
+ void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type);
+ void inheritGlobalDefaults(TQualifier& dst) const;
+ TVariable* makeInternalVariable(const char* name, const TType&) const;
+ TVariable* declareNonArray(const TSourceLoc&, const TString& identifier, const TType&);
+ void declareArray(const TSourceLoc&, const TString& identifier, const TType&, TSymbol*&);
+ void checkRuntimeSizable(const TSourceLoc&, const TIntermTyped&);
+ bool isRuntimeLength(const TIntermTyped&) const;
+ TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
+ TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
+ void finish() override;
+
+public:
+ //
+ // Generally, bison productions, the scanner, and the PP need read/write access to these; just give them direct access
+ //
+
+ // Current state of parsing
+ bool inMain; // if inside a function, true if the function is main
+ const TString* blockName;
+ TQualifier currentBlockQualifier;
+ TPrecisionQualifier defaultPrecision[EbtNumTypes];
+ TBuiltInResource resources;
+ TLimits& limits;
+
+protected:
+ TParseContext(TParseContext&);
+ TParseContext& operator=(TParseContext&);
+
+ static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2 * 2 * 2)); // see computeSamplerTypeIndex()
+ TPrecisionQualifier defaultSamplerPrecision[maxSamplerIndex];
+ TPrecisionManager precisionManager;
+ TQualifier globalBufferDefaults;
+ TQualifier globalUniformDefaults;
+ TQualifier globalInputDefaults;
+ TQualifier globalOutputDefaults;
+ int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
+ TString currentCaller; // name of last function body entered (not valid when at global scope)
+ TIdSetType inductiveLoopIds;
+ bool anyIndexLimits;
+ TVector<TIntermTyped*> needsIndexLimitationChecking;
+
+ //
+ // Geometry shader input arrays:
+ // - array sizing is based on input primitive and/or explicit size
+ //
+ // Tessellation control output arrays:
+ // - array sizing is based on output layout(vertices=...) and/or explicit size
+ //
+ // Both:
+ // - array sizing is retroactive
+ // - built-in block redeclarations interact with this
+ //
+ // Design:
+ // - use a per-context "resize-list", a list of symbols whose array sizes
+ // can be fixed
+ //
+ // - the resize-list starts empty at beginning of user-shader compilation, it does
+ // not have built-ins in it
+ //
+ // - on built-in array use: copyUp() symbol and add it to the resize-list
+ //
+ // - on user array declaration: add it to the resize-list
+ //
+ // - on block redeclaration: copyUp() symbol and add it to the resize-list
+ // * note, that appropriately gives an error if redeclaring a block that
+ // was already used and hence already copied-up
+ //
+ // - on seeing a layout declaration that sizes the array, fix everything in the
+ // resize-list, giving errors for mismatch
+ //
+ // - on seeing an array size declaration, give errors on mismatch between it and previous
+ // array-sizing declarations
+ //
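+    // Example (GLSL, geometry shader; illustrative only):
+    //     in Block { vec4 v; } blk[];  // unsized: 'blk' joins the resize-list
+    //     layout(triangles) in;        // retroactively sizes blk[] to 3
+    //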
+ TVector<TSymbol*> ioArraySymbolResizeList;
+};
+
+} // end namespace glslang
+
+#endif // _PARSER_HELPER_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp
new file mode 100644
index 0000000..84c40f4
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/PoolAlloc.cpp
@@ -0,0 +1,315 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "../Include/PoolAlloc.h"
+
+#include "../Include/InitializeGlobals.h"
+#include "../OSDependent/osinclude.h"
+
+namespace glslang {
+
+// Process-wide TLS index
+OS_TLSIndex PoolIndex;
+
+// Return the thread-specific current pool.
+TPoolAllocator& GetThreadPoolAllocator()
+{
+ return *static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
+}
+
+// Set the thread-specific current pool.
+void SetThreadPoolAllocator(TPoolAllocator* poolAllocator)
+{
+ OS_SetTLSValue(PoolIndex, poolAllocator);
+}
+
+// Process-wide setup of the TLS pool storage.
+bool InitializePoolIndex()
+{
+ // Allocate a TLS index.
+ if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
+ return false;
+
+ return true;
+}
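+
+// A minimal usage sketch (illustrative only; the guard macro is
+// hypothetical and never defined anywhere): allocations are made from
+// the thread's current pool and released en masse by a matching pop().
+#ifdef POOL_ALLOC_USAGE_SKETCH
+static void poolAllocUsageSketch()
+{
+    InitializePoolIndex();             // once per process
+    TPoolAllocator pool;               // default page size and alignment
+    SetThreadPoolAllocator(&pool);     // make it current for this thread
+    GetThreadPoolAllocator().push();   // open an allocation scope
+    void* p = GetThreadPoolAllocator().allocate(128);
+    (void)p;                           // memory lives until the matching pop()
+    GetThreadPoolAllocator().pop();    // mass-deallocate everything since push()
+}
+#endif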
+
+//
+// Implement the functionality of the TPoolAllocator class, which
+// is documented in PoolAlloc.h.
+//
+TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
+ pageSize(growthIncrement),
+ alignment(allocationAlignment),
+ freeList(nullptr),
+ inUseList(nullptr),
+ numCalls(0)
+{
+ //
+ // Don't allow page sizes we know are smaller than all common
+ // OS page sizes.
+ //
+ if (pageSize < 4*1024)
+ pageSize = 4*1024;
+
+ //
+ // A large currentPageOffset indicates a new page needs to
+ // be obtained to allocate memory.
+ //
+ currentPageOffset = pageSize;
+
+ //
+ // Adjust alignment to be at least pointer aligned and
+ // power of 2.
+ //
+ size_t minAlign = sizeof(void*);
+ alignment &= ~(minAlign - 1);
+ if (alignment < minAlign)
+ alignment = minAlign;
+ size_t a = 1;
+ while (a < alignment)
+ a <<= 1;
+ alignment = a;
+ alignmentMask = a - 1;
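+
+    // e.g. on a 64-bit build (minAlign == 8): a requested alignment of 24
+    // survives the masking step (24 is a multiple of 8), then the loop
+    // rounds it up to the next power of two, so alignment == 32 and
+    // alignmentMask == 31.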
+
+ //
+ // Align header skip
+ //
+ headerSkip = minAlign;
+ if (headerSkip < sizeof(tHeader)) {
+ headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
+ }
+
+ push();
+}
+
+TPoolAllocator::~TPoolAllocator()
+{
+ while (inUseList) {
+ tHeader* next = inUseList->nextPage;
+ inUseList->~tHeader();
+ delete [] reinterpret_cast<char*>(inUseList);
+ inUseList = next;
+ }
+
+ //
+    // Always delete the free list memory; nothing can still (correctly)
+    // reference it, whether the pool allocator was global or not. Don't
+    // check the guard blocks here; that was already done when each block
+    // was placed onto the free list.
+ //
+ while (freeList) {
+ tHeader* next = freeList->nextPage;
+ delete [] reinterpret_cast<char*>(freeList);
+ freeList = next;
+ }
+}
+
+const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
+const unsigned char TAllocation::guardBlockEndVal = 0xfe;
+const unsigned char TAllocation::userDataFill = 0xcd;
+
+# ifdef GUARD_BLOCKS
+ const size_t TAllocation::guardBlockSize = 16;
+# else
+ const size_t TAllocation::guardBlockSize = 0;
+# endif
+
+//
+// Check a single guard block for damage
+//
+#ifdef GUARD_BLOCKS
+void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
+#else
+void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
+#endif
+{
+#ifdef GUARD_BLOCKS
+ for (size_t x = 0; x < guardBlockSize; x++) {
+ if (blockMem[x] != val) {
+ const int maxSize = 80;
+ char assertMsg[maxSize];
+
+ // We don't print the assert message. It's here just to be helpful.
+ snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
+ locText, size, data());
+ assert(0 && "PoolAlloc: Damage in guard block");
+ }
+ }
+#else
+ assert(guardBlockSize == 0);
+#endif
+}
+
+void TPoolAllocator::push()
+{
+ tAllocState state = { currentPageOffset, inUseList };
+
+ stack.push_back(state);
+
+ //
+ // Indicate there is no current page to allocate from.
+ //
+ currentPageOffset = pageSize;
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred since the last push(), or since the
+// last pop(), or since the object's creation.
+//
+// The deallocated pages are saved for future allocations.
+//
+void TPoolAllocator::pop()
+{
+ if (stack.size() < 1)
+ return;
+
+ tHeader* page = stack.back().page;
+ currentPageOffset = stack.back().offset;
+
+ while (inUseList != page) {
+ tHeader* nextInUse = inUseList->nextPage;
+ size_t pageCount = inUseList->pageCount;
+
+        // This technically ends the lifetime of the header as a C++ object,
+        // but we still control the memory and will reuse it.
+ inUseList->~tHeader(); // currently, just a debug allocation checker
+
+ if (pageCount > 1) {
+ delete [] reinterpret_cast<char*>(inUseList);
+ } else {
+ inUseList->nextPage = freeList;
+ freeList = inUseList;
+ }
+ inUseList = nextInUse;
+ }
+
+ stack.pop_back();
+}
+
+//
+// Do a mass-deallocation of all the individual allocations
+// that have occurred.
+//
+void TPoolAllocator::popAll()
+{
+ while (stack.size() > 0)
+ pop();
+}
+
+void* TPoolAllocator::allocate(size_t numBytes)
+{
+ // If we are using guard blocks, all allocations are bracketed by
+ // them: [guardblock][allocation][guardblock]. numBytes is how
+ // much memory the caller asked for. allocationSize is the total
+    // size including guard blocks. In a release build,
+    // guardBlockSize == 0 and this all gets optimized away.
+ size_t allocationSize = TAllocation::allocationSize(numBytes);
+
+ //
+ // Just keep some interesting statistics.
+ //
+ ++numCalls;
+ totalBytes += numBytes;
+
+ //
+ // Do the allocation, most likely case first, for efficiency.
+ // This step could be moved to be inline sometime.
+ //
+ if (currentPageOffset + allocationSize <= pageSize) {
+ //
+ // Safe to allocate from currentPageOffset.
+ //
+ unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
+ currentPageOffset += allocationSize;
+ currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
+
+ return initializeAllocation(inUseList, memory, numBytes);
+ }
+
+ if (allocationSize + headerSkip > pageSize) {
+ //
+ // Do a multi-page allocation. Don't mix these with the others.
+        // The OS is efficient at allocating and freeing multiple pages.
+ //
+ size_t numBytesToAlloc = allocationSize + headerSkip;
+ tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
+ if (memory == 0)
+ return 0;
+
+ // Use placement-new to initialize header
+ new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
+ inUseList = memory;
+
+ currentPageOffset = pageSize; // make next allocation come from a new page
+
+ // No guard blocks for multi-page allocations (yet)
+ return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
+ }
+
+ //
+ // Need a simple page to allocate from.
+ //
+ tHeader* memory;
+ if (freeList) {
+ memory = freeList;
+ freeList = freeList->nextPage;
+ } else {
+ memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
+ if (memory == 0)
+ return 0;
+ }
+
+ // Use placement-new to initialize header
+ new(memory) tHeader(inUseList, 1);
+ inUseList = memory;
+
+ unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
+ currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
+
+ return initializeAllocation(inUseList, ret, numBytes);
+}
+
+//
+// Check all allocations in a list for damage by calling check on each.
+//
+void TAllocation::checkAllocList() const
+{
+ for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
+ alloc->check();
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp
new file mode 100644
index 0000000..1d33bfd
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.cpp
@@ -0,0 +1,118 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/intermediate.h"
+#include "RemoveTree.h"
+
+namespace glslang {
+
+//
+// Code to recursively delete the intermediate tree.
+//
+struct TRemoveTraverser : TIntermTraverser {
+ TRemoveTraverser() : TIntermTraverser(false, false, true, false) {}
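+
+    // The constructor arguments request post-visit traversal only, so a
+    // node is deleted only after all of its children have already been
+    // visited (and deleted).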
+
+ virtual void visitSymbol(TIntermSymbol* node)
+ {
+ delete node;
+ }
+
+    virtual bool visitBinary(TVisit /* visit */, TIntermBinary* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual bool visitUnary(TVisit /* visit */, TIntermUnary* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+    virtual bool visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+    virtual bool visitSelection(TVisit /* visit */, TIntermSelection* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+    virtual bool visitSwitch(TVisit /* visit */, TIntermSwitch* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+ virtual void visitConstantUnion(TIntermConstantUnion* node)
+ {
+ delete node;
+ }
+
+    virtual bool visitLoop(TVisit /* visit */, TIntermLoop* node)
+ {
+ delete node;
+
+ return true;
+ }
+
+    virtual bool visitBranch(TVisit /* visit */, TIntermBranch* node)
+ {
+ delete node;
+
+ return true;
+ }
+};
+
+//
+// Entry point.
+//
+void RemoveAllTreeNodes(TIntermNode* root)
+{
+ TRemoveTraverser it;
+
+ root->traverse(&it);
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.h b/src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.h
new file mode 100644
index 0000000..1ed0156
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/RemoveTree.h
@@ -0,0 +1,41 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#pragma once
+
+namespace glslang {
+
+void RemoveAllTreeNodes(TIntermNode*);
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Scan.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/Scan.cpp
new file mode 100644
index 0000000..482f6ba
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Scan.cpp
@@ -0,0 +1,1793 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// GLSL scanning, leveraging the scanning done by the preprocessor.
+//
+
+#include <cstring>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "../Include/Types.h"
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "attribute.h"
+#include "glslang_tab.cpp.h"
+#include "ScanContext.h"
+#include "Scan.h"
+
+// preprocessor includes
+#include "preprocessor/PpContext.h"
+#include "preprocessor/PpTokens.h"
+
+// Required to avoid missing prototype warnings for some compilers
+int yylex(YYSTYPE*, glslang::TParseContext&);
+
+namespace glslang {
+
+// read past any white space
+void TInputScanner::consumeWhiteSpace(bool& foundNonSpaceTab)
+{
+ int c = peek(); // don't accidentally consume anything other than whitespace
+ while (c == ' ' || c == '\t' || c == '\r' || c == '\n') {
+ if (c == '\r' || c == '\n')
+ foundNonSpaceTab = true;
+ get();
+ c = peek();
+ }
+}
+
+// return true if a comment was actually consumed
+bool TInputScanner::consumeComment()
+{
+ if (peek() != '/')
+ return false;
+
+ get(); // consume the '/'
+ int c = peek();
+ if (c == '/') {
+
+ // a '//' style comment
+ get(); // consume the second '/'
+ c = get();
+ do {
+ while (c != EndOfInput && c != '\\' && c != '\r' && c != '\n')
+ c = get();
+
+ if (c == EndOfInput || c == '\r' || c == '\n') {
+ while (c == '\r' || c == '\n')
+ c = get();
+
+ // we reached the end of the comment
+ break;
+ } else {
+ // it's a '\', so we need to keep going, after skipping what's escaped
+
+ // read the skipped character
+ c = get();
+
+ // if it's a two-character newline, skip both characters
+ if (c == '\r' && peek() == '\n')
+ get();
+ c = get();
+ }
+ } while (true);
+
+ // put back the last non-comment character
+ if (c != EndOfInput)
+ unget();
+
+ return true;
+ } else if (c == '*') {
+
+ // a '/*' style comment
+ get(); // consume the '*'
+ c = get();
+ do {
+ while (c != EndOfInput && c != '*')
+ c = get();
+ if (c == '*') {
+ c = get();
+ if (c == '/')
+ break; // end of comment
+ // not end of comment
+ } else // end of input
+ break;
+ } while (true);
+
+ return true;
+ } else {
+ // it's not a comment, put the '/' back
+ unget();
+
+ return false;
+ }
+}
+
+// skip whitespace, then skip a comment, rinse, repeat
+void TInputScanner::consumeWhitespaceComment(bool& foundNonSpaceTab)
+{
+ do {
+ consumeWhiteSpace(foundNonSpaceTab);
+
+ // if not starting a comment now, then done
+ int c = peek();
+ if (c != '/' || c == EndOfInput)
+ return;
+
+ // skip potential comment
+ foundNonSpaceTab = true;
+ if (! consumeComment())
+ return;
+
+ } while (true);
+}
+
+// Returns true if there was non-white space (e.g., a comment, newline) before the #version
+// or no #version was found; otherwise, returns false. There is no error case, it always
+// succeeds, but will leave version == 0 if no #version was found.
+//
+// Sets notFirstToken based on whether tokens (beyond white space and comments)
+// appeared before the #version.
+//
+// N.B. does not attempt to leave input in any particular known state. The assumption
+// is that scanning will start anew, following the rules for the chosen version/profile,
+// and with a corresponding parsing context.
+//
+bool TInputScanner::scanVersion(int& version, EProfile& profile, bool& notFirstToken)
+{
+ // This function doesn't have to get all the semantics correct,
+ // just find the #version if there is a correct one present.
+ // The preprocessor will have the responsibility of getting all the semantics right.
+
+ bool versionNotFirst = false; // means not first WRT comments and white space, nothing more
+ notFirstToken = false; // means not first WRT to real tokens
+ version = 0; // means not found
+ profile = ENoProfile;
+
+ bool foundNonSpaceTab = false;
+ bool lookingInMiddle = false;
+ int c;
+ do {
+ if (lookingInMiddle) {
+ notFirstToken = true;
+ // make forward progress by finishing off the current line plus extra new lines
+ if (peek() == '\n' || peek() == '\r') {
+ while (peek() == '\n' || peek() == '\r')
+ get();
+ } else
+ do {
+ c = get();
+ } while (c != EndOfInput && c != '\n' && c != '\r');
+ while (peek() == '\n' || peek() == '\r')
+ get();
+ if (peek() == EndOfInput)
+ return true;
+ }
+ lookingInMiddle = true;
+
+        // Nominal start: skip the comments and white space that desktop GLSL
+        // allows before #version, but track whether anything else was found, for ES:
+ consumeWhitespaceComment(foundNonSpaceTab);
+ if (foundNonSpaceTab)
+ versionNotFirst = true;
+
+ // "#"
+ if (get() != '#') {
+ versionNotFirst = true;
+ continue;
+ }
+
+ // whitespace
+ do {
+ c = get();
+ } while (c == ' ' || c == '\t');
+
+ // "version"
+ if ( c != 'v' ||
+ get() != 'e' ||
+ get() != 'r' ||
+ get() != 's' ||
+ get() != 'i' ||
+ get() != 'o' ||
+ get() != 'n') {
+ versionNotFirst = true;
+ continue;
+ }
+
+ // whitespace
+ do {
+ c = get();
+ } while (c == ' ' || c == '\t');
+
+ // version number
+ while (c >= '0' && c <= '9') {
+ version = 10 * version + (c - '0');
+ c = get();
+ }
+ if (version == 0) {
+ versionNotFirst = true;
+ continue;
+ }
+
+ // whitespace
+ while (c == ' ' || c == '\t')
+ c = get();
+
+ // profile
+        const int maxProfileLength = 13;  // not including any terminating 0
+ char profileString[maxProfileLength];
+ int profileLength;
+ for (profileLength = 0; profileLength < maxProfileLength; ++profileLength) {
+ if (c == EndOfInput || c == ' ' || c == '\t' || c == '\n' || c == '\r')
+ break;
+ profileString[profileLength] = (char)c;
+ c = get();
+ }
+ if (c != EndOfInput && c != ' ' && c != '\t' && c != '\n' && c != '\r') {
+ versionNotFirst = true;
+ continue;
+ }
+
+ if (profileLength == 2 && strncmp(profileString, "es", profileLength) == 0)
+ profile = EEsProfile;
+ else if (profileLength == 4 && strncmp(profileString, "core", profileLength) == 0)
+ profile = ECoreProfile;
+ else if (profileLength == 13 && strncmp(profileString, "compatibility", profileLength) == 0)
+ profile = ECompatibilityProfile;
+
+ return versionNotFirst;
+ } while (true);
+}
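+
+// A minimal sketch (illustrative only; the guard macro is hypothetical
+// and never defined) of running this pre-pass over a shader string:
+#ifdef SCAN_VERSION_SKETCH
+static bool scanVersionSketch()
+{
+    const char* strings[] = { "#version 310 es\nvoid main() {}\n" };
+    size_t lengths[] = { strlen(strings[0]) };
+    TInputScanner scanner(1, strings, lengths);
+    int version = 0;
+    EProfile profile = ENoProfile;
+    bool notFirstToken = false;
+    scanner.scanVersion(version, profile, notFirstToken);
+    return version == 310 && profile == EEsProfile;   // expected outcome
+}
+#endif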
+
+// Fill this in when doing glslang-level scanning, to hand back to the parser.
+class TParserToken {
+public:
+ explicit TParserToken(YYSTYPE& b) : sType(b) { }
+
+ YYSTYPE& sType;
+protected:
+ TParserToken(TParserToken&);
+ TParserToken& operator=(TParserToken&);
+};
+
+} // end namespace glslang
+
+// This is the function the glslang parser (i.e., bison) calls to get its next token
+int yylex(YYSTYPE* glslangTokenDesc, glslang::TParseContext& parseContext)
+{
+ glslang::TParserToken token(*glslangTokenDesc);
+
+ return parseContext.getScanContext()->tokenize(parseContext.getPpContext(), token);
+}
+
+namespace {
+
+struct str_eq
+{
+ bool operator()(const char* lhs, const char* rhs) const
+ {
+ return strcmp(lhs, rhs) == 0;
+ }
+};
+
+struct str_hash
+{
+ size_t operator()(const char* str) const
+ {
+ // djb2
+ unsigned long hash = 5381;
+ int c;
+
+ while ((c = *str++) != 0)
+ hash = ((hash << 5) + hash) + c;
+
+ return hash;
+ }
+};
+
+// A single global usable by all threads, by all versions, by all languages.
+// After a single process-level initialization, this is read-only and thread-safe.
+std::unordered_map<const char*, int, str_hash, str_eq>* KeywordMap = nullptr;
+std::unordered_set<const char*, str_hash, str_eq>* ReservedSet = nullptr;
+
+} // end anonymous namespace
+
+namespace glslang {
+
+void TScanContext::fillInKeywordMap()
+{
+ if (KeywordMap != nullptr) {
+        // this is really an error, as this should be called only once per process,
+        // but the only risk is if two threads call it simultaneously
+ return;
+ }
+ KeywordMap = new std::unordered_map<const char*, int, str_hash, str_eq>;
+
+ (*KeywordMap)["const"] = CONST;
+ (*KeywordMap)["uniform"] = UNIFORM;
+ (*KeywordMap)["nonuniformEXT"] = NONUNIFORM;
+ (*KeywordMap)["in"] = IN;
+ (*KeywordMap)["out"] = OUT;
+ (*KeywordMap)["inout"] = INOUT;
+ (*KeywordMap)["struct"] = STRUCT;
+ (*KeywordMap)["break"] = BREAK;
+ (*KeywordMap)["continue"] = CONTINUE;
+ (*KeywordMap)["do"] = DO;
+ (*KeywordMap)["for"] = FOR;
+ (*KeywordMap)["while"] = WHILE;
+ (*KeywordMap)["switch"] = SWITCH;
+ (*KeywordMap)["case"] = CASE;
+ (*KeywordMap)["default"] = DEFAULT;
+ (*KeywordMap)["if"] = IF;
+ (*KeywordMap)["else"] = ELSE;
+ (*KeywordMap)["discard"] = DISCARD;
+ (*KeywordMap)["return"] = RETURN;
+ (*KeywordMap)["void"] = VOID;
+ (*KeywordMap)["bool"] = BOOL;
+ (*KeywordMap)["float"] = FLOAT;
+ (*KeywordMap)["int"] = INT;
+ (*KeywordMap)["bvec2"] = BVEC2;
+ (*KeywordMap)["bvec3"] = BVEC3;
+ (*KeywordMap)["bvec4"] = BVEC4;
+ (*KeywordMap)["vec2"] = VEC2;
+ (*KeywordMap)["vec3"] = VEC3;
+ (*KeywordMap)["vec4"] = VEC4;
+ (*KeywordMap)["ivec2"] = IVEC2;
+ (*KeywordMap)["ivec3"] = IVEC3;
+ (*KeywordMap)["ivec4"] = IVEC4;
+ (*KeywordMap)["mat2"] = MAT2;
+ (*KeywordMap)["mat3"] = MAT3;
+ (*KeywordMap)["mat4"] = MAT4;
+ (*KeywordMap)["true"] = BOOLCONSTANT;
+ (*KeywordMap)["false"] = BOOLCONSTANT;
+ (*KeywordMap)["attribute"] = ATTRIBUTE;
+ (*KeywordMap)["varying"] = VARYING;
+ (*KeywordMap)["buffer"] = BUFFER;
+ (*KeywordMap)["coherent"] = COHERENT;
+ (*KeywordMap)["devicecoherent"] = DEVICECOHERENT;
+ (*KeywordMap)["queuefamilycoherent"] = QUEUEFAMILYCOHERENT;
+ (*KeywordMap)["workgroupcoherent"] = WORKGROUPCOHERENT;
+ (*KeywordMap)["subgroupcoherent"] = SUBGROUPCOHERENT;
+ (*KeywordMap)["nonprivate"] = NONPRIVATE;
+ (*KeywordMap)["restrict"] = RESTRICT;
+ (*KeywordMap)["readonly"] = READONLY;
+ (*KeywordMap)["writeonly"] = WRITEONLY;
+ (*KeywordMap)["atomic_uint"] = ATOMIC_UINT;
+ (*KeywordMap)["volatile"] = VOLATILE;
+ (*KeywordMap)["layout"] = LAYOUT;
+ (*KeywordMap)["shared"] = SHARED;
+ (*KeywordMap)["patch"] = PATCH;
+ (*KeywordMap)["sample"] = SAMPLE;
+ (*KeywordMap)["subroutine"] = SUBROUTINE;
+ (*KeywordMap)["highp"] = HIGH_PRECISION;
+ (*KeywordMap)["mediump"] = MEDIUM_PRECISION;
+ (*KeywordMap)["lowp"] = LOW_PRECISION;
+ (*KeywordMap)["precision"] = PRECISION;
+ (*KeywordMap)["mat2x2"] = MAT2X2;
+ (*KeywordMap)["mat2x3"] = MAT2X3;
+ (*KeywordMap)["mat2x4"] = MAT2X4;
+ (*KeywordMap)["mat3x2"] = MAT3X2;
+ (*KeywordMap)["mat3x3"] = MAT3X3;
+ (*KeywordMap)["mat3x4"] = MAT3X4;
+ (*KeywordMap)["mat4x2"] = MAT4X2;
+ (*KeywordMap)["mat4x3"] = MAT4X3;
+ (*KeywordMap)["mat4x4"] = MAT4X4;
+ (*KeywordMap)["dmat2"] = DMAT2;
+ (*KeywordMap)["dmat3"] = DMAT3;
+ (*KeywordMap)["dmat4"] = DMAT4;
+ (*KeywordMap)["dmat2x2"] = DMAT2X2;
+ (*KeywordMap)["dmat2x3"] = DMAT2X3;
+ (*KeywordMap)["dmat2x4"] = DMAT2X4;
+ (*KeywordMap)["dmat3x2"] = DMAT3X2;
+ (*KeywordMap)["dmat3x3"] = DMAT3X3;
+ (*KeywordMap)["dmat3x4"] = DMAT3X4;
+ (*KeywordMap)["dmat4x2"] = DMAT4X2;
+ (*KeywordMap)["dmat4x3"] = DMAT4X3;
+ (*KeywordMap)["dmat4x4"] = DMAT4X4;
+ (*KeywordMap)["image1D"] = IMAGE1D;
+ (*KeywordMap)["iimage1D"] = IIMAGE1D;
+ (*KeywordMap)["uimage1D"] = UIMAGE1D;
+ (*KeywordMap)["image2D"] = IMAGE2D;
+ (*KeywordMap)["iimage2D"] = IIMAGE2D;
+ (*KeywordMap)["uimage2D"] = UIMAGE2D;
+ (*KeywordMap)["image3D"] = IMAGE3D;
+ (*KeywordMap)["iimage3D"] = IIMAGE3D;
+ (*KeywordMap)["uimage3D"] = UIMAGE3D;
+ (*KeywordMap)["image2DRect"] = IMAGE2DRECT;
+ (*KeywordMap)["iimage2DRect"] = IIMAGE2DRECT;
+ (*KeywordMap)["uimage2DRect"] = UIMAGE2DRECT;
+ (*KeywordMap)["imageCube"] = IMAGECUBE;
+ (*KeywordMap)["iimageCube"] = IIMAGECUBE;
+ (*KeywordMap)["uimageCube"] = UIMAGECUBE;
+ (*KeywordMap)["imageBuffer"] = IMAGEBUFFER;
+ (*KeywordMap)["iimageBuffer"] = IIMAGEBUFFER;
+ (*KeywordMap)["uimageBuffer"] = UIMAGEBUFFER;
+ (*KeywordMap)["image1DArray"] = IMAGE1DARRAY;
+ (*KeywordMap)["iimage1DArray"] = IIMAGE1DARRAY;
+ (*KeywordMap)["uimage1DArray"] = UIMAGE1DARRAY;
+ (*KeywordMap)["image2DArray"] = IMAGE2DARRAY;
+ (*KeywordMap)["iimage2DArray"] = IIMAGE2DARRAY;
+ (*KeywordMap)["uimage2DArray"] = UIMAGE2DARRAY;
+ (*KeywordMap)["imageCubeArray"] = IMAGECUBEARRAY;
+ (*KeywordMap)["iimageCubeArray"] = IIMAGECUBEARRAY;
+ (*KeywordMap)["uimageCubeArray"] = UIMAGECUBEARRAY;
+ (*KeywordMap)["image2DMS"] = IMAGE2DMS;
+ (*KeywordMap)["iimage2DMS"] = IIMAGE2DMS;
+ (*KeywordMap)["uimage2DMS"] = UIMAGE2DMS;
+ (*KeywordMap)["image2DMSArray"] = IMAGE2DMSARRAY;
+ (*KeywordMap)["iimage2DMSArray"] = IIMAGE2DMSARRAY;
+ (*KeywordMap)["uimage2DMSArray"] = UIMAGE2DMSARRAY;
+ (*KeywordMap)["double"] = DOUBLE;
+ (*KeywordMap)["dvec2"] = DVEC2;
+ (*KeywordMap)["dvec3"] = DVEC3;
+ (*KeywordMap)["dvec4"] = DVEC4;
+ (*KeywordMap)["uint"] = UINT;
+ (*KeywordMap)["uvec2"] = UVEC2;
+ (*KeywordMap)["uvec3"] = UVEC3;
+ (*KeywordMap)["uvec4"] = UVEC4;
+
+ (*KeywordMap)["int64_t"] = INT64_T;
+ (*KeywordMap)["uint64_t"] = UINT64_T;
+ (*KeywordMap)["i64vec2"] = I64VEC2;
+ (*KeywordMap)["i64vec3"] = I64VEC3;
+ (*KeywordMap)["i64vec4"] = I64VEC4;
+ (*KeywordMap)["u64vec2"] = U64VEC2;
+ (*KeywordMap)["u64vec3"] = U64VEC3;
+ (*KeywordMap)["u64vec4"] = U64VEC4;
+
+ // GL_EXT_shader_explicit_arithmetic_types
+ (*KeywordMap)["int8_t"] = INT8_T;
+ (*KeywordMap)["i8vec2"] = I8VEC2;
+ (*KeywordMap)["i8vec3"] = I8VEC3;
+ (*KeywordMap)["i8vec4"] = I8VEC4;
+ (*KeywordMap)["uint8_t"] = UINT8_T;
+ (*KeywordMap)["u8vec2"] = U8VEC2;
+ (*KeywordMap)["u8vec3"] = U8VEC3;
+ (*KeywordMap)["u8vec4"] = U8VEC4;
+
+ (*KeywordMap)["int16_t"] = INT16_T;
+ (*KeywordMap)["i16vec2"] = I16VEC2;
+ (*KeywordMap)["i16vec3"] = I16VEC3;
+ (*KeywordMap)["i16vec4"] = I16VEC4;
+ (*KeywordMap)["uint16_t"] = UINT16_T;
+ (*KeywordMap)["u16vec2"] = U16VEC2;
+ (*KeywordMap)["u16vec3"] = U16VEC3;
+ (*KeywordMap)["u16vec4"] = U16VEC4;
+
+ (*KeywordMap)["int32_t"] = INT32_T;
+ (*KeywordMap)["i32vec2"] = I32VEC2;
+ (*KeywordMap)["i32vec3"] = I32VEC3;
+ (*KeywordMap)["i32vec4"] = I32VEC4;
+ (*KeywordMap)["uint32_t"] = UINT32_T;
+ (*KeywordMap)["u32vec2"] = U32VEC2;
+ (*KeywordMap)["u32vec3"] = U32VEC3;
+ (*KeywordMap)["u32vec4"] = U32VEC4;
+
+ (*KeywordMap)["float16_t"] = FLOAT16_T;
+ (*KeywordMap)["f16vec2"] = F16VEC2;
+ (*KeywordMap)["f16vec3"] = F16VEC3;
+ (*KeywordMap)["f16vec4"] = F16VEC4;
+ (*KeywordMap)["f16mat2"] = F16MAT2;
+ (*KeywordMap)["f16mat3"] = F16MAT3;
+ (*KeywordMap)["f16mat4"] = F16MAT4;
+ (*KeywordMap)["f16mat2x2"] = F16MAT2X2;
+ (*KeywordMap)["f16mat2x3"] = F16MAT2X3;
+ (*KeywordMap)["f16mat2x4"] = F16MAT2X4;
+ (*KeywordMap)["f16mat3x2"] = F16MAT3X2;
+ (*KeywordMap)["f16mat3x3"] = F16MAT3X3;
+ (*KeywordMap)["f16mat3x4"] = F16MAT3X4;
+ (*KeywordMap)["f16mat4x2"] = F16MAT4X2;
+ (*KeywordMap)["f16mat4x3"] = F16MAT4X3;
+ (*KeywordMap)["f16mat4x4"] = F16MAT4X4;
+
+ (*KeywordMap)["float32_t"] = FLOAT32_T;
+ (*KeywordMap)["f32vec2"] = F32VEC2;
+ (*KeywordMap)["f32vec3"] = F32VEC3;
+ (*KeywordMap)["f32vec4"] = F32VEC4;
+ (*KeywordMap)["f32mat2"] = F32MAT2;
+ (*KeywordMap)["f32mat3"] = F32MAT3;
+ (*KeywordMap)["f32mat4"] = F32MAT4;
+ (*KeywordMap)["f32mat2x2"] = F32MAT2X2;
+ (*KeywordMap)["f32mat2x3"] = F32MAT2X3;
+ (*KeywordMap)["f32mat2x4"] = F32MAT2X4;
+ (*KeywordMap)["f32mat3x2"] = F32MAT3X2;
+ (*KeywordMap)["f32mat3x3"] = F32MAT3X3;
+ (*KeywordMap)["f32mat3x4"] = F32MAT3X4;
+ (*KeywordMap)["f32mat4x2"] = F32MAT4X2;
+ (*KeywordMap)["f32mat4x3"] = F32MAT4X3;
+ (*KeywordMap)["f32mat4x4"] = F32MAT4X4;
+ (*KeywordMap)["float64_t"] = FLOAT64_T;
+ (*KeywordMap)["f64vec2"] = F64VEC2;
+ (*KeywordMap)["f64vec3"] = F64VEC3;
+ (*KeywordMap)["f64vec4"] = F64VEC4;
+ (*KeywordMap)["f64mat2"] = F64MAT2;
+ (*KeywordMap)["f64mat3"] = F64MAT3;
+ (*KeywordMap)["f64mat4"] = F64MAT4;
+ (*KeywordMap)["f64mat2x2"] = F64MAT2X2;
+ (*KeywordMap)["f64mat2x3"] = F64MAT2X3;
+ (*KeywordMap)["f64mat2x4"] = F64MAT2X4;
+ (*KeywordMap)["f64mat3x2"] = F64MAT3X2;
+ (*KeywordMap)["f64mat3x3"] = F64MAT3X3;
+ (*KeywordMap)["f64mat3x4"] = F64MAT3X4;
+ (*KeywordMap)["f64mat4x2"] = F64MAT4X2;
+ (*KeywordMap)["f64mat4x3"] = F64MAT4X3;
+ (*KeywordMap)["f64mat4x4"] = F64MAT4X4;
+
+ (*KeywordMap)["sampler2D"] = SAMPLER2D;
+ (*KeywordMap)["samplerCube"] = SAMPLERCUBE;
+ (*KeywordMap)["samplerCubeArray"] = SAMPLERCUBEARRAY;
+ (*KeywordMap)["samplerCubeArrayShadow"] = SAMPLERCUBEARRAYSHADOW;
+ (*KeywordMap)["isamplerCubeArray"] = ISAMPLERCUBEARRAY;
+ (*KeywordMap)["usamplerCubeArray"] = USAMPLERCUBEARRAY;
+ (*KeywordMap)["sampler1DArrayShadow"] = SAMPLER1DARRAYSHADOW;
+ (*KeywordMap)["isampler1DArray"] = ISAMPLER1DARRAY;
+ (*KeywordMap)["usampler1D"] = USAMPLER1D;
+ (*KeywordMap)["isampler1D"] = ISAMPLER1D;
+ (*KeywordMap)["usampler1DArray"] = USAMPLER1DARRAY;
+ (*KeywordMap)["samplerBuffer"] = SAMPLERBUFFER;
+ (*KeywordMap)["samplerCubeShadow"] = SAMPLERCUBESHADOW;
+ (*KeywordMap)["sampler2DArray"] = SAMPLER2DARRAY;
+ (*KeywordMap)["sampler2DArrayShadow"] = SAMPLER2DARRAYSHADOW;
+ (*KeywordMap)["isampler2D"] = ISAMPLER2D;
+ (*KeywordMap)["isampler3D"] = ISAMPLER3D;
+ (*KeywordMap)["isamplerCube"] = ISAMPLERCUBE;
+ (*KeywordMap)["isampler2DArray"] = ISAMPLER2DARRAY;
+ (*KeywordMap)["usampler2D"] = USAMPLER2D;
+ (*KeywordMap)["usampler3D"] = USAMPLER3D;
+ (*KeywordMap)["usamplerCube"] = USAMPLERCUBE;
+ (*KeywordMap)["usampler2DArray"] = USAMPLER2DARRAY;
+ (*KeywordMap)["isampler2DRect"] = ISAMPLER2DRECT;
+ (*KeywordMap)["usampler2DRect"] = USAMPLER2DRECT;
+ (*KeywordMap)["isamplerBuffer"] = ISAMPLERBUFFER;
+ (*KeywordMap)["usamplerBuffer"] = USAMPLERBUFFER;
+ (*KeywordMap)["sampler2DMS"] = SAMPLER2DMS;
+ (*KeywordMap)["isampler2DMS"] = ISAMPLER2DMS;
+ (*KeywordMap)["usampler2DMS"] = USAMPLER2DMS;
+ (*KeywordMap)["sampler2DMSArray"] = SAMPLER2DMSARRAY;
+ (*KeywordMap)["isampler2DMSArray"] = ISAMPLER2DMSARRAY;
+ (*KeywordMap)["usampler2DMSArray"] = USAMPLER2DMSARRAY;
+ (*KeywordMap)["sampler1D"] = SAMPLER1D;
+ (*KeywordMap)["sampler1DShadow"] = SAMPLER1DSHADOW;
+ (*KeywordMap)["sampler3D"] = SAMPLER3D;
+ (*KeywordMap)["sampler2DShadow"] = SAMPLER2DSHADOW;
+ (*KeywordMap)["sampler2DRect"] = SAMPLER2DRECT;
+ (*KeywordMap)["sampler2DRectShadow"] = SAMPLER2DRECTSHADOW;
+ (*KeywordMap)["sampler1DArray"] = SAMPLER1DARRAY;
+
+ (*KeywordMap)["samplerExternalOES"] = SAMPLEREXTERNALOES; // GL_OES_EGL_image_external
+
+ (*KeywordMap)["__samplerExternal2DY2YEXT"] = SAMPLEREXTERNAL2DY2YEXT; // GL_EXT_YUV_target
+
+ (*KeywordMap)["sampler"] = SAMPLER;
+ (*KeywordMap)["samplerShadow"] = SAMPLERSHADOW;
+
+ (*KeywordMap)["texture2D"] = TEXTURE2D;
+ (*KeywordMap)["textureCube"] = TEXTURECUBE;
+ (*KeywordMap)["textureCubeArray"] = TEXTURECUBEARRAY;
+ (*KeywordMap)["itextureCubeArray"] = ITEXTURECUBEARRAY;
+ (*KeywordMap)["utextureCubeArray"] = UTEXTURECUBEARRAY;
+ (*KeywordMap)["itexture1DArray"] = ITEXTURE1DARRAY;
+ (*KeywordMap)["utexture1D"] = UTEXTURE1D;
+ (*KeywordMap)["itexture1D"] = ITEXTURE1D;
+ (*KeywordMap)["utexture1DArray"] = UTEXTURE1DARRAY;
+ (*KeywordMap)["textureBuffer"] = TEXTUREBUFFER;
+ (*KeywordMap)["texture2DArray"] = TEXTURE2DARRAY;
+ (*KeywordMap)["itexture2D"] = ITEXTURE2D;
+ (*KeywordMap)["itexture3D"] = ITEXTURE3D;
+ (*KeywordMap)["itextureCube"] = ITEXTURECUBE;
+ (*KeywordMap)["itexture2DArray"] = ITEXTURE2DARRAY;
+ (*KeywordMap)["utexture2D"] = UTEXTURE2D;
+ (*KeywordMap)["utexture3D"] = UTEXTURE3D;
+ (*KeywordMap)["utextureCube"] = UTEXTURECUBE;
+ (*KeywordMap)["utexture2DArray"] = UTEXTURE2DARRAY;
+ (*KeywordMap)["itexture2DRect"] = ITEXTURE2DRECT;
+ (*KeywordMap)["utexture2DRect"] = UTEXTURE2DRECT;
+ (*KeywordMap)["itextureBuffer"] = ITEXTUREBUFFER;
+ (*KeywordMap)["utextureBuffer"] = UTEXTUREBUFFER;
+ (*KeywordMap)["texture2DMS"] = TEXTURE2DMS;
+ (*KeywordMap)["itexture2DMS"] = ITEXTURE2DMS;
+ (*KeywordMap)["utexture2DMS"] = UTEXTURE2DMS;
+ (*KeywordMap)["texture2DMSArray"] = TEXTURE2DMSARRAY;
+ (*KeywordMap)["itexture2DMSArray"] = ITEXTURE2DMSARRAY;
+ (*KeywordMap)["utexture2DMSArray"] = UTEXTURE2DMSARRAY;
+ (*KeywordMap)["texture1D"] = TEXTURE1D;
+ (*KeywordMap)["texture3D"] = TEXTURE3D;
+ (*KeywordMap)["texture2DRect"] = TEXTURE2DRECT;
+ (*KeywordMap)["texture1DArray"] = TEXTURE1DARRAY;
+
+ (*KeywordMap)["subpassInput"] = SUBPASSINPUT;
+ (*KeywordMap)["subpassInputMS"] = SUBPASSINPUTMS;
+ (*KeywordMap)["isubpassInput"] = ISUBPASSINPUT;
+ (*KeywordMap)["isubpassInputMS"] = ISUBPASSINPUTMS;
+ (*KeywordMap)["usubpassInput"] = USUBPASSINPUT;
+ (*KeywordMap)["usubpassInputMS"] = USUBPASSINPUTMS;
+
+#ifdef AMD_EXTENSIONS
+ (*KeywordMap)["f16sampler1D"] = F16SAMPLER1D;
+ (*KeywordMap)["f16sampler2D"] = F16SAMPLER2D;
+ (*KeywordMap)["f16sampler3D"] = F16SAMPLER3D;
+ (*KeywordMap)["f16sampler2DRect"] = F16SAMPLER2DRECT;
+ (*KeywordMap)["f16samplerCube"] = F16SAMPLERCUBE;
+ (*KeywordMap)["f16sampler1DArray"] = F16SAMPLER1DARRAY;
+ (*KeywordMap)["f16sampler2DArray"] = F16SAMPLER2DARRAY;
+ (*KeywordMap)["f16samplerCubeArray"] = F16SAMPLERCUBEARRAY;
+ (*KeywordMap)["f16samplerBuffer"] = F16SAMPLERBUFFER;
+ (*KeywordMap)["f16sampler2DMS"] = F16SAMPLER2DMS;
+ (*KeywordMap)["f16sampler2DMSArray"] = F16SAMPLER2DMSARRAY;
+ (*KeywordMap)["f16sampler1DShadow"] = F16SAMPLER1DSHADOW;
+ (*KeywordMap)["f16sampler2DShadow"] = F16SAMPLER2DSHADOW;
+ (*KeywordMap)["f16sampler2DRectShadow"] = F16SAMPLER2DRECTSHADOW;
+ (*KeywordMap)["f16samplerCubeShadow"] = F16SAMPLERCUBESHADOW;
+ (*KeywordMap)["f16sampler1DArrayShadow"] = F16SAMPLER1DARRAYSHADOW;
+ (*KeywordMap)["f16sampler2DArrayShadow"] = F16SAMPLER2DARRAYSHADOW;
+ (*KeywordMap)["f16samplerCubeArrayShadow"] = F16SAMPLERCUBEARRAYSHADOW;
+
+ (*KeywordMap)["f16image1D"] = F16IMAGE1D;
+ (*KeywordMap)["f16image2D"] = F16IMAGE2D;
+ (*KeywordMap)["f16image3D"] = F16IMAGE3D;
+ (*KeywordMap)["f16image2DRect"] = F16IMAGE2DRECT;
+ (*KeywordMap)["f16imageCube"] = F16IMAGECUBE;
+ (*KeywordMap)["f16image1DArray"] = F16IMAGE1DARRAY;
+ (*KeywordMap)["f16image2DArray"] = F16IMAGE2DARRAY;
+ (*KeywordMap)["f16imageCubeArray"] = F16IMAGECUBEARRAY;
+ (*KeywordMap)["f16imageBuffer"] = F16IMAGEBUFFER;
+ (*KeywordMap)["f16image2DMS"] = F16IMAGE2DMS;
+ (*KeywordMap)["f16image2DMSArray"] = F16IMAGE2DMSARRAY;
+
+ (*KeywordMap)["f16texture1D"] = F16TEXTURE1D;
+ (*KeywordMap)["f16texture2D"] = F16TEXTURE2D;
+ (*KeywordMap)["f16texture3D"] = F16TEXTURE3D;
+ (*KeywordMap)["f16texture2DRect"] = F16TEXTURE2DRECT;
+ (*KeywordMap)["f16textureCube"] = F16TEXTURECUBE;
+ (*KeywordMap)["f16texture1DArray"] = F16TEXTURE1DARRAY;
+ (*KeywordMap)["f16texture2DArray"] = F16TEXTURE2DARRAY;
+ (*KeywordMap)["f16textureCubeArray"] = F16TEXTURECUBEARRAY;
+ (*KeywordMap)["f16textureBuffer"] = F16TEXTUREBUFFER;
+ (*KeywordMap)["f16texture2DMS"] = F16TEXTURE2DMS;
+ (*KeywordMap)["f16texture2DMSArray"] = F16TEXTURE2DMSARRAY;
+
+ (*KeywordMap)["f16subpassInput"] = F16SUBPASSINPUT;
+ (*KeywordMap)["f16subpassInputMS"] = F16SUBPASSINPUTMS;
+#endif
+
+ (*KeywordMap)["noperspective"] = NOPERSPECTIVE;
+ (*KeywordMap)["smooth"] = SMOOTH;
+ (*KeywordMap)["flat"] = FLAT;
+#ifdef AMD_EXTENSIONS
+ (*KeywordMap)["__explicitInterpAMD"] = EXPLICITINTERPAMD;
+#endif
+ (*KeywordMap)["centroid"] = CENTROID;
+#ifdef NV_EXTENSIONS
+ (*KeywordMap)["pervertexNV"] = PERVERTEXNV;
+#endif
+ (*KeywordMap)["precise"] = PRECISE;
+ (*KeywordMap)["invariant"] = INVARIANT;
+ (*KeywordMap)["packed"] = PACKED;
+ (*KeywordMap)["resource"] = RESOURCE;
+ (*KeywordMap)["superp"] = SUPERP;
+
+#ifdef NV_EXTENSIONS
+ (*KeywordMap)["rayPayloadNV"] = PAYLOADNV;
+ (*KeywordMap)["rayPayloadInNV"] = PAYLOADINNV;
+ (*KeywordMap)["hitAttributeNV"] = HITATTRNV;
+ (*KeywordMap)["callableDataNV"] = CALLDATANV;
+ (*KeywordMap)["callableDataInNV"] = CALLDATAINNV;
+ (*KeywordMap)["accelerationStructureNV"] = ACCSTRUCTNV;
+ (*KeywordMap)["perprimitiveNV"] = PERPRIMITIVENV;
+ (*KeywordMap)["perviewNV"] = PERVIEWNV;
+ (*KeywordMap)["taskNV"] = PERTASKNV;
+#endif
+
+ (*KeywordMap)["fcoopmatNV"] = FCOOPMATNV;
+
+ ReservedSet = new std::unordered_set<const char*, str_hash, str_eq>;
+
+ ReservedSet->insert("common");
+ ReservedSet->insert("partition");
+ ReservedSet->insert("active");
+ ReservedSet->insert("asm");
+ ReservedSet->insert("class");
+ ReservedSet->insert("union");
+ ReservedSet->insert("enum");
+ ReservedSet->insert("typedef");
+ ReservedSet->insert("template");
+ ReservedSet->insert("this");
+ ReservedSet->insert("goto");
+ ReservedSet->insert("inline");
+ ReservedSet->insert("noinline");
+ ReservedSet->insert("public");
+ ReservedSet->insert("static");
+ ReservedSet->insert("extern");
+ ReservedSet->insert("external");
+ ReservedSet->insert("interface");
+ ReservedSet->insert("long");
+ ReservedSet->insert("short");
+ ReservedSet->insert("half");
+ ReservedSet->insert("fixed");
+ ReservedSet->insert("unsigned");
+ ReservedSet->insert("input");
+ ReservedSet->insert("output");
+ ReservedSet->insert("hvec2");
+ ReservedSet->insert("hvec3");
+ ReservedSet->insert("hvec4");
+ ReservedSet->insert("fvec2");
+ ReservedSet->insert("fvec3");
+ ReservedSet->insert("fvec4");
+ ReservedSet->insert("sampler3DRect");
+ ReservedSet->insert("filter");
+ ReservedSet->insert("sizeof");
+ ReservedSet->insert("cast");
+ ReservedSet->insert("namespace");
+ ReservedSet->insert("using");
+}
+
+void TScanContext::deleteKeywordMap()
+{
+ delete KeywordMap;
+ KeywordMap = nullptr;
+ delete ReservedSet;
+ ReservedSet = nullptr;
+}
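+
+// Note: KeywordMap and ReservedSet are process-wide globals with no internal
+// locking; fillInKeywordMap() and deleteKeywordMap() appear intended to run
+// once each, on single-threaded process setup and teardown, bracketing all
+// scanning.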
+
+// Called by yylex to get the next token.
+// Returning 0 implies end of input.
+int TScanContext::tokenize(TPpContext* pp, TParserToken& token)
+{
+ do {
+ parserToken = &token;
+ TPpToken ppToken;
+ int token = pp->tokenize(ppToken); // shadows the 'token' parameter, which is only accessed via parserToken
+ if (token == EndOfInput)
+ return 0;
+
+ tokenText = ppToken.name;
+ loc = ppToken.loc;
+ parserToken->sType.lex.loc = loc;
+ switch (token) {
+ case ';': afterType = false; afterBuffer = false; return SEMICOLON;
+ case ',': afterType = false; return COMMA;
+ case ':': return COLON;
+ case '=': afterType = false; return EQUAL;
+ case '(': afterType = false; return LEFT_PAREN;
+ case ')': afterType = false; return RIGHT_PAREN;
+ case '.': field = true; return DOT;
+ case '!': return BANG;
+ case '-': return DASH;
+ case '~': return TILDE;
+ case '+': return PLUS;
+ case '*': return STAR;
+ case '/': return SLASH;
+ case '%': return PERCENT;
+ case '<': return LEFT_ANGLE;
+ case '>': return RIGHT_ANGLE;
+ case '|': return VERTICAL_BAR;
+ case '^': return CARET;
+ case '&': return AMPERSAND;
+ case '?': return QUESTION;
+ case '[': return LEFT_BRACKET;
+ case ']': return RIGHT_BRACKET;
+ case '{': afterStruct = false; afterBuffer = false; return LEFT_BRACE;
+ case '}': return RIGHT_BRACE;
+ case '\\':
+ parseContext.error(loc, "illegal use of escape character", "\\", "");
+ break;
+
+ case PPAtomAddAssign: return ADD_ASSIGN;
+ case PPAtomSubAssign: return SUB_ASSIGN;
+ case PPAtomMulAssign: return MUL_ASSIGN;
+ case PPAtomDivAssign: return DIV_ASSIGN;
+ case PPAtomModAssign: return MOD_ASSIGN;
+
+ case PpAtomRight: return RIGHT_OP;
+ case PpAtomLeft: return LEFT_OP;
+
+ case PpAtomRightAssign: return RIGHT_ASSIGN;
+ case PpAtomLeftAssign: return LEFT_ASSIGN;
+ case PpAtomAndAssign: return AND_ASSIGN;
+ case PpAtomOrAssign: return OR_ASSIGN;
+ case PpAtomXorAssign: return XOR_ASSIGN;
+
+ case PpAtomAnd: return AND_OP;
+ case PpAtomOr: return OR_OP;
+ case PpAtomXor: return XOR_OP;
+
+ case PpAtomEQ: return EQ_OP;
+ case PpAtomGE: return GE_OP;
+ case PpAtomNE: return NE_OP;
+ case PpAtomLE: return LE_OP;
+
+ case PpAtomDecrement: return DEC_OP;
+ case PpAtomIncrement: return INC_OP;
+
+ case PpAtomColonColon:
+ parseContext.error(loc, "not supported", "::", "");
+ break;
+
+ case PpAtomConstInt: parserToken->sType.lex.i = ppToken.ival; return INTCONSTANT;
+ case PpAtomConstUint: parserToken->sType.lex.i = ppToken.ival; return UINTCONSTANT;
+ case PpAtomConstInt16: parserToken->sType.lex.i = ppToken.ival; return INT16CONSTANT;
+ case PpAtomConstUint16: parserToken->sType.lex.i = ppToken.ival; return UINT16CONSTANT;
+ case PpAtomConstInt64: parserToken->sType.lex.i64 = ppToken.i64val; return INT64CONSTANT;
+ case PpAtomConstUint64: parserToken->sType.lex.i64 = ppToken.i64val; return UINT64CONSTANT;
+ case PpAtomConstFloat: parserToken->sType.lex.d = ppToken.dval; return FLOATCONSTANT;
+ case PpAtomConstDouble: parserToken->sType.lex.d = ppToken.dval; return DOUBLECONSTANT;
+ case PpAtomConstFloat16: parserToken->sType.lex.d = ppToken.dval; return FLOAT16CONSTANT;
+ case PpAtomIdentifier:
+ {
+ int token = tokenizeIdentifier();
+ field = false;
+ return token;
+ }
+
+ case EndOfInput: return 0;
+
+ default:
+ char buf[2];
+ buf[0] = (char)token;
+ buf[1] = 0;
+ parseContext.error(loc, "unexpected token", buf, "");
+ break;
+ }
+ } while (true);
+}
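+
+// A sketch of the expected caller (the names below are illustrative, not a
+// quote of the actual glslang glue): the bison-generated yylex() forwards to
+// tokenize(), along these lines:
+//
+//   int yylex(YYSTYPE* lvalue, glslang::TParseContext& pc)
+//   {
+//       return pc.getScanContext()->tokenize(pc.getPpContext(),
+//                                            *reinterpret_cast<TParserToken*>(lvalue));
+//   }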
+
+int TScanContext::tokenizeIdentifier()
+{
+ if (ReservedSet->find(tokenText) != ReservedSet->end())
+ return reservedWord();
+
+ auto it = KeywordMap->find(tokenText);
+ if (it == KeywordMap->end()) {
+ // Should have an identifier of some sort
+ return identifierOrType();
+ }
+ keyword = it->second;
+
+ switch (keyword) {
+ case CONST:
+ case UNIFORM:
+ case IN:
+ case OUT:
+ case INOUT:
+ case BREAK:
+ case CONTINUE:
+ case DO:
+ case FOR:
+ case WHILE:
+ case IF:
+ case ELSE:
+ case DISCARD:
+ case RETURN:
+ case CASE:
+ return keyword;
+
+ case STRUCT:
+ afterStruct = true;
+ return keyword;
+
+ case NONUNIFORM:
+ if (parseContext.extensionTurnedOn(E_GL_EXT_nonuniform_qualifier))
+ return keyword;
+ else
+ return identifierOrType();
+
+ case SWITCH:
+ case DEFAULT:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 130))
+ reservedWord();
+ return keyword;
+
+ case VOID:
+ case BOOL:
+ case FLOAT:
+ case INT:
+ case BVEC2:
+ case BVEC3:
+ case BVEC4:
+ case VEC2:
+ case VEC3:
+ case VEC4:
+ case IVEC2:
+ case IVEC3:
+ case IVEC4:
+ case MAT2:
+ case MAT3:
+ case MAT4:
+ case SAMPLER2D:
+ case SAMPLERCUBE:
+ afterType = true;
+ return keyword;
+
+ case BOOLCONSTANT:
+ if (strcmp("true", tokenText) == 0)
+ parserToken->sType.lex.b = true;
+ else
+ parserToken->sType.lex.b = false;
+ return keyword;
+
+ case ATTRIBUTE:
+ case VARYING:
+ if (parseContext.profile == EEsProfile && parseContext.version >= 300)
+ reservedWord();
+ return keyword;
+
+ case BUFFER:
+ afterBuffer = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version < 310) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 430))
+ return identifierOrType();
+ return keyword;
+
+#ifdef NV_EXTENSIONS
+ case PAYLOADNV:
+ case PAYLOADINNV:
+ case HITATTRNV:
+ case CALLDATANV:
+ case CALLDATAINNV:
+ case ACCSTRUCTNV:
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 460
+ && parseContext.extensionTurnedOn(E_GL_NV_ray_tracing)))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case ATOMIC_UINT:
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 310) ||
+ parseContext.extensionTurnedOn(E_GL_ARB_shader_atomic_counters))
+ return keyword;
+ return es30ReservedFromGLSL(420);
+
+ case COHERENT:
+ case DEVICECOHERENT:
+ case QUEUEFAMILYCOHERENT:
+ case WORKGROUPCOHERENT:
+ case SUBGROUPCOHERENT:
+ case NONPRIVATE:
+ case RESTRICT:
+ case READONLY:
+ case WRITEONLY:
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310)
+ return keyword;
+ return es30ReservedFromGLSL(parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store) ? 130 : 420);
+
+ case VOLATILE:
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310)
+ return keyword;
+ if (! parseContext.symbolTable.atBuiltInLevel() && (parseContext.profile == EEsProfile ||
+ (parseContext.version < 420 && ! parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))))
+ reservedWord();
+ return keyword;
+
+ case LAYOUT:
+ {
+ const int numLayoutExts = 2;
+ const char* layoutExts[numLayoutExts] = { E_GL_ARB_shading_language_420pack,
+ E_GL_ARB_explicit_attrib_location };
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 140 &&
+ ! parseContext.extensionsTurnedOn(numLayoutExts, layoutExts)))
+ return identifierOrType();
+ return keyword;
+ }
+ case SHARED:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 140))
+ return identifierOrType();
+ return keyword;
+
+ case PATCH:
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile == EEsProfile &&
+ (parseContext.version >= 320 ||
+ parseContext.extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))) ||
+ (parseContext.profile != EEsProfile && parseContext.extensionTurnedOn(E_GL_ARB_tessellation_shader)))
+ return keyword;
+
+ return es30ReservedFromGLSL(400);
+
+ case SAMPLE:
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(1, &E_GL_OES_shader_multisample_interpolation))
+ return keyword;
+ return es30ReservedFromGLSL(400);
+
+ case SUBROUTINE:
+ return es30ReservedFromGLSL(400);
+
+ case HIGH_PRECISION:
+ case MEDIUM_PRECISION:
+ case LOW_PRECISION:
+ case PRECISION:
+ return precisionKeyword();
+
+ case MAT2X2:
+ case MAT2X3:
+ case MAT2X4:
+ case MAT3X2:
+ case MAT3X3:
+ case MAT3X4:
+ case MAT4X2:
+ case MAT4X3:
+ case MAT4X4:
+ return matNxM();
+
+ case DMAT2:
+ case DMAT3:
+ case DMAT4:
+ case DMAT2X2:
+ case DMAT2X3:
+ case DMAT2X4:
+ case DMAT3X2:
+ case DMAT3X3:
+ case DMAT3X4:
+ case DMAT4X2:
+ case DMAT4X3:
+ case DMAT4X4:
+ return dMat();
+
+ case IMAGE1D:
+ case IIMAGE1D:
+ case UIMAGE1D:
+ case IMAGE1DARRAY:
+ case IIMAGE1DARRAY:
+ case UIMAGE1DARRAY:
+ case IMAGE2DRECT:
+ case IIMAGE2DRECT:
+ case UIMAGE2DRECT:
+ afterType = true;
+ return firstGenerationImage(false);
+
+ case IMAGEBUFFER:
+ case IIMAGEBUFFER:
+ case UIMAGEBUFFER:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
+ return keyword;
+ return firstGenerationImage(false);
+
+ case IMAGE2D:
+ case IIMAGE2D:
+ case UIMAGE2D:
+ case IMAGE3D:
+ case IIMAGE3D:
+ case UIMAGE3D:
+ case IMAGECUBE:
+ case IIMAGECUBE:
+ case UIMAGECUBE:
+ case IMAGE2DARRAY:
+ case IIMAGE2DARRAY:
+ case UIMAGE2DARRAY:
+ afterType = true;
+ return firstGenerationImage(true);
+
+ case IMAGECUBEARRAY:
+ case IIMAGECUBEARRAY:
+ case UIMAGECUBEARRAY:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array))
+ return keyword;
+ return secondGenerationImage();
+
+ case IMAGE2DMS:
+ case IIMAGE2DMS:
+ case UIMAGE2DMS:
+ case IMAGE2DMSARRAY:
+ case IIMAGE2DMSARRAY:
+ case UIMAGE2DMSARRAY:
+ afterType = true;
+ return secondGenerationImage();
+
+ case DOUBLE:
+ case DVEC2:
+ case DVEC3:
+ case DVEC4:
+ afterType = true;
+ if (parseContext.profile == EEsProfile || parseContext.version < 400)
+ reservedWord();
+ return keyword;
+
+ case INT64_T:
+ case UINT64_T:
+ case I64VEC2:
+ case I64VEC3:
+ case I64VEC4:
+ case U64VEC2:
+ case U64VEC3:
+ case U64VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_int64) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64))))
+ return keyword;
+ return identifierOrType();
+
+ case INT8_T:
+ case UINT8_T:
+ case I8VEC2:
+ case I8VEC3:
+ case I8VEC4:
+ case U8VEC2:
+ case U8VEC3:
+ case U8VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_8bit_storage) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+
+ case INT16_T:
+ case UINT16_T:
+ case I16VEC2:
+ case I16VEC3:
+ case I16VEC4:
+ case U16VEC2:
+ case U16VEC3:
+ case U16VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (
+#ifdef AMD_EXTENSIONS
+ parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_int16) ||
+#endif
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16))))
+ return keyword;
+ return identifierOrType();
+ case INT32_T:
+ case UINT32_T:
+ case I32VEC2:
+ case I32VEC3:
+ case I32VEC4:
+ case U32VEC2:
+ case U32VEC3:
+ case U32VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+ case FLOAT32_T:
+ case F32VEC2:
+ case F32VEC3:
+ case F32VEC4:
+ case F32MAT2:
+ case F32MAT3:
+ case F32MAT4:
+ case F32MAT2X2:
+ case F32MAT2X3:
+ case F32MAT2X4:
+ case F32MAT3X2:
+ case F32MAT3X3:
+ case F32MAT3X4:
+ case F32MAT4X2:
+ case F32MAT4X3:
+ case F32MAT4X4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+
+ case FLOAT64_T:
+ case F64VEC2:
+ case F64VEC3:
+ case F64VEC4:
+ case F64MAT2:
+ case F64MAT3:
+ case F64MAT4:
+ case F64MAT2X2:
+ case F64MAT2X3:
+ case F64MAT2X4:
+ case F64MAT3X2:
+ case F64MAT3X3:
+ case F64MAT3X4:
+ case F64MAT4X2:
+ case F64MAT4X3:
+ case F64MAT4X4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ ((parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64)) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+
+ case FLOAT16_T:
+ case F16VEC2:
+ case F16VEC3:
+ case F16VEC4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (
+#ifdef AMD_EXTENSIONS
+ parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) ||
+#endif
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16))))
+ return keyword;
+
+ return identifierOrType();
+
+ case F16MAT2:
+ case F16MAT3:
+ case F16MAT4:
+ case F16MAT2X2:
+ case F16MAT2X3:
+ case F16MAT2X4:
+ case F16MAT3X2:
+ case F16MAT3X3:
+ case F16MAT3X4:
+ case F16MAT4X2:
+ case F16MAT4X3:
+ case F16MAT4X4:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ (
+#ifdef AMD_EXTENSIONS
+ parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) ||
+#endif
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) ||
+ parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16))))
+ return keyword;
+
+ return identifierOrType();
+
+ case SAMPLERCUBEARRAY:
+ case SAMPLERCUBEARRAYSHADOW:
+ case ISAMPLERCUBEARRAY:
+ case USAMPLERCUBEARRAY:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array))
+ return keyword;
+ if (parseContext.profile == EEsProfile || (parseContext.version < 400 && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_cube_map_array)))
+ reservedWord();
+ return keyword;
+
+ case ISAMPLER1D:
+ case ISAMPLER1DARRAY:
+ case SAMPLER1DARRAYSHADOW:
+ case USAMPLER1D:
+ case USAMPLER1DARRAY:
+ afterType = true;
+ return es30ReservedFromGLSL(130);
+
+ case UINT:
+ case UVEC2:
+ case UVEC3:
+ case UVEC4:
+ case SAMPLERCUBESHADOW:
+ case SAMPLER2DARRAY:
+ case SAMPLER2DARRAYSHADOW:
+ case ISAMPLER2D:
+ case ISAMPLER3D:
+ case ISAMPLERCUBE:
+ case ISAMPLER2DARRAY:
+ case USAMPLER2D:
+ case USAMPLER3D:
+ case USAMPLERCUBE:
+ case USAMPLER2DARRAY:
+ afterType = true;
+ return nonreservedKeyword(300, 130);
+
+ case ISAMPLER2DRECT:
+ case USAMPLER2DRECT:
+ afterType = true;
+ return es30ReservedFromGLSL(140);
+
+ case SAMPLERBUFFER:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
+ return keyword;
+ return es30ReservedFromGLSL(130);
+
+ case ISAMPLERBUFFER:
+ case USAMPLERBUFFER:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
+ return keyword;
+ return es30ReservedFromGLSL(140);
+
+ case SAMPLER2DMS:
+ case ISAMPLER2DMS:
+ case USAMPLER2DMS:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310)
+ return keyword;
+ return es30ReservedFromGLSL(150);
+
+ case SAMPLER2DMSARRAY:
+ case ISAMPLER2DMSARRAY:
+ case USAMPLER2DMSARRAY:
+ afterType = true;
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionsTurnedOn(1, &E_GL_OES_texture_storage_multisample_2d_array))
+ return keyword;
+ return es30ReservedFromGLSL(150);
+
+ case SAMPLER1D:
+ case SAMPLER1DSHADOW:
+ afterType = true;
+ if (parseContext.profile == EEsProfile)
+ reservedWord();
+ return keyword;
+
+ case SAMPLER3D:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version < 300) {
+ if (!parseContext.extensionTurnedOn(E_GL_OES_texture_3D))
+ reservedWord();
+ }
+ return keyword;
+
+ case SAMPLER2DSHADOW:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version < 300) {
+ if (!parseContext.extensionTurnedOn(E_GL_EXT_shadow_samplers))
+ reservedWord();
+ }
+ return keyword;
+
+ case SAMPLER2DRECT:
+ case SAMPLER2DRECTSHADOW:
+ afterType = true;
+ if (parseContext.profile == EEsProfile)
+ reservedWord();
+ else if (parseContext.version < 140 && ! parseContext.symbolTable.atBuiltInLevel() && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_rectangle)) {
+ if (parseContext.relaxedErrors())
+ parseContext.requireExtensions(loc, 1, &E_GL_ARB_texture_rectangle, "texture-rectangle sampler keyword");
+ else
+ reservedWord();
+ }
+ return keyword;
+
+ case SAMPLER1DARRAY:
+ afterType = true;
+ if (parseContext.profile == EEsProfile && parseContext.version == 300)
+ reservedWord();
+ else if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 130))
+ return identifierOrType();
+ return keyword;
+
+ case SAMPLEREXTERNALOES:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external) ||
+ parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external_essl3))
+ return keyword;
+ return identifierOrType();
+
+ case SAMPLEREXTERNAL2DY2YEXT:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ parseContext.extensionTurnedOn(E_GL_EXT_YUV_target))
+ return keyword;
+ return identifierOrType();
+
+ case TEXTURE2D:
+ case TEXTURECUBE:
+ case TEXTURECUBEARRAY:
+ case ITEXTURECUBEARRAY:
+ case UTEXTURECUBEARRAY:
+ case ITEXTURE1DARRAY:
+ case UTEXTURE1D:
+ case ITEXTURE1D:
+ case UTEXTURE1DARRAY:
+ case TEXTUREBUFFER:
+ case TEXTURE2DARRAY:
+ case ITEXTURE2D:
+ case ITEXTURE3D:
+ case ITEXTURECUBE:
+ case ITEXTURE2DARRAY:
+ case UTEXTURE2D:
+ case UTEXTURE3D:
+ case UTEXTURECUBE:
+ case UTEXTURE2DARRAY:
+ case ITEXTURE2DRECT:
+ case UTEXTURE2DRECT:
+ case ITEXTUREBUFFER:
+ case UTEXTUREBUFFER:
+ case TEXTURE2DMS:
+ case ITEXTURE2DMS:
+ case UTEXTURE2DMS:
+ case TEXTURE2DMSARRAY:
+ case ITEXTURE2DMSARRAY:
+ case UTEXTURE2DMSARRAY:
+ case TEXTURE1D:
+ case TEXTURE3D:
+ case TEXTURE2DRECT:
+ case TEXTURE1DARRAY:
+ case SAMPLER:
+ case SAMPLERSHADOW:
+ if (parseContext.spvVersion.vulkan > 0)
+ return keyword;
+ else
+ return identifierOrType();
+
+ case SUBPASSINPUT:
+ case SUBPASSINPUTMS:
+ case ISUBPASSINPUT:
+ case ISUBPASSINPUTMS:
+ case USUBPASSINPUT:
+ case USUBPASSINPUTMS:
+ if (parseContext.spvVersion.vulkan > 0)
+ return keyword;
+ else
+ return identifierOrType();
+
+#ifdef AMD_EXTENSIONS
+ case F16SAMPLER1D:
+ case F16SAMPLER2D:
+ case F16SAMPLER3D:
+ case F16SAMPLER2DRECT:
+ case F16SAMPLERCUBE:
+ case F16SAMPLER1DARRAY:
+ case F16SAMPLER2DARRAY:
+ case F16SAMPLERCUBEARRAY:
+ case F16SAMPLERBUFFER:
+ case F16SAMPLER2DMS:
+ case F16SAMPLER2DMSARRAY:
+ case F16SAMPLER1DSHADOW:
+ case F16SAMPLER2DSHADOW:
+ case F16SAMPLER1DARRAYSHADOW:
+ case F16SAMPLER2DARRAYSHADOW:
+ case F16SAMPLER2DRECTSHADOW:
+ case F16SAMPLERCUBESHADOW:
+ case F16SAMPLERCUBEARRAYSHADOW:
+
+ case F16IMAGE1D:
+ case F16IMAGE2D:
+ case F16IMAGE3D:
+ case F16IMAGE2DRECT:
+ case F16IMAGECUBE:
+ case F16IMAGE1DARRAY:
+ case F16IMAGE2DARRAY:
+ case F16IMAGECUBEARRAY:
+ case F16IMAGEBUFFER:
+ case F16IMAGE2DMS:
+ case F16IMAGE2DMSARRAY:
+
+ case F16TEXTURE1D:
+ case F16TEXTURE2D:
+ case F16TEXTURE3D:
+ case F16TEXTURE2DRECT:
+ case F16TEXTURECUBE:
+ case F16TEXTURE1DARRAY:
+ case F16TEXTURE2DARRAY:
+ case F16TEXTURECUBEARRAY:
+ case F16TEXTUREBUFFER:
+ case F16TEXTURE2DMS:
+ case F16TEXTURE2DMSARRAY:
+
+ case F16SUBPASSINPUT:
+ case F16SUBPASSINPUTMS:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float_fetch) &&
+ parseContext.profile != EEsProfile && parseContext.version >= 450))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case NOPERSPECTIVE:
+#ifdef NV_EXTENSIONS
+ if (parseContext.profile == EEsProfile && parseContext.version >= 300 &&
+ parseContext.extensionTurnedOn(E_GL_NV_shader_noperspective_interpolation))
+ return keyword;
+#endif
+ return es30ReservedFromGLSL(130);
+
+ case SMOOTH:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 130))
+ return identifierOrType();
+ return keyword;
+
+#ifdef AMD_EXTENSIONS
+ case EXPLICITINTERPAMD:
+ if (parseContext.profile != EEsProfile && parseContext.version >= 450 &&
+ parseContext.extensionTurnedOn(E_GL_AMD_shader_explicit_vertex_parameter))
+ return keyword;
+ return identifierOrType();
+#endif
+
+#ifdef NV_EXTENSIONS
+ case PERVERTEXNV:
+ if (((parseContext.profile != EEsProfile && parseContext.version >= 450) ||
+ (parseContext.profile == EEsProfile && parseContext.version >= 320)) &&
+ parseContext.extensionTurnedOn(E_GL_NV_fragment_shader_barycentric))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case FLAT:
+ if (parseContext.profile == EEsProfile && parseContext.version < 300)
+ reservedWord();
+ else if (parseContext.profile != EEsProfile && parseContext.version < 130)
+ return identifierOrType();
+ return keyword;
+
+ case CENTROID:
+ if (parseContext.version < 120)
+ return identifierOrType();
+ return keyword;
+
+ case PRECISE:
+ if ((parseContext.profile == EEsProfile &&
+ (parseContext.version >= 320 || parseContext.extensionsTurnedOn(Num_AEP_gpu_shader5, AEP_gpu_shader5))) ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 400))
+ return keyword;
+ if (parseContext.profile == EEsProfile && parseContext.version == 310) {
+ reservedWord();
+ return keyword;
+ }
+ return identifierOrType();
+
+ case INVARIANT:
+ if (parseContext.profile != EEsProfile && parseContext.version < 120)
+ return identifierOrType();
+ return keyword;
+
+ case PACKED:
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < 330))
+ return reservedWord();
+ return identifierOrType();
+
+ case RESOURCE:
+ {
+ bool reserved = (parseContext.profile == EEsProfile && parseContext.version >= 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 420);
+ return identifierOrReserved(reserved);
+ }
+ case SUPERP:
+ {
+ bool reserved = parseContext.profile == EEsProfile || parseContext.version >= 130;
+ return identifierOrReserved(reserved);
+ }
+
+#ifdef NV_EXTENSIONS
+ case PERPRIMITIVENV:
+ case PERVIEWNV:
+ case PERTASKNV:
+ if ((parseContext.profile != EEsProfile && parseContext.version >= 450) ||
+ (parseContext.profile == EEsProfile && parseContext.version >= 320) ||
+ parseContext.extensionTurnedOn(E_GL_NV_mesh_shader))
+ return keyword;
+ return identifierOrType();
+#endif
+
+ case FCOOPMATNV:
+ afterType = true;
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ parseContext.extensionTurnedOn(E_GL_NV_cooperative_matrix))
+ return keyword;
+ return identifierOrType();
+
+ default:
+ parseContext.infoSink.info.message(EPrefixInternalError, "Unknown glslang keyword", loc);
+ return 0;
+ }
+}
+
+int TScanContext::identifierOrType()
+{
+ parserToken->sType.lex.string = NewPoolTString(tokenText);
+ if (field)
+ return IDENTIFIER;
+
+ parserToken->sType.lex.symbol = parseContext.symbolTable.find(*parserToken->sType.lex.string);
+ if (! afterType && ! afterStruct && parserToken->sType.lex.symbol != nullptr) {
+ if (const TVariable* variable = parserToken->sType.lex.symbol->getAsVariable()) {
+ if (variable->isUserType() &&
+ // treat redeclaration of forward-declared buffer/uniform reference as an identifier
+ !(variable->getType().getBasicType() == EbtReference && afterBuffer)) {
+ afterType = true;
+
+ return TYPE_NAME;
+ }
+ }
+ }
+
+ return IDENTIFIER;
+}
+
+// Give an error for use of a reserved symbol.
+// However, allow built-in declarations to use reserved words, to allow
+// extension support before the extension is enabled.
+int TScanContext::reservedWord()
+{
+ if (! parseContext.symbolTable.atBuiltInLevel())
+ parseContext.error(loc, "Reserved word.", tokenText, "", "");
+
+ return 0;
+}
+
+int TScanContext::identifierOrReserved(bool reserved)
+{
+ if (reserved) {
+ reservedWord();
+
+ return 0;
+ }
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future reserved keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+// For keywords that suddenly showed up on non-ES (not previously reserved)
+// but then got reserved by ES 3.0.
+int TScanContext::es30ReservedFromGLSL(int version)
+{
+ if (parseContext.symbolTable.atBuiltInLevel())
+ return keyword;
+
+ if ((parseContext.profile == EEsProfile && parseContext.version < 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version < version)) {
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "future reserved word in ES 300 and keyword in GLSL", tokenText, "");
+
+ return identifierOrType();
+ } else if (parseContext.profile == EEsProfile && parseContext.version >= 300)
+ reservedWord();
+
+ return keyword;
+}
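+
+// For instance, the NOPERSPECTIVE case earlier resolves through
+// es30ReservedFromGLSL(130): "noperspective" is a keyword in desktop GLSL 130
+// and later, a reserved word (an error outside built-in declarations) in
+// ES 300 and later, and an ordinary identifier before either.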
+
+// For a keyword that was never reserved until it suddenly
+// showed up, in both an ES version and a non-ES version.
+int TScanContext::nonreservedKeyword(int esVersion, int nonEsVersion)
+{
+ if ((parseContext.profile == EEsProfile && parseContext.version < esVersion) ||
+ (parseContext.profile != EEsProfile && parseContext.version < nonEsVersion)) {
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future keyword", tokenText, "");
+
+ return identifierOrType();
+ }
+
+ return keyword;
+}
+
+int TScanContext::precisionKeyword()
+{
+ if (parseContext.profile == EEsProfile || parseContext.version >= 130)
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using ES precision qualifier keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::matNxM()
+{
+ afterType = true;
+
+ if (parseContext.version > 110)
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future non-square matrix type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::dMat()
+{
+ afterType = true;
+
+ if (parseContext.profile == EEsProfile && parseContext.version >= 300) {
+ reservedWord();
+
+ return keyword;
+ }
+
+ if (parseContext.profile != EEsProfile && parseContext.version >= 400)
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::firstGenerationImage(bool inEs310)
+{
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile && (parseContext.version >= 420 ||
+ parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))) ||
+ (inEs310 && parseContext.profile == EEsProfile && parseContext.version >= 310))
+ return keyword;
+
+ if ((parseContext.profile == EEsProfile && parseContext.version >= 300) ||
+ (parseContext.profile != EEsProfile && parseContext.version >= 130)) {
+ reservedWord();
+
+ return keyword;
+ }
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+int TScanContext::secondGenerationImage()
+{
+ if (parseContext.profile == EEsProfile && parseContext.version >= 310) {
+ reservedWord();
+ return keyword;
+ }
+
+ if (parseContext.symbolTable.atBuiltInLevel() ||
+ (parseContext.profile != EEsProfile &&
+ (parseContext.version >= 420 || parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))))
+ return keyword;
+
+ if (parseContext.forwardCompatible)
+ parseContext.warn(loc, "using future type keyword", tokenText, "");
+
+ return identifierOrType();
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Scan.h b/src/3rdparty/glslang/glslang/MachineIndependent/Scan.h
new file mode 100644
index 0000000..24b75cf
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Scan.h
@@ -0,0 +1,276 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef _GLSLANG_SCAN_INCLUDED_
+#define _GLSLANG_SCAN_INCLUDED_
+
+#include "Versions.h"
+
+namespace glslang {
+
+// Use a global end-of-input character, so no translation is needed across
+// layers of encapsulation. Characters are all 8 bit, and positive, so there is
+// no aliasing of character 255 onto -1, for example.
+const int EndOfInput = -1;
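+// (For example, on platforms where plain char is signed, the byte 0xFF read
+// through a char* would widen to the int -1 and alias EndOfInput; read through
+// an unsigned char*, as TInputScanner does below, it widens to 255.)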
+
+//
+// A character scanner that seamlessly, on read-only strings, reads across an
+// array of strings without assuming null termination.
+//
+class TInputScanner {
+public:
+ TInputScanner(int n, const char* const s[], size_t L[], const char* const* names = nullptr,
+ int b = 0, int f = 0, bool single = false) :
+ numSources(n),
+ // up to this point, common usage is "char*", but now we need positive 8-bit characters
+ sources(reinterpret_cast<const unsigned char* const *>(s)),
+ lengths(L), currentSource(0), currentChar(0), stringBias(b), finale(f), singleLogical(single),
+ endOfFileReached(false)
+ {
+ loc = new TSourceLoc[numSources];
+ for (int i = 0; i < numSources; ++i) {
+ loc[i].init(i - stringBias);
+ }
+ if (names != nullptr) {
+ for (int i = 0; i < numSources; ++i)
+ loc[i].name = names[i] != nullptr ? NewPoolTString(names[i]) : nullptr;
+ }
+ loc[currentSource].line = 1;
+ logicalSourceLoc.init(1);
+ logicalSourceLoc.name = loc[0].name;
+ }
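+
+ // A usage sketch (with hypothetical strings): scanning two non-null-terminated
+ // pieces as one source stream:
+ //     const char* src[2] = { "void main() {", "}" };
+ //     size_t lens[2]     = { 13, 1 };
+ //     TInputScanner scanner(2, src, lens);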
+
+ virtual ~TInputScanner()
+ {
+ delete [] loc;
+ }
+
+ // retrieve the next character and advance one character
+ int get()
+ {
+ int ret = peek();
+ if (ret == EndOfInput)
+ return ret;
+ ++loc[currentSource].column;
+ ++logicalSourceLoc.column;
+ if (ret == '\n') {
+ ++loc[currentSource].line;
+ ++logicalSourceLoc.line;
+ logicalSourceLoc.column = 0;
+ loc[currentSource].column = 0;
+ }
+ advance();
+
+ return ret;
+ }
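+
+ // A minimal read loop over the scanner (a sketch; 'handle' is a placeholder):
+ //     int c;
+ //     while ((c = scanner.get()) != EndOfInput)
+ //         handle(c);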
+
+ // retrieve the next character, no advance
+ int peek()
+ {
+ if (currentSource >= numSources) {
+ endOfFileReached = true;
+ return EndOfInput;
+ }
+ // Make sure we do not read off the end of a string.
+ // N.B. Sources can have a length of 0.
+ int sourceToRead = currentSource;
+ size_t charToRead = currentChar;
+ while (charToRead >= lengths[sourceToRead]) {
+ charToRead = 0;
+ sourceToRead += 1;
+ if (sourceToRead >= numSources) {
+ return EndOfInput;
+ }
+ }
+
+ // Here, we care about making negative valued characters positive
+ return sources[sourceToRead][charToRead];
+ }
+
+ // go back one character
+ void unget()
+ {
+ // Do not roll back once we've reached the end of the file.
+ if (endOfFileReached)
+ return;
+
+ if (currentChar > 0) {
+ --currentChar;
+ --loc[currentSource].column;
+ --logicalSourceLoc.column;
+ if (loc[currentSource].column < 0) {
+ // We've moved back past a new line. Find the
+ // previous newline (or start of the file) to compute
+ // the column count on the now current line.
+ size_t chIndex = currentChar;
+ while (chIndex > 0) {
+ if (sources[currentSource][chIndex] == '\n') {
+ break;
+ }
+ --chIndex;
+ }
+ logicalSourceLoc.column = (int)(currentChar - chIndex);
+ loc[currentSource].column = (int)(currentChar - chIndex);
+ }
+ } else {
+ do {
+ --currentSource;
+ } while (currentSource > 0 && lengths[currentSource] == 0);
+ if (lengths[currentSource] == 0) {
+ // set to 0 if we've backed up to the start of an empty string
+ currentChar = 0;
+ } else
+ currentChar = lengths[currentSource] - 1;
+ }
+ if (peek() == '\n') {
+ --loc[currentSource].line;
+ --logicalSourceLoc.line;
+ }
+ }
+
+ // for #line override
+ void setLine(int newLine)
+ {
+ logicalSourceLoc.line = newLine;
+ loc[getLastValidSourceIndex()].line = newLine;
+ }
+
+ // for #line override in filename based parsing
+ void setFile(const char* filename)
+ {
+ TString* fn_tstr = NewPoolTString(filename);
+ logicalSourceLoc.name = fn_tstr;
+ loc[getLastValidSourceIndex()].name = fn_tstr;
+ }
+
+ void setFile(const char* filename, int i)
+ {
+ TString* fn_tstr = NewPoolTString(filename);
+ if (i == getLastValidSourceIndex()) {
+ logicalSourceLoc.name = fn_tstr;
+ }
+ loc[i].name = fn_tstr;
+ }
+
+ void setString(int newString)
+ {
+ logicalSourceLoc.string = newString;
+ loc[getLastValidSourceIndex()].string = newString;
+ logicalSourceLoc.name = nullptr;
+ loc[getLastValidSourceIndex()].name = nullptr;
+ }
+
+ // for #include content indentation
+ void setColumn(int col)
+ {
+ logicalSourceLoc.column = col;
+ loc[getLastValidSourceIndex()].column = col;
+ }
+
+ void setEndOfInput()
+ {
+ endOfFileReached = true;
+ currentSource = numSources;
+ }
+
+ bool atEndOfInput() const { return endOfFileReached; }
+
+ const TSourceLoc& getSourceLoc() const
+ {
+ if (singleLogical) {
+ return logicalSourceLoc;
+ } else {
+ return loc[std::max(0, std::min(currentSource, numSources - finale - 1))];
+ }
+ }
+ // Returns the index (starting from 0) of the most recent valid source string we are reading from.
+ int getLastValidSourceIndex() const { return std::min(currentSource, numSources - 1); }
+
+ void consumeWhiteSpace(bool& foundNonSpaceTab);
+ bool consumeComment();
+ void consumeWhitespaceComment(bool& foundNonSpaceTab);
+ bool scanVersion(int& version, EProfile& profile, bool& notFirstToken);
+
+protected:
+
+ // advance one character
+ void advance()
+ {
+ ++currentChar;
+ if (currentChar >= lengths[currentSource]) {
+ ++currentSource;
+ if (currentSource < numSources) {
+ loc[currentSource].string = loc[currentSource - 1].string + 1;
+ loc[currentSource].line = 1;
+ loc[currentSource].column = 0;
+ }
+ while (currentSource < numSources && lengths[currentSource] == 0) {
+ ++currentSource;
+ if (currentSource < numSources) {
+ loc[currentSource].string = loc[currentSource - 1].string + 1;
+ loc[currentSource].line = 1;
+ loc[currentSource].column = 0;
+ }
+ }
+ currentChar = 0;
+ }
+ }
+
+ int numSources; // number of strings in source
+ const unsigned char* const *sources; // array of strings; must be converted to positive values on use, to avoid aliasing with -1 as EndOfInput
+ const size_t *lengths; // length of each string
+ int currentSource;
+ size_t currentChar;
+
+ // This is for reporting what string/line an error occurred on, and can be overridden by #line.
+ // It remembers the last state of each source string as it is left for the next one, so unget()
+ // can restore that state.
+ TSourceLoc* loc; // an array
+
+ int stringBias; // the first string that is the user's string number 0
+ int finale; // number of internal strings after user's last string
+
+ TSourceLoc logicalSourceLoc;
+ bool singleLogical; // treats the strings as a single logical string.
+ // locations will be reported from the first string.
+
+ // Set to true once peek() returns EndOfInput, so that we won't roll back
+ // once we've reached end of input.
+ bool endOfFileReached;
+};
+
+} // end namespace glslang
+
+#endif // _GLSLANG_SCAN_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/ScanContext.h b/src/3rdparty/glslang/glslang/MachineIndependent/ScanContext.h
new file mode 100644
index 0000000..74b2b3c
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/ScanContext.h
@@ -0,0 +1,93 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// This holds context specific to the GLSL scanner, which
+// sits between the preprocessor scanner and parser.
+//
+
+#pragma once
+
+#include "ParseHelper.h"
+
+namespace glslang {
+
+class TPpContext;
+class TPpToken;
+class TParserToken;
+
+class TScanContext {
+public:
+ explicit TScanContext(TParseContextBase& pc) :
+ parseContext(pc),
+ afterType(false), afterStruct(false),
+ field(false), afterBuffer(false) { }
+ virtual ~TScanContext() { }
+
+ static void fillInKeywordMap();
+ static void deleteKeywordMap();
+
+ int tokenize(TPpContext*, TParserToken&);
+
+protected:
+ TScanContext(TScanContext&);
+ TScanContext& operator=(TScanContext&);
+
+ int tokenizeIdentifier();
+ int identifierOrType();
+ int reservedWord();
+ int identifierOrReserved(bool reserved);
+ int es30ReservedFromGLSL(int version);
+ int nonreservedKeyword(int esVersion, int nonEsVersion);
+ int precisionKeyword();
+ int matNxM();
+ int dMat();
+ int firstGenerationImage(bool inEs310);
+ int secondGenerationImage();
+
+ TParseContextBase& parseContext;
+ bool afterType; // true if we've recognized a type, so can only be looking for an identifier
+ bool afterStruct; // true if we've recognized the STRUCT keyword, so can only be looking for an identifier
+ bool field; // true if we're on a field, right after a '.'
+ bool afterBuffer; // true if we've recognized the BUFFER keyword
+ TSourceLoc loc;
+ TParserToken* parserToken;
+ TPpToken* ppToken;
+
+ const char* tokenText;
+ int keyword;
+};
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp
new file mode 100644
index 0000000..adfe534
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/ShaderLang.cpp
@@ -0,0 +1,2041 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement the top-level of interface to the compiler/linker,
+// as defined in ShaderLang.h
+// This is the platform independent interface between an OGL driver
+// and the shading language compiler/linker.
+//
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <memory>
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "Scan.h"
+#include "ScanContext.h"
+
+#ifdef ENABLE_HLSL
+#include "../../hlsl/hlslParseHelper.h"
+#include "../../hlsl/hlslParseables.h"
+#include "../../hlsl/hlslScanContext.h"
+#endif
+
+#include "../Include/ShHandle.h"
+#include "../../OGLCompilersDLL/InitializeDll.h"
+
+#include "preprocessor/PpContext.h"
+
+#define SH_EXPORTING
+#include "../Public/ShaderLang.h"
+#include "reflection.h"
+#include "iomapper.h"
+#include "Initialize.h"
+
+// TODO: this really shouldn't be here; it is only here because of the trial addition
+// of printing pre-processed tokens, which requires knowing the string literal
+// token to print ", but none of that seems appropriate for this file.
+#include "preprocessor/PpTokens.h"
+
+namespace { // anonymous namespace for file-local functions and symbols
+
+// Total number of successful initializers of glslang: a refcount
+// Shared global; access should be protected by a global mutex/critical section.
+int NumberOfClients = 0;
+
+using namespace glslang;
+
+// Create a language specific version of parseables.
+TBuiltInParseables* CreateBuiltInParseables(TInfoSink& infoSink, EShSource source)
+{
+ switch (source) {
+ case EShSourceGlsl: return new TBuiltIns(); // GLSL builtIns
+#ifdef ENABLE_HLSL
+ case EShSourceHlsl: return new TBuiltInParseablesHlsl(); // HLSL intrinsics
+#endif
+
+ default:
+ infoSink.info.message(EPrefixInternalError, "Unable to determine source language");
+ return nullptr;
+ }
+}
+
+// Create a language specific version of a parse context.
+TParseContextBase* CreateParseContext(TSymbolTable& symbolTable, TIntermediate& intermediate,
+ int version, EProfile profile, EShSource source,
+ EShLanguage language, TInfoSink& infoSink,
+ SpvVersion spvVersion, bool forwardCompatible, EShMessages messages,
+ bool parsingBuiltIns, std::string sourceEntryPointName = "")
+{
+ switch (source) {
+ case EShSourceGlsl: {
+ if (sourceEntryPointName.size() == 0)
+ intermediate.setEntryPointName("main");
+ TString entryPoint = sourceEntryPointName.c_str();
+ return new TParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion,
+ language, infoSink, forwardCompatible, messages, &entryPoint);
+ }
+#ifdef ENABLE_HLSL
+ case EShSourceHlsl:
+ return new HlslParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion,
+ language, infoSink, sourceEntryPointName.c_str(), forwardCompatible, messages);
+#endif
+ default:
+ infoSink.info.message(EPrefixInternalError, "Unable to determine source language");
+ return nullptr;
+ }
+}
+
+// Local mapping functions for making arrays of symbol tables....
+
+const int VersionCount = 17; // index range in MapVersionToIndex
+
+int MapVersionToIndex(int version)
+{
+ int index = 0;
+
+ switch (version) {
+ case 100: index = 0; break;
+ case 110: index = 1; break;
+ case 120: index = 2; break;
+ case 130: index = 3; break;
+ case 140: index = 4; break;
+ case 150: index = 5; break;
+ case 300: index = 6; break;
+ case 330: index = 7; break;
+ case 400: index = 8; break;
+ case 410: index = 9; break;
+ case 420: index = 10; break;
+ case 430: index = 11; break;
+ case 440: index = 12; break;
+ case 310: index = 13; break;
+ case 450: index = 14; break;
+ case 500: index = 0; break; // HLSL
+ case 320: index = 15; break;
+ case 460: index = 16; break;
+ default: assert(0); break;
+ }
+
+ assert(index < VersionCount);
+
+ return index;
+}
+
+const int SpvVersionCount = 3; // index range in MapSpvVersionToIndex
+
+int MapSpvVersionToIndex(const SpvVersion& spvVersion)
+{
+ int index = 0;
+
+ if (spvVersion.openGl > 0)
+ index = 1;
+ else if (spvVersion.vulkan > 0)
+ index = 2;
+
+ assert(index < SpvVersionCount);
+
+ return index;
+}
+
+const int ProfileCount = 4; // index range in MapProfileToIndex
+
+int MapProfileToIndex(EProfile profile)
+{
+ int index = 0;
+
+ switch (profile) {
+ case ENoProfile: index = 0; break;
+ case ECoreProfile: index = 1; break;
+ case ECompatibilityProfile: index = 2; break;
+ case EEsProfile: index = 3; break;
+ default: break;
+ }
+
+ assert(index < ProfileCount);
+
+ return index;
+}
+
+const int SourceCount = 2; // index range in MapSourceToIndex
+
+int MapSourceToIndex(EShSource source)
+{
+ int index = 0;
+
+ switch (source) {
+ case EShSourceGlsl: index = 0; break;
+ case EShSourceHlsl: index = 1; break;
+ default: break;
+ }
+
+ assert(index < SourceCount);
+
+ return index;
+}
+
+// only one of these needed for non-ES; ES needs 2 for different precision defaults of built-ins
+enum EPrecisionClass {
+ EPcGeneral,
+ EPcFragment,
+ EPcCount
+};
+
+// A process-global symbol table per version per profile for built-ins common
+// to multiple stages (languages), and a process-global symbol table per version
+// per profile per stage for built-ins unique to each stage. They will be sparsely
+// populated, so they will only be generated as needed.
+//
+// Each has a different set of built-ins, and we want to preserve that from
+// compile to compile.
+//
+TSymbolTable* CommonSymbolTable[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EPcCount] = {};
+TSymbolTable* SharedSymbolTables[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EShLangCount] = {};
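+
+// A lookup sketch: the shared table for, say, GLSL 450 core-profile fragment
+// shaders targeting Vulkan SPIR-V lives at
+//   SharedSymbolTables[MapVersionToIndex(450)]
+//                     [MapSpvVersionToIndex(spvVersion)] // a SpvVersion with vulkan > 0
+//                     [MapProfileToIndex(ECoreProfile)]
+//                     [MapSourceToIndex(EShSourceGlsl)]
+//                     [EShLangFragment];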
+
+TPoolAllocator* PerProcessGPA = nullptr;
+
+//
+// Parse and add to the given symbol table the content of the given shader string.
+//
+bool InitializeSymbolTable(const TString& builtIns, int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
+ EShSource source, TInfoSink& infoSink, TSymbolTable& symbolTable)
+{
+ TIntermediate intermediate(language, version, profile);
+
+ intermediate.setSource(source);
+
+ std::unique_ptr<TParseContextBase> parseContext(CreateParseContext(symbolTable, intermediate, version, profile, source,
+ language, infoSink, spvVersion, true, EShMsgDefault,
+ true));
+
+ TShader::ForbidIncluder includer;
+ TPpContext ppContext(*parseContext, "", includer);
+ TScanContext scanContext(*parseContext);
+ parseContext->setScanContext(&scanContext);
+ parseContext->setPpContext(&ppContext);
+
+ //
+ // Push the symbol table to give it an initial scope. This
+ // push should not have a corresponding pop, so that built-ins
+ // are preserved, and the test for an empty table fails.
+ //
+
+ symbolTable.push();
+
+ const char* builtInShaders[2];
+ size_t builtInLengths[2];
+ builtInShaders[0] = builtIns.c_str();
+ builtInLengths[0] = builtIns.size();
+
+ if (builtInLengths[0] == 0)
+ return true;
+
+ TInputScanner input(1, builtInShaders, builtInLengths);
+ if (! parseContext->parseShaderStrings(ppContext, input)) {
+ infoSink.info.message(EPrefixInternalError, "Unable to parse built-ins");
+ printf("Unable to parse built-ins\n%s\n", infoSink.info.c_str());
+ printf("%s\n", builtInShaders[0]);
+
+ return false;
+ }
+
+ return true;
+}
+
+int CommonIndex(EProfile profile, EShLanguage language)
+{
+ return (profile == EEsProfile && language == EShLangFragment) ? EPcFragment : EPcGeneral;
+}
+
+//
+// To initialize per-stage shared tables, with the common table already complete.
+//
+void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int version, EProfile profile, const SpvVersion& spvVersion,
+ EShLanguage language, EShSource source, TInfoSink& infoSink, TSymbolTable** commonTable,
+ TSymbolTable** symbolTables)
+{
+ (*symbolTables[language]).adoptLevels(*commonTable[CommonIndex(profile, language)]);
+ InitializeSymbolTable(builtInParseables.getStageString(language), version, profile, spvVersion, language, source,
+ infoSink, *symbolTables[language]);
+ builtInParseables.identifyBuiltIns(version, profile, spvVersion, language, *symbolTables[language]);
+ if (profile == EEsProfile && version >= 300)
+ (*symbolTables[language]).setNoBuiltInRedeclarations();
+ if (version == 110)
+ (*symbolTables[language]).setSeparateNameSpaces();
+}
+
+//
+// Initialize the full set of shareable symbol tables:
+// the common (cross-stage) tables and those shareable per-stage.
+//
+bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TSymbolTable** symbolTables, int version, EProfile profile, const SpvVersion& spvVersion, EShSource source)
+{
+ std::unique_ptr<TBuiltInParseables> builtInParseables(CreateBuiltInParseables(infoSink, source));
+
+ if (builtInParseables == nullptr)
+ return false;
+
+ builtInParseables->initialize(version, profile, spvVersion);
+
+ // do the common tables
+ InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangVertex, source,
+ infoSink, *commonTable[EPcGeneral]);
+ if (profile == EEsProfile)
+ InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangFragment, source,
+ infoSink, *commonTable[EPcFragment]);
+
+ // do the per-stage tables
+
+ // always have vertex and fragment
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangVertex, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangFragment, source,
+ infoSink, commonTable, symbolTables);
+
+ // check for tessellation
+ if ((profile != EEsProfile && version >= 150) ||
+ (profile == EEsProfile && version >= 310)) {
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessControl, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessEvaluation, source,
+ infoSink, commonTable, symbolTables);
+ }
+
+ // check for geometry
+ if ((profile != EEsProfile && version >= 150) ||
+ (profile == EEsProfile && version >= 310))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangGeometry, source,
+ infoSink, commonTable, symbolTables);
+
+ // check for compute
+ if ((profile != EEsProfile && version >= 420) ||
+ (profile == EEsProfile && version >= 310))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCompute, source,
+ infoSink, commonTable, symbolTables);
+
+#ifdef NV_EXTENSIONS
+ // check for ray tracing stages
+ if (profile != EEsProfile && version >= 450) {
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangRayGenNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangIntersectNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangAnyHitNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangClosestHitNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMissNV, source,
+ infoSink, commonTable, symbolTables);
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCallableNV, source,
+ infoSink, commonTable, symbolTables);
+ }
+
+ // check for mesh
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMeshNV, source,
+ infoSink, commonTable, symbolTables);
+
+ // check for task
+ if ((profile != EEsProfile && version >= 450) ||
+ (profile == EEsProfile && version >= 320))
+ InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTaskNV, source,
+ infoSink, commonTable, symbolTables);
+#endif
+
+ return true;
+}
+
+bool AddContextSpecificSymbols(const TBuiltInResource* resources, TInfoSink& infoSink, TSymbolTable& symbolTable, int version,
+ EProfile profile, const SpvVersion& spvVersion, EShLanguage language, EShSource source)
+{
+ std::unique_ptr<TBuiltInParseables> builtInParseables(CreateBuiltInParseables(infoSink, source));
+
+ if (builtInParseables == nullptr)
+ return false;
+
+ builtInParseables->initialize(*resources, version, profile, spvVersion, language);
+ InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, language, source, infoSink, symbolTable);
+ builtInParseables->identifyBuiltIns(version, profile, spvVersion, language, symbolTable, *resources);
+
+ return true;
+}
+
+//
+// To do this on the fly, we want to leave the current state of our thread's
+// pool allocator intact, so:
+// - Switch to a new pool for parsing the built-ins
+// - Do the parsing, which builds the symbol table, using the new pool
+// - Switch to the process-global pool to save a copy of the resulting symbol table
+// - Free up the new pool used to parse the built-ins
+// - Switch back to the original thread's pool
+//
+// This only gets done the first time any thread needs a particular symbol table
+// (lazy evaluation).
+//
+void SetupBuiltinSymbolTable(int version, EProfile profile, const SpvVersion& spvVersion, EShSource source)
+{
+ TInfoSink infoSink;
+
+ // Make sure only one thread tries to do this at a time
+ glslang::GetGlobalLock();
+
+ // See if it's already been done for this version/profile combination
+ int versionIndex = MapVersionToIndex(version);
+ int spvVersionIndex = MapSpvVersionToIndex(spvVersion);
+ int profileIndex = MapProfileToIndex(profile);
+ int sourceIndex = MapSourceToIndex(source);
+ if (CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][EPcGeneral]) {
+ glslang::ReleaseGlobalLock();
+
+ return;
+ }
+
+ // Switch to a new pool
+ TPoolAllocator& previousAllocator = GetThreadPoolAllocator();
+ TPoolAllocator* builtInPoolAllocator = new TPoolAllocator;
+ SetThreadPoolAllocator(builtInPoolAllocator);
+
+ // Dynamically allocate the local symbol tables so we can control when they are deallocated WRT when the pool is popped.
+ TSymbolTable* commonTable[EPcCount];
+ TSymbolTable* stageTables[EShLangCount];
+ for (int precClass = 0; precClass < EPcCount; ++precClass)
+ commonTable[precClass] = new TSymbolTable;
+ for (int stage = 0; stage < EShLangCount; ++stage)
+ stageTables[stage] = new TSymbolTable;
+
+ // Generate the local symbol tables using the new pool
+ InitializeSymbolTables(infoSink, commonTable, stageTables, version, profile, spvVersion, source);
+
+ // Switch to the process-global pool
+ SetThreadPoolAllocator(PerProcessGPA);
+
+ // Copy the local symbol tables from the new pool to the global tables using the process-global pool
+ for (int precClass = 0; precClass < EPcCount; ++precClass) {
+ if (! commonTable[precClass]->isEmpty()) {
+ CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass] = new TSymbolTable;
+ CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->copyTable(*commonTable[precClass]);
+ CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->readOnly();
+ }
+ }
+ for (int stage = 0; stage < EShLangCount; ++stage) {
+ if (! stageTables[stage]->isEmpty()) {
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage] = new TSymbolTable;
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->adoptLevels(*CommonSymbolTable
+ [versionIndex][spvVersionIndex][profileIndex][sourceIndex][CommonIndex(profile, (EShLanguage)stage)]);
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->copyTable(*stageTables[stage]);
+ SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->readOnly();
+ }
+ }
+
+ // Clean up the local tables before deleting the pool they used.
+ for (int precClass = 0; precClass < EPcCount; ++precClass)
+ delete commonTable[precClass];
+ for (int stage = 0; stage < EShLangCount; ++stage)
+ delete stageTables[stage];
+
+ delete builtInPoolAllocator;
+ SetThreadPoolAllocator(&previousAllocator);
+
+ glslang::ReleaseGlobalLock();
+}
+
+// Return true if the shader was correctly specified for version/profile/stage.
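+// For example, a GLSL shader starting with "#version 310 es" yields
+// (version, profile) == (310, EEsProfile); a shader with no #version
+// directive falls back to 'defaultVersion' and a profile deduced from it.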
+bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNotFirst, int defaultVersion,
+ EShSource source, int& version, EProfile& profile, const SpvVersion& spvVersion)
+{
+ const int FirstProfileVersion = 150;
+ bool correct = true;
+
+ if (source == EShSourceHlsl) {
+ version = 500; // shader model; currently a characteristic of glslang, not the input
+ profile = ECoreProfile; // allow doubles in prototype parsing
+ return correct;
+ }
+
+ // Get a version...
+ if (version == 0) {
+ version = defaultVersion;
+ // infoSink.info.message(EPrefixWarning, "#version: statement missing; use #version on first line of shader");
+ }
+
+ // Get a good profile...
+ if (profile == ENoProfile) {
+ if (version == 300 || version == 310 || version == 320) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 require specifying the 'es' profile");
+ profile = EEsProfile;
+ } else if (version == 100)
+ profile = EEsProfile;
+ else if (version >= FirstProfileVersion)
+ profile = ECoreProfile;
+ else
+ profile = ENoProfile;
+ } else {
+ // a profile was provided...
+ if (version < 150) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: versions before 150 do not allow a profile token");
+ if (version == 100)
+ profile = EEsProfile;
+ else
+ profile = ENoProfile;
+ } else if (version == 300 || version == 310 || version == 320) {
+ if (profile != EEsProfile) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 support only the es profile");
+ }
+ profile = EEsProfile;
+ } else {
+ if (profile == EEsProfile) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: only version 300, 310, and 320 support the es profile");
+ if (version >= FirstProfileVersion)
+ profile = ECoreProfile;
+ else
+ profile = ENoProfile;
+ }
+ // else: typical desktop case... e.g., "#version 410 core"
+ }
+ }
+
+ // Fix version...
+ switch (version) {
+ // ES versions
+ case 100: break;
+ case 300: break;
+ case 310: break;
+ case 320: break;
+
+ // desktop versions
+ case 110: break;
+ case 120: break;
+ case 130: break;
+ case 140: break;
+ case 150: break;
+ case 330: break;
+ case 400: break;
+ case 410: break;
+ case 420: break;
+ case 430: break;
+ case 440: break;
+ case 450: break;
+ case 460: break;
+
+ // unknown version
+ default:
+ correct = false;
+ infoSink.info.message(EPrefixError, "version not supported");
+ if (profile == EEsProfile)
+ version = 310;
+ else {
+ version = 450;
+ profile = ECoreProfile;
+ }
+ break;
+ }
+
+ // Correct for stage type...
+ switch (stage) {
+ case EShLangGeometry:
+ if ((profile == EEsProfile && version < 310) ||
+ (profile != EEsProfile && version < 150)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: geometry shaders require es profile with version 310 or non-es profile with version 150 or above");
+ version = (profile == EEsProfile) ? 310 : 150;
+ if (profile == EEsProfile || profile == ENoProfile)
+ profile = ECoreProfile;
+ }
+ break;
+ case EShLangTessControl:
+ case EShLangTessEvaluation:
+ if ((profile == EEsProfile && version < 310) ||
+ (profile != EEsProfile && version < 150)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: tessellation shaders require es profile with version 310 or non-es profile with version 150 or above");
+ version = (profile == EEsProfile) ? 310 : 400; // 150 supports the extension, correction is to 400 which does not
+ if (profile == EEsProfile || profile == ENoProfile)
+ profile = ECoreProfile;
+ }
+ break;
+ case EShLangCompute:
+ if ((profile == EEsProfile && version < 310) ||
+ (profile != EEsProfile && version < 420)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: compute shaders require es profile with version 310 or above, or non-es profile with version 420 or above");
+ version = profile == EEsProfile ? 310 : 420;
+ }
+ break;
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ if (profile == EEsProfile || version < 460) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: ray tracing shaders require non-es profile with version 460 or above");
+ version = 460;
+ }
+ break;
+ case EShLangMeshNV:
+ case EShLangTaskNV:
+ if ((profile == EEsProfile && version < 320) ||
+ (profile != EEsProfile && version < 450)) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: mesh/task shaders require es profile with version 320 or above, or non-es profile with version 450 or above");
+ version = profile == EEsProfile ? 320 : 450;
+        }
+        break;
+#endif
+ default:
+ break;
+ }
+
+ if (profile == EEsProfile && version >= 300 && versionNotFirst) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: statement must appear first in es-profile shader; before comments or newlines");
+ }
+
+ // Check for SPIR-V compatibility
+ if (spvVersion.spv != 0) {
+ switch (profile) {
+ case EEsProfile:
+ if (spvVersion.vulkan > 0 && version < 310) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: ES shaders for Vulkan SPIR-V require version 310 or higher");
+ version = 310;
+ }
+ if (spvVersion.openGl >= 100) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: ES shaders for OpenGL SPIR-V are not supported");
+ version = 310;
+ }
+ break;
+ case ECompatibilityProfile:
+ infoSink.info.message(EPrefixError, "#version: compilation for SPIR-V does not support the compatibility profile");
+ break;
+ default:
+ if (spvVersion.vulkan > 0 && version < 140) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: Desktop shaders for Vulkan SPIR-V require version 140 or higher");
+ version = 140;
+ }
+ if (spvVersion.openGl >= 100 && version < 330) {
+ correct = false;
+ infoSink.info.message(EPrefixError, "#version: Desktop shaders for OpenGL SPIR-V require version 330 or higher");
+ version = 330;
+ }
+ break;
+ }
+ }
+
+ return correct;
+}
+
+// There are multiple input paths for setting up the environment.
+// TEnvironment takes precedence for whatever it sets, so sort all this out.
+// Ideally, the internal code could be made to use TEnvironment, but for
+// now, translate it to the historically used parameters.
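+// For example, (EShMsgSpvRules | EShMsgVulkanRules) defaults to SPIR-V 1.0
+// targeting Vulkan 1.0, while EShMsgSpvRules alone means SPIR-V for OpenGL;
+// an explicit TEnvironment then overrides any of these defaults.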
+void TranslateEnvironment(const TEnvironment* environment, EShMessages& messages, EShSource& source,
+ EShLanguage& stage, SpvVersion& spvVersion)
+{
+ // Set up environmental defaults, first ignoring 'environment'.
+ if (messages & EShMsgSpvRules)
+ spvVersion.spv = EShTargetSpv_1_0;
+ if (messages & EShMsgVulkanRules) {
+ spvVersion.vulkan = EShTargetVulkan_1_0;
+ spvVersion.vulkanGlsl = 100;
+ } else if (spvVersion.spv != 0)
+ spvVersion.openGl = 100;
+
+ // Now, override, based on any content set in 'environment'.
+ // 'environment' must be cleared to ESh*None settings when items
+ // are not being set.
+ if (environment != nullptr) {
+ // input language
+ if (environment->input.languageFamily != EShSourceNone) {
+ stage = environment->input.stage;
+ switch (environment->input.dialect) {
+ case EShClientNone:
+ break;
+ case EShClientVulkan:
+ spvVersion.vulkanGlsl = environment->input.dialectVersion;
+ break;
+ case EShClientOpenGL:
+ spvVersion.openGl = environment->input.dialectVersion;
+ break;
+ }
+ switch (environment->input.languageFamily) {
+ case EShSourceNone:
+ break;
+ case EShSourceGlsl:
+ source = EShSourceGlsl;
+ messages = static_cast<EShMessages>(messages & ~EShMsgReadHlsl);
+ break;
+ case EShSourceHlsl:
+ source = EShSourceHlsl;
+ messages = static_cast<EShMessages>(messages | EShMsgReadHlsl);
+ break;
+ }
+ }
+
+ // client
+ switch (environment->client.client) {
+ case EShClientVulkan:
+ spvVersion.vulkan = environment->client.version;
+ break;
+ default:
+ break;
+ }
+
+ // generated code
+ switch (environment->target.language) {
+ case EshTargetSpv:
+ spvVersion.spv = environment->target.version;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+// Most processes are recorded when set in the intermediate representation;
+// these are the few that are not.
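+// (The recorded processes become part of the intermediate's process list,
+// which a back end may emit, e.g. as SPIR-V OpModuleProcessed instructions.)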
+void RecordProcesses(TIntermediate& intermediate, EShMessages messages, const std::string& sourceEntryPointName)
+{
+ if ((messages & EShMsgRelaxedErrors) != 0)
+ intermediate.addProcess("relaxed-errors");
+ if ((messages & EShMsgSuppressWarnings) != 0)
+ intermediate.addProcess("suppress-warnings");
+ if ((messages & EShMsgKeepUncalled) != 0)
+ intermediate.addProcess("keep-uncalled");
+ if (sourceEntryPointName.size() > 0) {
+ intermediate.addProcess("source-entrypoint");
+ intermediate.addProcessArgument(sourceEntryPointName);
+ }
+}
+
+// This is the common setup and cleanup code for PreprocessDeferred and
+// CompileDeferred.
+// It takes any callable with the signature
+//  bool (TParseContextBase& parseContext, TPpContext& ppContext,
+//        TInputScanner& input, bool versionWillBeError,
+//        TSymbolTable&, TIntermediate&,
+//        EShOptimizationLevel, EShMessages);
+// which returns false if a failure was detected and true otherwise.
+//
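+// The two ProcessingContext instantiations used in this file are
+// DoPreprocessing (preprocess only) and DoFullParse (build the AST).
+//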
+template<typename ProcessingContext>
+bool ProcessDeferred(
+ TCompiler* compiler,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const char* const stringNames[],
+ const char* customPreamble,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop; this is the GLSL version, not SPIR-V or Vulkan
+ EProfile defaultProfile,
+ // set version/profile to defaultVersion/defaultProfile regardless of the #version
+ // directive in the source code
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages, // warnings/errors/AST; things to print out
+ TIntermediate& intermediate, // returned tree, etc.
+ ProcessingContext& processingContext,
+ bool requireNonempty,
+ TShader::Includer& includer,
+ const std::string sourceEntryPointName = "",
+ const TEnvironment* environment = nullptr) // optional way of fully setting all versions, overriding the above
+{
+ // This must be undone (.pop()) by the caller, after it finishes consuming the created tree.
+ GetThreadPoolAllocator().push();
+
+ if (numStrings == 0)
+ return true;
+
+ // Move to length-based strings, rather than null-terminated strings.
+ // Also, add strings to include the preamble and to ensure the shader is not null,
+ // which lets the grammar accept what was a null (post preprocessing) shader.
+ //
+ // Shader will look like
+ // string 0: system preamble
+ // string 1: custom preamble
+ // string 2...numStrings+1: user's shader
+ // string numStrings+2: "int;"
+ const int numPre = 2;
+ const int numPost = requireNonempty? 1 : 0;
+ const int numTotal = numPre + numStrings + numPost;
+ std::unique_ptr<size_t[]> lengths(new size_t[numTotal]);
+ std::unique_ptr<const char*[]> strings(new const char*[numTotal]);
+ std::unique_ptr<const char*[]> names(new const char*[numTotal]);
+ for (int s = 0; s < numStrings; ++s) {
+ strings[s + numPre] = shaderStrings[s];
+ if (inputLengths == nullptr || inputLengths[s] < 0)
+ lengths[s + numPre] = strlen(shaderStrings[s]);
+ else
+ lengths[s + numPre] = inputLengths[s];
+ }
+ if (stringNames != nullptr) {
+ for (int s = 0; s < numStrings; ++s)
+ names[s + numPre] = stringNames[s];
+ } else {
+ for (int s = 0; s < numStrings; ++s)
+ names[s + numPre] = nullptr;
+ }
+
+ // Get all the stages, languages, clients, and other environment
+ // stuff sorted out.
+ EShSource source = (messages & EShMsgReadHlsl) != 0 ? EShSourceHlsl : EShSourceGlsl;
+ SpvVersion spvVersion;
+ EShLanguage stage = compiler->getLanguage();
+ TranslateEnvironment(environment, messages, source, stage, spvVersion);
+ if (environment != nullptr && environment->target.hlslFunctionality1)
+ intermediate.setHlslFunctionality1();
+
+ // First, without using the preprocessor or parser, find the #version, so we know what
+ // symbol tables, processing rules, etc. to set up. This does not need the extra strings
+ // outlined above, just the user shader, after the system and user preambles.
+ glslang::TInputScanner userInput(numStrings, &strings[numPre], &lengths[numPre]);
+ int version = 0;
+ EProfile profile = ENoProfile;
+ bool versionNotFirstToken = false;
+ bool versionNotFirst = (source == EShSourceHlsl)
+ ? true
+ : userInput.scanVersion(version, profile, versionNotFirstToken);
+ bool versionNotFound = version == 0;
+ if (forceDefaultVersionAndProfile && source == EShSourceGlsl) {
+ if (! (messages & EShMsgSuppressWarnings) && ! versionNotFound &&
+ (version != defaultVersion || profile != defaultProfile)) {
+ compiler->infoSink.info << "Warning, (version, profile) forced to be ("
+ << defaultVersion << ", " << ProfileName(defaultProfile)
+ << "), while in source code it is ("
+ << version << ", " << ProfileName(profile) << ")\n";
+ }
+
+ if (versionNotFound) {
+ versionNotFirstToken = false;
+ versionNotFirst = false;
+ versionNotFound = false;
+ }
+ version = defaultVersion;
+ profile = defaultProfile;
+ }
+
+ bool goodVersion = DeduceVersionProfile(compiler->infoSink, stage,
+ versionNotFirst, defaultVersion, source, version, profile, spvVersion);
+ bool versionWillBeError = (versionNotFound || (profile == EEsProfile && version >= 300 && versionNotFirst));
+ bool warnVersionNotFirst = false;
+ if (! versionWillBeError && versionNotFirstToken) {
+ if (messages & EShMsgRelaxedErrors)
+ warnVersionNotFirst = true;
+ else
+ versionWillBeError = true;
+ }
+
+ intermediate.setSource(source);
+ intermediate.setVersion(version);
+ intermediate.setProfile(profile);
+ intermediate.setSpv(spvVersion);
+ RecordProcesses(intermediate, messages, sourceEntryPointName);
+ if (spvVersion.vulkan > 0)
+ intermediate.setOriginUpperLeft();
+ if ((messages & EShMsgHlslOffsets) || source == EShSourceHlsl)
+ intermediate.setHlslOffsets();
+ if (messages & EShMsgDebugInfo) {
+ intermediate.setSourceFile(names[numPre]);
+ for (int s = 0; s < numStrings; ++s) {
+ // The string may not be null-terminated, so make sure we provide
+ // the length along with the string.
+ intermediate.addSourceText(strings[numPre + s], lengths[numPre + s]);
+ }
+ }
+ SetupBuiltinSymbolTable(version, profile, spvVersion, source);
+
+ TSymbolTable* cachedTable = SharedSymbolTables[MapVersionToIndex(version)]
+ [MapSpvVersionToIndex(spvVersion)]
+ [MapProfileToIndex(profile)]
+ [MapSourceToIndex(source)]
+ [stage];
+
+ // Dynamically allocate the symbol table so we can control when it is deallocated WRT the pool.
+ std::unique_ptr<TSymbolTable> symbolTable(new TSymbolTable);
+ if (cachedTable)
+ symbolTable->adoptLevels(*cachedTable);
+
+ // Add built-in symbols that are potentially context dependent;
+ // they get popped again further down.
+ if (! AddContextSpecificSymbols(resources, compiler->infoSink, *symbolTable, version, profile, spvVersion,
+ stage, source)) {
+ return false;
+ }
+
+ //
+ // Now we can process the full shader under proper symbols and rules.
+ //
+
+ std::unique_ptr<TParseContextBase> parseContext(CreateParseContext(*symbolTable, intermediate, version, profile, source,
+ stage, compiler->infoSink,
+ spvVersion, forwardCompatible, messages, false, sourceEntryPointName));
+ TPpContext ppContext(*parseContext, names[numPre] ? names[numPre] : "", includer);
+
+ // only GLSL (bison triggered, really) needs an externally set scan context
+ glslang::TScanContext scanContext(*parseContext);
+ if (source == EShSourceGlsl)
+ parseContext->setScanContext(&scanContext);
+
+ parseContext->setPpContext(&ppContext);
+ parseContext->setLimits(*resources);
+ if (! goodVersion)
+ parseContext->addError();
+ if (warnVersionNotFirst) {
+ TSourceLoc loc;
+ loc.init();
+ parseContext->warn(loc, "Illegal to have non-comment, non-whitespace tokens before #version", "#version", "");
+ }
+
+ parseContext->initializeExtensionBehavior();
+
+ // Fill in the strings as outlined above.
+ std::string preamble;
+ parseContext->getPreamble(preamble);
+ strings[0] = preamble.c_str();
+ lengths[0] = strlen(strings[0]);
+ names[0] = nullptr;
+ strings[1] = customPreamble;
+ lengths[1] = strlen(strings[1]);
+ names[1] = nullptr;
+ assert(2 == numPre);
+ if (requireNonempty) {
+ const int postIndex = numStrings + numPre;
+ strings[postIndex] = "\n int;";
+        lengths[postIndex] = strlen(strings[postIndex]);
+ names[postIndex] = nullptr;
+ }
+ TInputScanner fullInput(numStrings + numPre + numPost, strings.get(), lengths.get(), names.get(), numPre, numPost);
+
+ // Push a new symbol allocation scope that will get used for the shader's globals.
+ symbolTable->push();
+
+ bool success = processingContext(*parseContext, ppContext, fullInput,
+ versionWillBeError, *symbolTable,
+ intermediate, optLevel, messages);
+ return success;
+}
+
+// Responsible for keeping track of the most recent source string and line in
+// the preprocessor and outputting newlines appropriately if the source string
+// or line changes.
+class SourceLineSynchronizer {
+public:
+ SourceLineSynchronizer(const std::function<int()>& lastSourceIndex,
+ std::string* output)
+ : getLastSourceIndex(lastSourceIndex), output(output), lastSource(-1), lastLine(0) {}
+// SourceLineSynchronizer(const SourceLineSynchronizer&) = delete;
+// SourceLineSynchronizer& operator=(const SourceLineSynchronizer&) = delete;
+
+ // Sets the internally tracked source string index to that of the most
+ // recently read token. If we switched to a new source string, returns
+ // true and inserts a newline. Otherwise, returns false and outputs nothing.
+ bool syncToMostRecentString() {
+ if (getLastSourceIndex() != lastSource) {
+ // After switching to a new source string, we need to reset lastLine
+ // because line number resets every time a new source string is
+ // used. We also need to output a newline to separate the output
+ // from the previous source string (if there is one).
+ if (lastSource != -1 || lastLine != 0)
+ *output += '\n';
+ lastSource = getLastSourceIndex();
+ lastLine = -1;
+ return true;
+ }
+ return false;
+ }
+
+ // Calls syncToMostRecentString() and then sets the internally tracked line
+ // number to tokenLine. If we switched to a new line, returns true and inserts
+ // newlines appropriately. Otherwise, returns false and outputs nothing.
+ bool syncToLine(int tokenLine) {
+ syncToMostRecentString();
+ const bool newLineStarted = lastLine < tokenLine;
+ for (; lastLine < tokenLine; ++lastLine) {
+ if (lastLine > 0) *output += '\n';
+ }
+ return newLineStarted;
+ }
+
+ // Sets the internally tracked line number to newLineNum.
+ void setLineNum(int newLineNum) { lastLine = newLineNum; }
+
+private:
+ SourceLineSynchronizer& operator=(const SourceLineSynchronizer&);
+
+ // A function for getting the index of the last valid source string we've
+ // read tokens from.
+ const std::function<int()> getLastSourceIndex;
+ // output string for newlines.
+ std::string* output;
+ // lastSource is the source string index (starting from 0) of the last token
+ // processed. It is tracked in order for newlines to be inserted when a new
+ // source string starts. -1 means we haven't started processing any source
+ // string.
+ int lastSource;
+ // lastLine is the line number (starting from 1) of the last token processed.
+ // It is tracked in order for newlines to be inserted when a token appears
+ // on a new line. 0 means we haven't started processing any line in the
+ // current source string.
+ int lastLine;
+};
+
+// DoPreprocessing is a valid ProcessingContext template argument,
+// which only performs the preprocessing step of compilation.
+// It places the result in the "string" argument to its constructor.
+//
+// This is not an officially supported or fully working path.
+struct DoPreprocessing {
+ explicit DoPreprocessing(std::string* string): outputString(string) {}
+ bool operator()(TParseContextBase& parseContext, TPpContext& ppContext,
+ TInputScanner& input, bool versionWillBeError,
+ TSymbolTable&, TIntermediate&,
+ EShOptimizationLevel, EShMessages)
+ {
+        // Tokens that need no space before or after them, and tokens
+        // that must not have a space before them.
+ static const std::string unNeededSpaceTokens = ";()[]";
+ static const std::string noSpaceBeforeTokens = ",";
+ glslang::TPpToken ppToken;
+
+ parseContext.setScanner(&input);
+ ppContext.setInput(input, versionWillBeError);
+
+ std::string outputBuffer;
+ SourceLineSynchronizer lineSync(
+ std::bind(&TInputScanner::getLastValidSourceIndex, &input), &outputBuffer);
+
+ parseContext.setExtensionCallback([&lineSync, &outputBuffer](
+ int line, const char* extension, const char* behavior) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#extension ";
+ outputBuffer += extension;
+ outputBuffer += " : ";
+ outputBuffer += behavior;
+ });
+
+ parseContext.setLineCallback([&lineSync, &outputBuffer, &parseContext](
+ int curLineNum, int newLineNum, bool hasSource, int sourceNum, const char* sourceName) {
+            // sourceNum is the index of the source string being parsed.
+ lineSync.syncToLine(curLineNum);
+ outputBuffer += "#line ";
+ outputBuffer += std::to_string(newLineNum);
+ if (hasSource) {
+ outputBuffer += ' ';
+ if (sourceName != nullptr) {
+ outputBuffer += '\"';
+ outputBuffer += sourceName;
+ outputBuffer += '\"';
+ } else {
+ outputBuffer += std::to_string(sourceNum);
+ }
+ }
+ if (parseContext.lineDirectiveShouldSetNextLine()) {
+ // newLineNum is the new line number for the line following the #line
+                // directive, so the current line's number is one less than that.
+ newLineNum -= 1;
+ }
+ outputBuffer += '\n';
+ // And we are at the next line of the #line directive now.
+ lineSync.setLineNum(newLineNum + 1);
+ });
+
+ parseContext.setVersionCallback(
+ [&lineSync, &outputBuffer](int line, int version, const char* str) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#version ";
+ outputBuffer += std::to_string(version);
+ if (str) {
+ outputBuffer += ' ';
+ outputBuffer += str;
+ }
+ });
+
+ parseContext.setPragmaCallback([&lineSync, &outputBuffer](
+ int line, const glslang::TVector<glslang::TString>& ops) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#pragma ";
+ for(size_t i = 0; i < ops.size(); ++i) {
+ outputBuffer += ops[i].c_str();
+ }
+ });
+
+ parseContext.setErrorCallback([&lineSync, &outputBuffer](
+ int line, const char* errorMessage) {
+ lineSync.syncToLine(line);
+ outputBuffer += "#error ";
+ outputBuffer += errorMessage;
+ });
+
+ int lastToken = EndOfInput; // lastToken records the last token processed.
+ do {
+ int token = ppContext.tokenize(ppToken);
+ if (token == EndOfInput)
+ break;
+
+ bool isNewString = lineSync.syncToMostRecentString();
+ bool isNewLine = lineSync.syncToLine(ppToken.loc.line);
+
+ if (isNewLine) {
+ // Don't emit whitespace onto empty lines.
+ // Copy any whitespace characters at the start of a line
+ // from the input to the output.
+ outputBuffer += std::string(ppToken.loc.column - 1, ' ');
+ }
+
+ // Output a space in between tokens, but not at the start of a line,
+ // and also not around special tokens. This helps with readability
+ // and consistency.
+ if (!isNewString && !isNewLine && lastToken != EndOfInput &&
+ (unNeededSpaceTokens.find((char)token) == std::string::npos) &&
+ (unNeededSpaceTokens.find((char)lastToken) == std::string::npos) &&
+ (noSpaceBeforeTokens.find((char)token) == std::string::npos)) {
+ outputBuffer += ' ';
+ }
+ lastToken = token;
+ if (token == PpAtomConstString)
+ outputBuffer += "\"";
+ outputBuffer += ppToken.name;
+ if (token == PpAtomConstString)
+ outputBuffer += "\"";
+ } while (true);
+ outputBuffer += '\n';
+ *outputString = std::move(outputBuffer);
+
+ bool success = true;
+ if (parseContext.getNumErrors() > 0) {
+ success = false;
+ parseContext.infoSink.info.prefix(EPrefixError);
+ parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n";
+ }
+ return success;
+ }
+ std::string* outputString;
+};
+
+// DoFullParse is a valid ProcessingContext template argument for fully
+// parsing the shader. It populates the "intermediate" with the AST.
+struct DoFullParse {
+ bool operator()(TParseContextBase& parseContext, TPpContext& ppContext,
+ TInputScanner& fullInput, bool versionWillBeError,
+ TSymbolTable&, TIntermediate& intermediate,
+ EShOptimizationLevel optLevel, EShMessages messages)
+ {
+ bool success = true;
+ // Parse the full shader.
+ if (! parseContext.parseShaderStrings(ppContext, fullInput, versionWillBeError))
+ success = false;
+
+ if (success && intermediate.getTreeRoot()) {
+ if (optLevel == EShOptNoGeneration)
+ parseContext.infoSink.info.message(EPrefixNone, "No errors. No code generation or linking was requested.");
+ else
+ success = intermediate.postProcess(intermediate.getTreeRoot(), parseContext.getLanguage());
+ } else if (! success) {
+ parseContext.infoSink.info.prefix(EPrefixError);
+ parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n";
+ }
+
+ if (messages & EShMsgAST)
+ intermediate.output(parseContext.infoSink, true);
+
+ return success;
+ }
+};
+
+// Take a single compilation unit, and run the preprocessor on it.
+// Return: True if there were no issues found in preprocessing;
+//         False if any unknown version, pragma, or extension
+//         directives were found during preprocessing.
+//
+// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
+// is not an officially supported or fully working path.
+bool PreprocessDeferred(
+ TCompiler* compiler,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const char* const stringNames[],
+ const char* preamble,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop
+ EProfile defaultProfile,
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages, // warnings/errors/AST; things to print out
+ TShader::Includer& includer,
+ TIntermediate& intermediate, // returned tree, etc.
+ std::string* outputString)
+{
+ DoPreprocessing parser(outputString);
+ return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames,
+ preamble, optLevel, resources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, messages, intermediate, parser,
+ false, includer);
+}
+
+//
+// do a partial compile on the given strings for a single compilation unit
+// for a potential deferred link into a single stage (and deferred full compile of that
+// stage through machine-dependent compilation).
+//
+// all preprocessing, parsing, semantic checks, etc. for a single compilation unit
+// are done here.
+//
+// return: the tree and other information is filled into the intermediate argument,
+// and true is returned by the function for success.
+//
+bool CompileDeferred(
+ TCompiler* compiler,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const char* const stringNames[],
+ const char* preamble,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop
+ EProfile defaultProfile,
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages, // warnings/errors/AST; things to print out
+ TIntermediate& intermediate,// returned tree, etc.
+ TShader::Includer& includer,
+ const std::string sourceEntryPointName = "",
+ TEnvironment* environment = nullptr)
+{
+ DoFullParse parser;
+ return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames,
+ preamble, optLevel, resources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, messages, intermediate, parser,
+ true, includer, sourceEntryPointName, environment);
+}
+
+} // end anonymous namespace for local functions
+
+//
+// ShInitialize() should be called exactly once per process, not per thread.
+//
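+// Internally it is reference-counted against ShFinalize() via
+// NumberOfClients, so nested Initialize/Finalize pairs are tolerated.
+//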
+int ShInitialize()
+{
+ glslang::InitGlobalLock();
+
+ if (! InitProcess())
+ return 0;
+
+ glslang::GetGlobalLock();
+ ++NumberOfClients;
+ glslang::ReleaseGlobalLock();
+
+ if (PerProcessGPA == nullptr)
+ PerProcessGPA = new TPoolAllocator();
+
+ glslang::TScanContext::fillInKeywordMap();
+#ifdef ENABLE_HLSL
+ glslang::HlslScanContext::fillInKeywordMap();
+#endif
+
+ return 1;
+}
+
+//
+// Driver calls these to create and destroy compiler/linker
+// objects.
+//
+
+ShHandle ShConstructCompiler(const EShLanguage language, int debugOptions)
+{
+ if (!InitThread())
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(ConstructCompiler(language, debugOptions));
+
+ return reinterpret_cast<void*>(base);
+}
+
+ShHandle ShConstructLinker(const EShExecutable executable, int debugOptions)
+{
+ if (!InitThread())
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(ConstructLinker(executable, debugOptions));
+
+ return reinterpret_cast<void*>(base);
+}
+
+ShHandle ShConstructUniformMap()
+{
+ if (!InitThread())
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(ConstructUniformMap());
+
+ return reinterpret_cast<void*>(base);
+}
+
+void ShDestruct(ShHandle handle)
+{
+ if (handle == 0)
+ return;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(handle);
+
+ if (base->getAsCompiler())
+ DeleteCompiler(base->getAsCompiler());
+ else if (base->getAsLinker())
+ DeleteLinker(base->getAsLinker());
+ else if (base->getAsUniformMap())
+ DeleteUniformMap(base->getAsUniformMap());
+}
+
+//
+// Cleanup symbol tables
+//
+int ShFinalize()
+{
+ glslang::GetGlobalLock();
+ --NumberOfClients;
+ assert(NumberOfClients >= 0);
+ bool finalize = NumberOfClients == 0;
+ glslang::ReleaseGlobalLock();
+ if (! finalize)
+ return 1;
+
+ for (int version = 0; version < VersionCount; ++version) {
+ for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) {
+ for (int p = 0; p < ProfileCount; ++p) {
+ for (int source = 0; source < SourceCount; ++source) {
+ for (int stage = 0; stage < EShLangCount; ++stage) {
+ delete SharedSymbolTables[version][spvVersion][p][source][stage];
+ SharedSymbolTables[version][spvVersion][p][source][stage] = 0;
+ }
+ }
+ }
+ }
+ }
+
+ for (int version = 0; version < VersionCount; ++version) {
+ for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) {
+ for (int p = 0; p < ProfileCount; ++p) {
+ for (int source = 0; source < SourceCount; ++source) {
+ for (int pc = 0; pc < EPcCount; ++pc) {
+ delete CommonSymbolTable[version][spvVersion][p][source][pc];
+ CommonSymbolTable[version][spvVersion][p][source][pc] = 0;
+ }
+ }
+ }
+ }
+ }
+
+ if (PerProcessGPA != nullptr) {
+ delete PerProcessGPA;
+ PerProcessGPA = nullptr;
+ }
+
+ glslang::TScanContext::deleteKeywordMap();
+#ifdef ENABLE_HLSL
+ glslang::HlslScanContext::deleteKeywordMap();
+#endif
+
+ return 1;
+}
+
+//
+// Do a full compile on the given strings for a single compilation unit
+// forming a complete stage. The result of the machine dependent compilation
+// is left in the provided compile object.
+//
+// Return: The return value is really boolean, indicating
+// success (1) or failure (0).
+//
+int ShCompile(
+ const ShHandle handle,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* inputLengths,
+ const EShOptimizationLevel optLevel,
+ const TBuiltInResource* resources,
+ int /*debugOptions*/,
+ int defaultVersion, // use 100 for ES environment, 110 for desktop
+ bool forwardCompatible, // give errors for use of deprecated features
+ EShMessages messages // warnings/errors/AST; things to print out
+ )
+{
+ // Map the generic handle to the C++ object
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TCompiler* compiler = base->getAsCompiler();
+ if (compiler == 0)
+ return 0;
+
+ SetThreadPoolAllocator(compiler->getPool());
+
+ compiler->infoSink.info.erase();
+ compiler->infoSink.debug.erase();
+
+ TIntermediate intermediate(compiler->getLanguage());
+ TShader::ForbidIncluder includer;
+ bool success = CompileDeferred(compiler, shaderStrings, numStrings, inputLengths, nullptr,
+ "", optLevel, resources, defaultVersion, ENoProfile, false,
+ forwardCompatible, messages, intermediate, includer);
+
+ //
+ // Call the machine dependent compiler
+ //
+ if (success && intermediate.getTreeRoot() && optLevel != EShOptNoGeneration)
+ success = compiler->compile(intermediate.getTreeRoot(), intermediate.getVersion(), intermediate.getProfile());
+
+ intermediate.removeTree();
+
+ // Throw away all the temporary memory used by the compilation process.
+ // The push was done in the CompileDeferred() call above.
+ GetThreadPoolAllocator().pop();
+
+ return success ? 1 : 0;
+}
+
+//
+// Link the given compile objects.
+//
+// Return: The return value is really boolean, indicating
+// success or failure.
+//
+int ShLinkExt(
+ const ShHandle linkHandle,
+ const ShHandle compHandles[],
+ const int numHandles)
+{
+ if (linkHandle == 0 || numHandles == 0)
+ return 0;
+
+ THandleList cObjects;
+
+ for (int i = 0; i < numHandles; ++i) {
+ if (compHandles[i] == 0)
+ return 0;
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(compHandles[i]);
+ if (base->getAsLinker()) {
+ cObjects.push_back(base->getAsLinker());
+ }
+ if (base->getAsCompiler())
+ cObjects.push_back(base->getAsCompiler());
+
+ if (cObjects[i] == 0)
+ return 0;
+ }
+
+    TShHandleBase* base = reinterpret_cast<TShHandleBase*>(linkHandle);
+    TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+
+    // Check for a valid linker before dereferencing it below.
+    if (linker == 0)
+        return 0;
+
+    SetThreadPoolAllocator(linker->getPool());
+
+ linker->infoSink.info.erase();
+
+ for (int i = 0; i < numHandles; ++i) {
+ if (cObjects[i]->getAsCompiler()) {
+ if (! cObjects[i]->getAsCompiler()->linkable()) {
+ linker->infoSink.info.message(EPrefixError, "Not all shaders have valid object code.");
+ return 0;
+ }
+ }
+ }
+
+ bool ret = linker->link(cObjects);
+
+ return ret ? 1 : 0;
+}
+
+//
+// ShSetEncryptionMethod is a placeholder for specifying
+// how source code is encrypted.
+//
+void ShSetEncryptionMethod(ShHandle handle)
+{
+ if (handle == 0)
+ return;
+}
+
+//
+// Return any compiler/linker/uniformmap log of messages for the application.
+//
+const char* ShGetInfoLog(const ShHandle handle)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = static_cast<TShHandleBase*>(handle);
+ TInfoSink* infoSink;
+
+ if (base->getAsCompiler())
+ infoSink = &(base->getAsCompiler()->getInfoSink());
+ else if (base->getAsLinker())
+ infoSink = &(base->getAsLinker()->getInfoSink());
+ else
+ return 0;
+
+ infoSink->info << infoSink->debug.c_str();
+ return infoSink->info.c_str();
+}
+
+//
+// Return the resulting binary code from the link process. Structure
+// is machine dependent.
+//
+const void* ShGetExecutable(const ShHandle handle)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+ if (linker == 0)
+ return 0;
+
+ return linker->getObjectCode();
+}
+
+//
+// Let the linker know where the application said its attributes are bound.
+// The linker does not use these values; they are remapped by the ICD or
+// hardware. It just needs them to know what's aliased.
+//
+// Return: The return value is really boolean, indicating
+// success or failure.
+//
+int ShSetVirtualAttributeBindings(const ShHandle handle, const ShBindingTable* table)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+
+ if (linker == 0)
+ return 0;
+
+ linker->setAppAttributeBindings(table);
+
+ return 1;
+}
+
+//
+// Let the linker know where the predefined attributes have to live.
+//
+int ShSetFixedAttributeBindings(const ShHandle handle, const ShBindingTable* table)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+
+ if (linker == 0)
+ return 0;
+
+ linker->setFixedAttributeBindings(table);
+ return 1;
+}
+
+//
+// Some attribute locations are off-limits to the linker...
+//
+int ShExcludeAttributes(const ShHandle handle, int *attributes, int count)
+{
+ if (handle == 0)
+ return 0;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TLinker* linker = static_cast<TLinker*>(base->getAsLinker());
+ if (linker == 0)
+ return 0;
+
+ linker->setExcludedAttributes(attributes, count);
+
+ return 1;
+}
+
+//
+// Return the index for OpenGL to use for knowing where a uniform lives.
+//
+// Return: the uniform's location, or -1 on failure.
+//
+int ShGetUniformLocation(const ShHandle handle, const char* name)
+{
+ if (handle == 0)
+ return -1;
+
+ TShHandleBase* base = reinterpret_cast<TShHandleBase*>(handle);
+ TUniformMap* uniformMap= base->getAsUniformMap();
+ if (uniformMap == 0)
+ return -1;
+
+ return uniformMap->getLocation(name);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Deferred-Lowering C++ Interface
+// -----------------------------------
+//
+// Below is a new alternate C++ interface that may eventually replace the
+// above opaque handle-based interface.
+//
+// See more detailed comment in ShaderLang.h
+//
+
+namespace glslang {
+
+#include "../Include/revision.h"
+
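+// Two-level macro so GLSLANG_MINOR_VERSION / GLSLANG_PATCH_LEVEL are
+// expanded before being stringized.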
+#define QUOTE(s) #s
+#define STR(n) QUOTE(n)
+
+const char* GetEsslVersionString()
+{
+ return "OpenGL ES GLSL 3.20 glslang Khronos. " STR(GLSLANG_MINOR_VERSION) "." STR(GLSLANG_PATCH_LEVEL);
+}
+
+const char* GetGlslVersionString()
+{
+ return "4.60 glslang Khronos. " STR(GLSLANG_MINOR_VERSION) "." STR(GLSLANG_PATCH_LEVEL);
+}
+
+int GetKhronosToolId()
+{
+ return 8;
+}
+
+bool InitializeProcess()
+{
+ return ShInitialize() != 0;
+}
+
+void FinalizeProcess()
+{
+ ShFinalize();
+}
+
+class TDeferredCompiler : public TCompiler {
+public:
+ TDeferredCompiler(EShLanguage s, TInfoSink& i) : TCompiler(s, i) { }
+ virtual bool compile(TIntermNode*, int = 0, EProfile = ENoProfile) { return true; }
+};
+
+TShader::TShader(EShLanguage s)
+ : stage(s), lengths(nullptr), stringNames(nullptr), preamble("")
+{
+ pool = new TPoolAllocator;
+ infoSink = new TInfoSink;
+ compiler = new TDeferredCompiler(stage, *infoSink);
+ intermediate = new TIntermediate(s);
+
+ // clear environment (avoid constructors in them for use in a C interface)
+ environment.input.languageFamily = EShSourceNone;
+ environment.input.dialect = EShClientNone;
+ environment.client.client = EShClientNone;
+ environment.target.language = EShTargetNone;
+ environment.target.hlslFunctionality1 = false;
+}
+
+TShader::~TShader()
+{
+ delete infoSink;
+ delete compiler;
+ delete intermediate;
+ delete pool;
+}
+
+void TShader::setStrings(const char* const* s, int n)
+{
+ strings = s;
+ numStrings = n;
+ lengths = nullptr;
+}
+
+void TShader::setStringsWithLengths(const char* const* s, const int* l, int n)
+{
+ strings = s;
+ numStrings = n;
+ lengths = l;
+}
+
+void TShader::setStringsWithLengthsAndNames(
+ const char* const* s, const int* l, const char* const* names, int n)
+{
+ strings = s;
+ numStrings = n;
+ lengths = l;
+ stringNames = names;
+}
+
+void TShader::setEntryPoint(const char* entryPoint)
+{
+ intermediate->setEntryPointName(entryPoint);
+}
+
+void TShader::setSourceEntryPoint(const char* name)
+{
+ sourceEntryPointName = name;
+}
+
+void TShader::addProcesses(const std::vector<std::string>& p)
+{
+ intermediate->addProcesses(p);
+}
+
+// Set binding base for given resource type
+void TShader::setShiftBinding(TResourceType res, unsigned int base) {
+ intermediate->setShiftBinding(res, base);
+}
+
+// Set binding base for given resource type for a given binding set.
+void TShader::setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set) {
+ intermediate->setShiftBindingForSet(res, base, set);
+}
+
+// Set binding base for sampler types
+void TShader::setShiftSamplerBinding(unsigned int base) { setShiftBinding(EResSampler, base); }
+// Set binding base for texture types (SRV)
+void TShader::setShiftTextureBinding(unsigned int base) { setShiftBinding(EResTexture, base); }
+// Set binding base for image types
+void TShader::setShiftImageBinding(unsigned int base) { setShiftBinding(EResImage, base); }
+// Set binding base for uniform buffer objects (CBV)
+void TShader::setShiftUboBinding(unsigned int base) { setShiftBinding(EResUbo, base); }
+// Synonym for setShiftUboBinding, to match HLSL language.
+void TShader::setShiftCbufferBinding(unsigned int base) { setShiftBinding(EResUbo, base); }
+// Set binding base for UAV (unordered access view)
+void TShader::setShiftUavBinding(unsigned int base) { setShiftBinding(EResUav, base); }
+// Set binding base for SSBOs
+void TShader::setShiftSsboBinding(unsigned int base) { setShiftBinding(EResSsbo, base); }
+// Enables binding automapping using TIoMapper
+void TShader::setAutoMapBindings(bool map) { intermediate->setAutoMapBindings(map); }
+// Enables position.Y output negation in vertex shader
+void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); }
+// Fragile: currently within one stage: simple auto-assignment of location
+void TShader::setAutoMapLocations(bool map) { intermediate->setAutoMapLocations(map); }
+void TShader::addUniformLocationOverride(const char* name, int loc)
+{
+ intermediate->addUniformLocationOverride(name, loc);
+}
+void TShader::setUniformLocationBase(int base)
+{
+ intermediate->setUniformLocationBase(base);
+}
+// See comment above TDefaultHlslIoMapper in iomapper.cpp:
+void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); }
+void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); }
+void TShader::setNoStorageFormat(bool useUnknownFormat) { intermediate->setNoStorageFormat(useUnknownFormat); }
+void TShader::setResourceSetBinding(const std::vector<std::string>& base) { intermediate->setResourceSetBinding(base); }
+void TShader::setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { intermediate->setTextureSamplerTransformMode(mode); }
+
+//
+// Turn the shader strings into a parse tree in the TIntermediate.
+//
+// Returns true for success.
+//
+bool TShader::parse(const TBuiltInResource* builtInResources, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages messages, Includer& includer)
+{
+ if (! InitThread())
+ return false;
+ SetThreadPoolAllocator(pool);
+
+ if (! preamble)
+ preamble = "";
+
+ return CompileDeferred(compiler, strings, numStrings, lengths, stringNames,
+ preamble, EShOptNone, builtInResources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, messages, *intermediate, includer, sourceEntryPointName,
+ &environment);
+}
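+
+// A minimal illustrative use of this interface (hypothetical 'source' and
+// 'resources'; 'resources' is a populated TBuiltInResource):
+//
+//     glslang::InitializeProcess();
+//     glslang::TShader shader(EShLangVertex);
+//     shader.setStrings(&source, 1);
+//     glslang::TShader::ForbidIncluder includer;
+//     if (! shader.parse(&resources, 110, ENoProfile, false, false,
+//                        EShMsgDefault, includer))
+//         printf("%s\n", shader.getInfoLog());
+//     glslang::FinalizeProcess();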
+
+// Fill in a string with the result of preprocessing ShaderStrings.
+// Returns true if all extensions, pragmas and version strings were valid.
+//
+// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
+// is not an officially supported or fully working path.
+bool TShader::preprocess(const TBuiltInResource* builtInResources,
+ int defaultVersion, EProfile defaultProfile,
+ bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages message,
+ std::string* output_string,
+ Includer& includer)
+{
+ if (! InitThread())
+ return false;
+ SetThreadPoolAllocator(pool);
+
+ if (! preamble)
+ preamble = "";
+
+ return PreprocessDeferred(compiler, strings, numStrings, lengths, stringNames, preamble,
+ EShOptNone, builtInResources, defaultVersion,
+ defaultProfile, forceDefaultVersionAndProfile,
+ forwardCompatible, message, includer, *intermediate, output_string);
+}
+
+const char* TShader::getInfoLog()
+{
+ return infoSink->info.c_str();
+}
+
+const char* TShader::getInfoDebugLog()
+{
+ return infoSink->debug.c_str();
+}
+
+TProgram::TProgram() : reflection(0), ioMapper(nullptr), linked(false)
+{
+ pool = new TPoolAllocator;
+ infoSink = new TInfoSink;
+ for (int s = 0; s < EShLangCount; ++s) {
+ intermediate[s] = 0;
+ newedIntermediate[s] = false;
+ }
+}
+
+TProgram::~TProgram()
+{
+ delete ioMapper;
+ delete infoSink;
+ delete reflection;
+
+ for (int s = 0; s < EShLangCount; ++s)
+ if (newedIntermediate[s])
+ delete intermediate[s];
+
+ delete pool;
+}
+
+//
+// Merge the compilation units within each stage into a single TIntermediate.
+// All starting compilation units need to be the result of calling TShader::parse().
+//
+// Return true for success.
+//
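+// (Shaders are attached to 'stages' via TProgram::addShader(), declared in
+// ShaderLang.h.)
+//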
+bool TProgram::link(EShMessages messages)
+{
+ if (linked)
+ return false;
+ linked = true;
+
+ bool error = false;
+
+ SetThreadPoolAllocator(pool);
+
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (! linkStage((EShLanguage)s, messages))
+ error = true;
+ }
+
+ // TODO: Link: cross-stage error checking
+
+ return ! error;
+}
+
+//
+// Merge the compilation units within the given stage into a single TIntermediate.
+//
+// Return true for success.
+//
+bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
+{
+ if (stages[stage].size() == 0)
+ return true;
+
+ int numEsShaders = 0, numNonEsShaders = 0;
+ for (auto it = stages[stage].begin(); it != stages[stage].end(); ++it) {
+ if ((*it)->intermediate->getProfile() == EEsProfile) {
+ numEsShaders++;
+ } else {
+ numNonEsShaders++;
+ }
+ }
+
+ if (numEsShaders > 0 && numNonEsShaders > 0) {
+ infoSink->info.message(EPrefixError, "Cannot mix ES profile with non-ES profile shaders");
+ return false;
+ } else if (numEsShaders > 1) {
+ infoSink->info.message(EPrefixError, "Cannot attach multiple ES shaders of the same type to a single program");
+ return false;
+ }
+
+ //
+    // Be efficient for the common case of a single compilation unit per stage,
+    // reusing its TIntermediate instead of merging into a new one.
+ //
+ TIntermediate *firstIntermediate = stages[stage].front()->intermediate;
+ if (stages[stage].size() == 1)
+ intermediate[stage] = firstIntermediate;
+ else {
+ intermediate[stage] = new TIntermediate(stage,
+ firstIntermediate->getVersion(),
+ firstIntermediate->getProfile());
+
+ // The new TIntermediate must use the same origin as the original TIntermediates.
+ // Otherwise linking will fail due to different coordinate systems.
+ if (firstIntermediate->getOriginUpperLeft()) {
+ intermediate[stage]->setOriginUpperLeft();
+ }
+ intermediate[stage]->setSpv(firstIntermediate->getSpv());
+
+ newedIntermediate[stage] = true;
+ }
+
+ if (messages & EShMsgAST)
+ infoSink->info << "\nLinked " << StageName(stage) << " stage:\n\n";
+
+ if (stages[stage].size() > 1) {
+ std::list<TShader*>::const_iterator it;
+ for (it = stages[stage].begin(); it != stages[stage].end(); ++it)
+ intermediate[stage]->merge(*infoSink, *(*it)->intermediate);
+ }
+
+ intermediate[stage]->finalCheck(*infoSink, (messages & EShMsgKeepUncalled) != 0);
+
+ if (messages & EShMsgAST)
+ intermediate[stage]->output(*infoSink, true);
+
+ return intermediate[stage]->getNumErrors() == 0;
+}
+
+const char* TProgram::getInfoLog()
+{
+ return infoSink->info.c_str();
+}
+
+const char* TProgram::getInfoDebugLog()
+{
+ return infoSink->debug.c_str();
+}
+
+//
+// Reflection implementation.
+//
+
+bool TProgram::buildReflection(int opts)
+{
+ if (! linked || reflection)
+ return false;
+
+ int firstStage = EShLangVertex, lastStage = EShLangFragment;
+
+ if (opts & EShReflectionIntermediateIO) {
+ // if we're reflecting intermediate I/O, determine the first and last stage linked and use those as the
+ // boundaries for which stages generate pipeline inputs/outputs
+ firstStage = EShLangCount;
+ lastStage = 0;
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ firstStage = std::min(firstStage, s);
+ lastStage = std::max(lastStage, s);
+ }
+ }
+ }
+
+ reflection = new TReflection((EShReflectionOptions)opts, (EShLanguage)firstStage, (EShLanguage)lastStage);
+
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ if (! reflection->addStage((EShLanguage)s, *intermediate[s]))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); }
+int TProgram::getReflectionIndex(const char* name) const { return reflection->getIndex(name); }
+
+int TProgram::getNumUniformVariables() const { return reflection->getNumUniforms(); }
+const TObjectReflection& TProgram::getUniform(int index) const { return reflection->getUniform(index); }
+int TProgram::getNumUniformBlocks() const { return reflection->getNumUniformBlocks(); }
+const TObjectReflection& TProgram::getUniformBlock(int index) const { return reflection->getUniformBlock(index); }
+int TProgram::getNumPipeInputs() const { return reflection->getNumPipeInputs(); }
+const TObjectReflection& TProgram::getPipeInput(int index) const { return reflection->getPipeInput(index); }
+int TProgram::getNumPipeOutputs() const { return reflection->getNumPipeOutputs(); }
+const TObjectReflection& TProgram::getPipeOutput(int index) const { return reflection->getPipeOutput(index); }
+int TProgram::getNumBufferVariables() const { return reflection->getNumBufferVariables(); }
+const TObjectReflection& TProgram::getBufferVariable(int index) const { return reflection->getBufferVariable(index); }
+int TProgram::getNumBufferBlocks() const { return reflection->getNumStorageBuffers(); }
+const TObjectReflection& TProgram::getBufferBlock(int index) const { return reflection->getStorageBufferBlock(index); }
+int TProgram::getNumAtomicCounters() const { return reflection->getNumAtomicCounters(); }
+const TObjectReflection& TProgram::getAtomicCounter(int index) const { return reflection->getAtomicCounter(index); }
+
+void TProgram::dumpReflection() { reflection->dump(); }
+
+//
+// I/O mapping implementation.
+//
+bool TProgram::mapIO(TIoMapResolver* resolver)
+{
+ if (! linked || ioMapper)
+ return false;
+
+ ioMapper = new TIoMapper;
+
+ for (int s = 0; s < EShLangCount; ++s) {
+ if (intermediate[s]) {
+ if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, resolver))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp
new file mode 100644
index 0000000..d8d6846
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.cpp
@@ -0,0 +1,396 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Symbol table for parsing. Most functionality and main ideas
+// are documented in the header file.
+//
+
+#include "SymbolTable.h"
+
+namespace glslang {
+
+//
+// TType helper function needs a place to live.
+//
+
+//
+// Recursively generate mangled names.
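+// For example (illustrative): "vec3" mangles to "vf3", "mat4" to "mf44",
+// and "float[8]" to "f1[8]", with any array sizes appended last.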
+//
+void TType::buildMangledName(TString& mangledName) const
+{
+ if (isMatrix())
+ mangledName += 'm';
+ else if (isVector())
+ mangledName += 'v';
+
+ switch (basicType) {
+ case EbtFloat: mangledName += 'f'; break;
+ case EbtDouble: mangledName += 'd'; break;
+ case EbtFloat16: mangledName += "f16"; break;
+ case EbtInt: mangledName += 'i'; break;
+ case EbtUint: mangledName += 'u'; break;
+ case EbtInt8: mangledName += "i8"; break;
+ case EbtUint8: mangledName += "u8"; break;
+ case EbtInt16: mangledName += "i16"; break;
+ case EbtUint16: mangledName += "u16"; break;
+ case EbtInt64: mangledName += "i64"; break;
+ case EbtUint64: mangledName += "u64"; break;
+ case EbtBool: mangledName += 'b'; break;
+ case EbtAtomicUint: mangledName += "au"; break;
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV: mangledName += "asnv"; break;
+#endif
+ case EbtSampler:
+ switch (sampler.type) {
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: mangledName += "f16"; break;
+#endif
+ case EbtInt: mangledName += "i"; break;
+ case EbtUint: mangledName += "u"; break;
+ default: break; // some compilers want this
+ }
+ if (sampler.image)
+ mangledName += "I"; // a normal image
+ else if (sampler.sampler)
+ mangledName += "p"; // a "pure" sampler
+ else if (!sampler.combined)
+ mangledName += "t"; // a "pure" texture
+ else
+ mangledName += "s"; // traditional combined sampler
+ if (sampler.arrayed)
+ mangledName += "A";
+ if (sampler.shadow)
+ mangledName += "S";
+ if (sampler.external)
+ mangledName += "E";
+ if (sampler.yuv)
+ mangledName += "Y";
+ switch (sampler.dim) {
+ case Esd1D: mangledName += "1"; break;
+ case Esd2D: mangledName += "2"; break;
+ case Esd3D: mangledName += "3"; break;
+ case EsdCube: mangledName += "C"; break;
+ case EsdRect: mangledName += "R2"; break;
+ case EsdBuffer: mangledName += "B"; break;
+ case EsdSubpass: mangledName += "P"; break;
+ default: break; // some compilers want this
+ }
+
+ if (sampler.hasReturnStruct()) {
+ // Name mangle for sampler return struct uses struct table index.
+ mangledName += "-tx-struct";
+
+            char text[16]; // plenty of space for the small integers.
+ snprintf(text, sizeof(text), "%d-", sampler.structReturnIndex);
+ mangledName += text;
+ } else {
+ switch (sampler.getVectorSize()) {
+ case 1: mangledName += "1"; break;
+ case 2: mangledName += "2"; break;
+ case 3: mangledName += "3"; break;
+ case 4: break; // default to prior name mangle behavior
+ }
+ }
+
+ if (sampler.ms)
+ mangledName += "M";
+ break;
+ case EbtStruct:
+ case EbtBlock:
+ if (basicType == EbtStruct)
+ mangledName += "struct-";
+ else
+ mangledName += "block-";
+ if (typeName)
+ mangledName += *typeName;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ mangledName += '-';
+ (*structure)[i].type->buildMangledName(mangledName);
+        }
+        break;
+    default:
+ break;
+ }
+
+ if (getVectorSize() > 0)
+ mangledName += static_cast<char>('0' + getVectorSize());
+ else {
+ mangledName += static_cast<char>('0' + getMatrixCols());
+ mangledName += static_cast<char>('0' + getMatrixRows());
+ }
+
+ if (arraySizes) {
+        const int maxSize = 20;  // enough for "s" plus a 64-bit pointer in hex, or any 32-bit int
+        char buf[maxSize];
+ for (int i = 0; i < arraySizes->getNumDims(); ++i) {
+ if (arraySizes->getDimNode(i)) {
+ if (arraySizes->getDimNode(i)->getAsSymbolNode())
+ snprintf(buf, maxSize, "s%d", arraySizes->getDimNode(i)->getAsSymbolNode()->getId());
+ else
+ snprintf(buf, maxSize, "s%p", arraySizes->getDimNode(i));
+ } else
+ snprintf(buf, maxSize, "%d", arraySizes->getDimSize(i));
+ mangledName += '[';
+ mangledName += buf;
+ mangledName += ']';
+ }
+ }
+}
+
+//
+// Dump functions.
+//
+
+void TVariable::dump(TInfoSink& infoSink) const
+{
+ infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " " << type.getBasicTypeString();
+ if (type.isArray()) {
+ infoSink.debug << "[0]";
+ }
+ infoSink.debug << "\n";
+}
+
+void TFunction::dump(TInfoSink& infoSink) const
+{
+ infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " " << getMangledName().c_str() << "\n";
+}
+
+void TAnonMember::dump(TInfoSink& infoSink) const
+{
+    infoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str() << "\n";
+}
+
+void TSymbolTableLevel::dump(TInfoSink &infoSink) const
+{
+ tLevel::const_iterator it;
+ for (it = level.begin(); it != level.end(); ++it)
+ (*it).second->dump(infoSink);
+}
+
+void TSymbolTable::dump(TInfoSink &infoSink) const
+{
+ for (int level = currentLevel(); level >= 0; --level) {
+ infoSink.debug << "LEVEL " << level << "\n";
+ table[level]->dump(infoSink);
+ }
+}
+
+//
+// Functions have buried pointers to delete.
+//
+TFunction::~TFunction()
+{
+ for (TParamList::iterator i = parameters.begin(); i != parameters.end(); ++i)
+ delete (*i).type;
+}
+
+//
+// Symbol table levels are a map of pointers to symbols that have to be deleted.
+//
+TSymbolTableLevel::~TSymbolTableLevel()
+{
+ for (tLevel::iterator it = level.begin(); it != level.end(); ++it)
+ delete (*it).second;
+
+ delete [] defaultPrecision;
+}
+
+//
+// Change all function entries in the table with the non-mangled name
+// to be related to the provided built-in operation.
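+// For example (illustrative): relating "min" to EOpMin visits mangled
+// entries such as "min(f1;f1;" and "min(vf2;vf2;" in order, stopping once
+// the "min(" prefix no longer matches.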
+//
+void TSymbolTableLevel::relateToOperator(const char* name, TOperator op)
+{
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ while (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) {
+ TFunction* function = (*candidate).second->getAsFunction();
+ function->relateToOperator(op);
+ } else
+ break;
+ ++candidate;
+ }
+}
+
+// Make all function overloads of the given name require the given extension(s).
+// Should only be used for a version/profile that actually needs the extension(s).
+void TSymbolTableLevel::setFunctionExtensions(const char* name, int num, const char* const extensions[])
+{
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ while (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) {
+ TSymbol* symbol = candidate->second;
+ symbol->setExtensions(num, extensions);
+ } else
+ break;
+ ++candidate;
+ }
+}
+
+//
+// Make all symbols in this table level read only.
+//
+void TSymbolTableLevel::readOnly()
+{
+ for (tLevel::iterator it = level.begin(); it != level.end(); ++it)
+ (*it).second->makeReadOnly();
+}
+
+//
+// Copy a symbol, but the copy is writable; call readOnly() afterward if that's not desired.
+//
+TSymbol::TSymbol(const TSymbol& copyOf)
+{
+ name = NewPoolTString(copyOf.name->c_str());
+ uniqueId = copyOf.uniqueId;
+ writable = true;
+}
+
+TVariable::TVariable(const TVariable& copyOf) : TSymbol(copyOf)
+{
+ type.deepCopy(copyOf.type);
+ userType = copyOf.userType;
+
+ // we don't support specialization-constant subtrees in cloned tables, only extensions
+ constSubtree = nullptr;
+ extensions = nullptr;
+ memberExtensions = nullptr;
+ if (copyOf.getNumExtensions() > 0)
+ setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
+ if (copyOf.hasMemberExtensions()) {
+ for (int m = 0; m < (int)copyOf.type.getStruct()->size(); ++m) {
+ if (copyOf.getNumMemberExtensions(m) > 0)
+ setMemberExtensions(m, copyOf.getNumMemberExtensions(m), copyOf.getMemberExtensions(m));
+ }
+ }
+
+ if (! copyOf.constArray.empty()) {
+ assert(! copyOf.type.isStruct());
+ TConstUnionArray newArray(copyOf.constArray, 0, copyOf.constArray.size());
+ constArray = newArray;
+ }
+}
+
+TVariable* TVariable::clone() const
+{
+ TVariable *variable = new TVariable(*this);
+
+ return variable;
+}
+
+TFunction::TFunction(const TFunction& copyOf) : TSymbol(copyOf)
+{
+ for (unsigned int i = 0; i < copyOf.parameters.size(); ++i) {
+ TParameter param;
+ parameters.push_back(param);
+ parameters.back().copyParam(copyOf.parameters[i]);
+ }
+
+ extensions = nullptr;
+ if (copyOf.getNumExtensions() > 0)
+ setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions());
+ returnType.deepCopy(copyOf.returnType);
+ mangledName = copyOf.mangledName;
+ op = copyOf.op;
+ defined = copyOf.defined;
+ prototyped = copyOf.prototyped;
+ implicitThis = copyOf.implicitThis;
+ illegalImplicitThis = copyOf.illegalImplicitThis;
+ defaultParamCount = copyOf.defaultParamCount;
+}
+
+TFunction* TFunction::clone() const
+{
+ TFunction *function = new TFunction(*this);
+
+ return function;
+}
+
+TAnonMember* TAnonMember::clone() const
+{
+ // Anonymous members of a given block should be cloned at a higher level,
+ // where they can all be assured to still end up pointing to a single
+ // copy of the original container.
+ assert(0);
+
+ return 0;
+}
+
+TSymbolTableLevel* TSymbolTableLevel::clone() const
+{
+ TSymbolTableLevel *symTableLevel = new TSymbolTableLevel();
+ symTableLevel->anonId = anonId;
+ symTableLevel->thisLevel = thisLevel;
+ std::vector<bool> containerCopied(anonId, false);
+ tLevel::const_iterator iter;
+ for (iter = level.begin(); iter != level.end(); ++iter) {
+ const TAnonMember* anon = iter->second->getAsAnonMember();
+ if (anon) {
+            // Insert all the anonymous members of this same container at once,
+            // and mark the container copied so the remaining members are not
+            // inserted again later, keeping them all in the same new container.
+ if (! containerCopied[anon->getAnonId()]) {
+ TVariable* container = anon->getAnonContainer().clone();
+ container->changeName(NewPoolTString(""));
+ // insert the container and all its members
+ symTableLevel->insert(*container, false);
+ containerCopied[anon->getAnonId()] = true;
+ }
+ } else
+ symTableLevel->insert(*iter->second->clone(), false);
+ }
+
+ return symTableLevel;
+}
+
+void TSymbolTable::copyTable(const TSymbolTable& copyOf)
+{
+ assert(adoptedLevels == copyOf.adoptedLevels);
+
+ uniqueId = copyOf.uniqueId;
+ noBuiltInRedeclarations = copyOf.noBuiltInRedeclarations;
+ separateNameSpaces = copyOf.separateNameSpaces;
+ for (unsigned int i = copyOf.adoptedLevels; i < copyOf.table.size(); ++i)
+ table.push_back(copyOf.table[i]->clone());
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.h b/src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.h
new file mode 100644
index 0000000..f9c1903
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/SymbolTable.h
@@ -0,0 +1,871 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _SYMBOL_TABLE_INCLUDED_
+#define _SYMBOL_TABLE_INCLUDED_
+
+//
+// Symbol table for parsing. Has these design characteristics:
+//
+// * Same symbol table can be used to compile many shaders, to preserve
+// effort of creating and loading with the large numbers of built-in
+// symbols.
+//
+// --> This requires a copy mechanism, so initial pools used to create
+// the shared information can be popped. Done through "clone"
+// methods.
+//
+// * Name mangling will be used to give each function a unique name
+// so that symbol table lookups are never ambiguous. This allows
+// a simpler symbol table structure.
+//
+// * Pushing and popping of scope, so symbol table will really be a stack
+// of symbol tables. Searched from the top, with new inserts going into
+// the top.
+//
+// * Constants: Compile time constant symbols will keep their values
+// in the symbol table. The parser can substitute constants at parse
+// time, including doing constant folding and constant propagation.
+//
+// * No temporaries: Temporaries made from operations (+, --, .xy, etc.)
+// are tracked in the intermediate representation, not the symbol table.
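+//
+// A typical lifecycle, as an illustrative sketch only (not the full API):
+//
+//     TSymbolTable table;
+//     table.push();                     // open a new scope
+//     table.insert(*variable);          // add a symbol at the top level
+//     TSymbol* s = table.find(name);    // searches from the top level down
+//     table.pop(nullptr);               // close the scope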
+//
+
+#include "../Include/Common.h"
+#include "../Include/intermediate.h"
+#include "../Include/InfoSink.h"
+
+namespace glslang {
+
+//
+// Symbol base class. (Can build functions or variables out of these...)
+//
+
+class TVariable;
+class TFunction;
+class TAnonMember;
+
+typedef TVector<const char*> TExtensionList;
+
+class TSymbol {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ explicit TSymbol(const TString *n) : name(n), extensions(0), writable(true) { }
+ virtual TSymbol* clone() const = 0;
+ virtual ~TSymbol() { } // rely on all symbol owned memory coming from the pool
+
+ virtual const TString& getName() const { return *name; }
+ virtual void changeName(const TString* newName) { name = newName; }
+ virtual void addPrefix(const char* prefix)
+ {
+ TString newName(prefix);
+ newName.append(*name);
+ changeName(NewPoolTString(newName.c_str()));
+ }
+ virtual const TString& getMangledName() const { return getName(); }
+ virtual TFunction* getAsFunction() { return 0; }
+ virtual const TFunction* getAsFunction() const { return 0; }
+ virtual TVariable* getAsVariable() { return 0; }
+ virtual const TVariable* getAsVariable() const { return 0; }
+ virtual const TAnonMember* getAsAnonMember() const { return 0; }
+ virtual const TType& getType() const = 0;
+ virtual TType& getWritableType() = 0;
+ virtual void setUniqueId(int id) { uniqueId = id; }
+ virtual int getUniqueId() const { return uniqueId; }
+ virtual void setExtensions(int numExts, const char* const exts[])
+ {
+ assert(extensions == 0);
+ assert(numExts > 0);
+ extensions = NewPoolObject(extensions);
+ for (int e = 0; e < numExts; ++e)
+ extensions->push_back(exts[e]);
+ }
+ virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); }
+ virtual const char** getExtensions() const { return extensions->data(); }
+ virtual void dump(TInfoSink &infoSink) const = 0;
+
+ virtual bool isReadOnly() const { return ! writable; }
+ virtual void makeReadOnly() { writable = false; }
+
+protected:
+ explicit TSymbol(const TSymbol&);
+ TSymbol& operator=(const TSymbol&);
+
+ const TString *name;
+ unsigned int uniqueId; // For cross-scope comparing during code generation
+
+ // For tracking what extensions must be present
+ // (don't use if correct version/profile is present).
+ TExtensionList* extensions; // an array of pointers to existing constant char strings
+
+ //
+ // N.B.: Non-const functions that will be generally used should assert on this,
+ // to avoid overwriting shared symbol-table information.
+ //
+ bool writable;
+};
+
+//
+// Variable class, meaning a symbol that's not a function.
+//
+// There could be a separate class hierarchy for Constant variables;
+// Only one of int, bool, or float, (or none) is correct for
+// any particular use, but it's easy to do this way, and doesn't
+// seem worth having separate classes, and "getConst" can't simply return
+// different values for different types polymorphically, so this is
+// just simple and pragmatic.
+//
+class TVariable : public TSymbol {
+public:
+ TVariable(const TString *name, const TType& t, bool uT = false )
+ : TSymbol(name),
+ userType(uT),
+ constSubtree(nullptr),
+ memberExtensions(nullptr),
+ anonId(-1)
+ { type.shallowCopy(t); }
+ virtual TVariable* clone() const;
+ virtual ~TVariable() { }
+
+ virtual TVariable* getAsVariable() { return this; }
+ virtual const TVariable* getAsVariable() const { return this; }
+ virtual const TType& getType() const { return type; }
+ virtual TType& getWritableType() { assert(writable); return type; }
+ virtual bool isUserType() const { return userType; }
+ virtual const TConstUnionArray& getConstArray() const { return constArray; }
+ virtual TConstUnionArray& getWritableConstArray() { assert(writable); return constArray; }
+ virtual void setConstArray(const TConstUnionArray& array) { constArray = array; }
+ virtual void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
+ virtual TIntermTyped* getConstSubtree() const { return constSubtree; }
+ virtual void setAnonId(int i) { anonId = i; }
+ virtual int getAnonId() const { return anonId; }
+
+ virtual void setMemberExtensions(int member, int numExts, const char* const exts[])
+ {
+ assert(type.isStruct());
+ assert(numExts > 0);
+ if (memberExtensions == nullptr) {
+ memberExtensions = NewPoolObject(memberExtensions);
+ memberExtensions->resize(type.getStruct()->size());
+ }
+ for (int e = 0; e < numExts; ++e)
+ (*memberExtensions)[member].push_back(exts[e]);
+ }
+ virtual bool hasMemberExtensions() const { return memberExtensions != nullptr; }
+ virtual int getNumMemberExtensions(int member) const
+ {
+ return memberExtensions == nullptr ? 0 : (int)(*memberExtensions)[member].size();
+ }
+ virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); }
+
+ virtual void dump(TInfoSink &infoSink) const;
+
+protected:
+ explicit TVariable(const TVariable&);
+ TVariable& operator=(const TVariable&);
+
+ TType type;
+ bool userType;
+
+    // we are assuming the pool allocator will free the memory backing
+    // constArray when this object is destroyed
+
+ TConstUnionArray constArray; // for compile-time constant value
+ TIntermTyped* constSubtree; // for specialization constant computation
+ TVector<TExtensionList>* memberExtensions; // per-member extension list, allocated only when needed
+ int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose
+};
+
+//
+// The function sub-class of symbols and the parser will need to
+// share this definition of a function parameter.
+//
+struct TParameter {
+ TString *name;
+ TType* type;
+ TIntermTyped* defaultValue;
+ void copyParam(const TParameter& param)
+ {
+ if (param.name)
+ name = NewPoolTString(param.name->c_str());
+ else
+ name = 0;
+ type = param.type->clone();
+ defaultValue = param.defaultValue;
+ }
+ TBuiltInVariable getDeclaredBuiltIn() const { return type->getQualifier().declaredBuiltIn; }
+};
+
+//
+// The function sub-class of a symbol.
+//
+class TFunction : public TSymbol {
+public:
+ explicit TFunction(TOperator o) :
+ TSymbol(0),
+ op(o),
+ defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0) { }
+ TFunction(const TString *name, const TType& retType, TOperator tOp = EOpNull) :
+ TSymbol(name),
+ mangledName(*name + '('),
+ op(tOp),
+ defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0)
+ {
+ returnType.shallowCopy(retType);
+ declaredBuiltIn = retType.getQualifier().builtIn;
+ }
+ virtual TFunction* clone() const override;
+ virtual ~TFunction();
+
+ virtual TFunction* getAsFunction() override { return this; }
+ virtual const TFunction* getAsFunction() const override { return this; }
+
+ // Install 'p' as the (non-'this') last parameter.
+ // Non-'this' parameters are reflected in both the list of parameters and the
+ // mangled name.
+ virtual void addParameter(TParameter& p)
+ {
+ assert(writable);
+ parameters.push_back(p);
+ p.type->appendMangledName(mangledName);
+
+ if (p.defaultValue != nullptr)
+ defaultParamCount++;
+ }
+
+ // Install 'this' as the first parameter.
+ // 'this' is reflected in the list of parameters, but not the mangled name.
+ virtual void addThisParameter(TType& type, const char* name)
+ {
+ TParameter p = { NewPoolTString(name), new TType, nullptr };
+ p.type->shallowCopy(type);
+ parameters.insert(parameters.begin(), p);
+ }
+
+ virtual void addPrefix(const char* prefix) override
+ {
+ TSymbol::addPrefix(prefix);
+ mangledName.insert(0, prefix);
+ }
+
+ virtual void removePrefix(const TString& prefix)
+ {
+ assert(mangledName.compare(0, prefix.size(), prefix) == 0);
+ mangledName.erase(0, prefix.size());
+ }
+
+ virtual const TString& getMangledName() const override { return mangledName; }
+ virtual const TType& getType() const override { return returnType; }
+ virtual TBuiltInVariable getDeclaredBuiltInType() const { return declaredBuiltIn; }
+ virtual TType& getWritableType() override { return returnType; }
+ virtual void relateToOperator(TOperator o) { assert(writable); op = o; }
+ virtual TOperator getBuiltInOp() const { return op; }
+ virtual void setDefined() { assert(writable); defined = true; }
+ virtual bool isDefined() const { return defined; }
+ virtual void setPrototyped() { assert(writable); prototyped = true; }
+ virtual bool isPrototyped() const { return prototyped; }
+ virtual void setImplicitThis() { assert(writable); implicitThis = true; }
+ virtual bool hasImplicitThis() const { return implicitThis; }
+ virtual void setIllegalImplicitThis() { assert(writable); illegalImplicitThis = true; }
+ virtual bool hasIllegalImplicitThis() const { return illegalImplicitThis; }
+
+ // Return total number of parameters
+ virtual int getParamCount() const { return static_cast<int>(parameters.size()); }
+ // Return number of parameters with default values.
+ virtual int getDefaultParamCount() const { return defaultParamCount; }
+ // Return number of fixed parameters (without default values)
+ virtual int getFixedParamCount() const { return getParamCount() - getDefaultParamCount(); }
+
+ virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
+ virtual const TParameter& operator[](int i) const { return parameters[i]; }
+
+ virtual void dump(TInfoSink &infoSink) const override;
+
+protected:
+ explicit TFunction(const TFunction&);
+ TFunction& operator=(const TFunction&);
+
+ typedef TVector<TParameter> TParamList;
+ TParamList parameters;
+ TType returnType;
+ TBuiltInVariable declaredBuiltIn;
+
+ TString mangledName;
+ TOperator op;
+ bool defined;
+ bool prototyped;
+ bool implicitThis; // True if this function is allowed to see all members of 'this'
+ bool illegalImplicitThis; // True if this function is not supposed to have access to dynamic members of 'this',
+ // even if it finds member variables in the symbol table.
+ // This is important for a static member function that has member variables in scope,
+ // but is not allowed to use them, or see hidden symbols instead.
+ int defaultParamCount;
+};
+
+//
+// Members of anonymous blocks are a kind of TSymbol. They are not hidden in
+// the symbol table behind a container; rather they are visible and point to
+// their anonymous container. (The anonymous container is found through the
+// member, not the other way around.)
+//
+class TAnonMember : public TSymbol {
+public:
+ TAnonMember(const TString* n, unsigned int m, TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { }
+ virtual TAnonMember* clone() const override;
+ virtual ~TAnonMember() { }
+
+ virtual const TAnonMember* getAsAnonMember() const override { return this; }
+ virtual const TVariable& getAnonContainer() const { return anonContainer; }
+ virtual unsigned int getMemberNumber() const { return memberNumber; }
+
+ virtual const TType& getType() const override
+ {
+ const TTypeList& types = *anonContainer.getType().getStruct();
+ return *types[memberNumber].type;
+ }
+
+ virtual TType& getWritableType() override
+ {
+ assert(writable);
+ const TTypeList& types = *anonContainer.getType().getStruct();
+ return *types[memberNumber].type;
+ }
+
+ virtual void setExtensions(int numExts, const char* const exts[]) override
+ {
+ anonContainer.setMemberExtensions(memberNumber, numExts, exts);
+ }
+ virtual int getNumExtensions() const override { return anonContainer.getNumMemberExtensions(memberNumber); }
+ virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); }
+
+ virtual int getAnonId() const { return anonId; }
+ virtual void dump(TInfoSink &infoSink) const override;
+
+protected:
+ explicit TAnonMember(const TAnonMember&);
+ TAnonMember& operator=(const TAnonMember&);
+
+ TVariable& anonContainer;
+ unsigned int memberNumber;
+ int anonId;
+};
+
+class TSymbolTableLevel {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+ TSymbolTableLevel() : defaultPrecision(0), anonId(0), thisLevel(false) { }
+ ~TSymbolTableLevel();
+
+ bool insert(TSymbol& symbol, bool separateNameSpaces)
+ {
+ //
+ // returning true means symbol was added to the table with no semantic errors
+ //
+ const TString& name = symbol.getName();
+ if (name == "") {
+ symbol.getAsVariable()->setAnonId(anonId++);
+ // An empty name means an anonymous container, exposing its members to the external scope.
+ // Give it a name and insert its members in the symbol table, pointing to the container.
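+            // For example (illustrative): "uniform Block { vec4 v; };" has no
+            // instance name, so the container is renamed to AnonymousPrefix plus
+            // its id (e.g. "__anon__0"), and member "v" is inserted separately,
+            // pointing back at the container.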
+ char buf[20];
+ snprintf(buf, 20, "%s%d", AnonymousPrefix, symbol.getAsVariable()->getAnonId());
+ symbol.changeName(NewPoolTString(buf));
+
+ return insertAnonymousMembers(symbol, 0);
+ } else {
+ // Check for redefinition errors:
+ // - STL itself will tell us if there is a direct name collision, with name mangling, at this level
+ // - additionally, check for function-redefining-variable name collisions
+ const TString& insertName = symbol.getMangledName();
+ if (symbol.getAsFunction()) {
+ // make sure there isn't a variable of this name
+ if (! separateNameSpaces && level.find(name) != level.end())
+ return false;
+
+ // insert, and whatever happens is okay
+ level.insert(tLevelPair(insertName, &symbol));
+
+ return true;
+ } else
+ return level.insert(tLevelPair(insertName, &symbol)).second;
+ }
+ }
+
+ // Add more members to an already inserted aggregate object
+ bool amend(TSymbol& symbol, int firstNewMember)
+ {
+        // See insert() for the basic explanation of inserting;
+        // this operates similarly, but more simply.
+ // Only supporting amend of anonymous blocks so far.
+ if (IsAnonymous(symbol.getName()))
+ return insertAnonymousMembers(symbol, firstNewMember);
+ else
+ return false;
+ }
+
+ bool insertAnonymousMembers(TSymbol& symbol, int firstMember)
+ {
+ const TTypeList& types = *symbol.getAsVariable()->getType().getStruct();
+ for (unsigned int m = firstMember; m < types.size(); ++m) {
+ TAnonMember* member = new TAnonMember(&types[m].type->getFieldName(), m, *symbol.getAsVariable(), symbol.getAsVariable()->getAnonId());
+ if (! level.insert(tLevelPair(member->getMangledName(), member)).second)
+ return false;
+ }
+
+ return true;
+ }
+
+ TSymbol* find(const TString& name) const
+ {
+ tLevel::const_iterator it = level.find(name);
+ if (it == level.end())
+ return 0;
+ else
+ return (*it).second;
+ }
+
+ void findFunctionNameList(const TString& name, TVector<const TFunction*>& list)
+ {
+ size_t parenAt = name.find_first_of('(');
+ TString base(name, 0, parenAt + 1);
+
+ tLevel::const_iterator begin = level.lower_bound(base);
+ base[parenAt] = ')'; // assume ')' is lexically after '('
+ tLevel::const_iterator end = level.upper_bound(base);
+ for (tLevel::const_iterator it = begin; it != end; ++it)
+ list.push_back(it->second->getAsFunction());
+ }
+
+ // See if there is already a function in the table having the given non-function-style name.
+ bool hasFunctionName(const TString& name) const
+ {
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ if (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+            if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0)
+                return true;
+ }
+
+ return false;
+ }
+
+ // See if there is a variable at this level having the given non-function-style name.
+ // Return true if name is found, and set variable to true if the name was a variable.
+ bool findFunctionVariableName(const TString& name, bool& variable) const
+ {
+ tLevel::const_iterator candidate = level.lower_bound(name);
+ if (candidate != level.end()) {
+ const TString& candidateName = (*candidate).first;
+ TString::size_type parenAt = candidateName.find_first_of('(');
+ if (parenAt == candidateName.npos) {
+ // not a mangled name
+ if (candidateName == name) {
+ // found a variable name match
+ variable = true;
+ return true;
+ }
+ } else {
+ // a mangled name
+ if (candidateName.compare(0, parenAt, name) == 0) {
+ // found a function name match
+ variable = false;
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ // Use this to do a lazy 'push' of precision defaults the first time
+ // a precision statement is seen in a new scope. Leave it at 0 for
+ // when no push was needed. Thus, it is not the current defaults,
+ // it is what to restore the defaults to when popping a level.
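+    // For example (illustrative): the first "precision mediump float;" seen
+    // in a new scope latches the enclosing scope's defaults here, so a later
+    // pop() can restore them.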
+ void setPreviousDefaultPrecisions(const TPrecisionQualifier *p)
+ {
+ // can call multiple times at one scope, will only latch on first call,
+ // as we're tracking the previous scope's values, not the current values
+ if (defaultPrecision != 0)
+ return;
+
+ defaultPrecision = new TPrecisionQualifier[EbtNumTypes];
+ for (int t = 0; t < EbtNumTypes; ++t)
+ defaultPrecision[t] = p[t];
+ }
+
+ void getPreviousDefaultPrecisions(TPrecisionQualifier *p)
+ {
+ // can be called for table level pops that didn't set the
+ // defaults
+ if (defaultPrecision == 0 || p == 0)
+ return;
+
+ for (int t = 0; t < EbtNumTypes; ++t)
+ p[t] = defaultPrecision[t];
+ }
+
+ void relateToOperator(const char* name, TOperator op);
+ void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
+ void dump(TInfoSink &infoSink) const;
+ TSymbolTableLevel* clone() const;
+ void readOnly();
+
+ void setThisLevel() { thisLevel = true; }
+ bool isThisLevel() const { return thisLevel; }
+
+protected:
+ explicit TSymbolTableLevel(TSymbolTableLevel&);
+ TSymbolTableLevel& operator=(TSymbolTableLevel&);
+
+ typedef std::map<TString, TSymbol*, std::less<TString>, pool_allocator<std::pair<const TString, TSymbol*> > > tLevel;
+ typedef const tLevel::value_type tLevelPair;
+ typedef std::pair<tLevel::iterator, bool> tInsertResult;
+
+ tLevel level; // named mappings
+ TPrecisionQualifier *defaultPrecision;
+ int anonId;
+ bool thisLevel; // True if this level of the symbol table is a structure scope containing member function
+ // that are supposed to see anonymous access to member variables.
+};
+
+class TSymbolTable {
+public:
+ TSymbolTable() : uniqueId(0), noBuiltInRedeclarations(false), separateNameSpaces(false), adoptedLevels(0)
+ {
+ //
+ // This symbol table cannot be used until push() is called.
+ //
+ }
+ ~TSymbolTable()
+ {
+ // this can be called explicitly; safest to code it so it can be called multiple times
+
+ // don't deallocate levels passed in from elsewhere
+ while (table.size() > adoptedLevels)
+ pop(0);
+ }
+
+ void adoptLevels(TSymbolTable& symTable)
+ {
+ for (unsigned int level = 0; level < symTable.table.size(); ++level) {
+ table.push_back(symTable.table[level]);
+ ++adoptedLevels;
+ }
+ uniqueId = symTable.uniqueId;
+ noBuiltInRedeclarations = symTable.noBuiltInRedeclarations;
+ separateNameSpaces = symTable.separateNameSpaces;
+ }
+
+ //
+    // While level adopting is generic, the methods below enact the following
+ // convention for levels:
+ // 0: common built-ins shared across all stages, all compiles, only one copy for all symbol tables
+ // 1: per-stage built-ins, shared across all compiles, but a different copy per stage
+ // 2: built-ins specific to a compile, like resources that are context-dependent, or redeclared built-ins
+ // 3: user-shader globals
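+    //
+    // For example (illustrative): under this convention, a built-in like
+    // "gl_FragCoord" is found at level 1 (per-stage built-ins), while a
+    // user-shader global is found at level 3.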
+ //
+protected:
+ static const int globalLevel = 3;
+ bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels
+ bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals
+ bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals
+public:
+ bool isEmpty() { return table.size() == 0; }
+ bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); }
+ bool atGlobalLevel() { return isGlobalLevel(currentLevel()); }
+
+ void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; }
+ void setSeparateNameSpaces() { separateNameSpaces = true; }
+
+ void push()
+ {
+ table.push_back(new TSymbolTableLevel);
+ }
+
+ // Make a new symbol-table level to represent the scope introduced by a structure
+ // containing member functions, such that the member functions can find anonymous
+ // references to member variables.
+ //
+ // 'thisSymbol' should have a name of "" to trigger anonymous structure-member
+ // symbol finds.
+ void pushThis(TSymbol& thisSymbol)
+ {
+ assert(thisSymbol.getName().size() == 0);
+ table.push_back(new TSymbolTableLevel);
+ table.back()->setThisLevel();
+ insert(thisSymbol);
+ }
+
+ void pop(TPrecisionQualifier *p)
+ {
+ table[currentLevel()]->getPreviousDefaultPrecisions(p);
+ delete table.back();
+ table.pop_back();
+ }
+
+ //
+ // Insert a visible symbol into the symbol table so it can
+ // be found later by name.
+ //
+    // Returns false if there was a name collision.
+ //
+ bool insert(TSymbol& symbol)
+ {
+ symbol.setUniqueId(++uniqueId);
+
+ // make sure there isn't a function of this variable name
+ if (! separateNameSpaces && ! symbol.getAsFunction() && table[currentLevel()]->hasFunctionName(symbol.getName()))
+ return false;
+
+ // check for not overloading or redefining a built-in function
+ if (noBuiltInRedeclarations) {
+ if (atGlobalLevel() && currentLevel() > 0) {
+ if (table[0]->hasFunctionName(symbol.getName()))
+ return false;
+ if (currentLevel() > 1 && table[1]->hasFunctionName(symbol.getName()))
+ return false;
+ }
+ }
+
+ return table[currentLevel()]->insert(symbol, separateNameSpaces);
+ }
+
+ // Add more members to an already inserted aggregate object
+ bool amend(TSymbol& symbol, int firstNewMember)
+ {
+        // See insert() for the basic explanation of inserting;
+        // this operates similarly, but more simply.
+ return table[currentLevel()]->amend(symbol, firstNewMember);
+ }
+
+ //
+    // To allocate an internal temporary, which will need to be uniquely
+    // identified by the consumer of the AST, but never needs to be
+    // found by doing a symbol table search by name, so it is allowed an
+    // arbitrary name in the symbol with no worry of collision.
+ //
+ void makeInternalVariable(TSymbol& symbol)
+ {
+ symbol.setUniqueId(++uniqueId);
+ }
+
+ //
+ // Copy a variable or anonymous member's structure from a shared level so that
+ // it can be added (soon after return) to the symbol table where it can be
+ // modified without impacting other users of the shared table.
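+    //
+    // For example (illustrative): redeclaring a built-in such as gl_FragDepth
+    // copies it up from the shared built-in level first, so the per-compile
+    // copy can be changed without affecting other compiles.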
+ //
+ TSymbol* copyUpDeferredInsert(TSymbol* shared)
+ {
+ if (shared->getAsVariable()) {
+ TSymbol* copy = shared->clone();
+ copy->setUniqueId(shared->getUniqueId());
+ return copy;
+ } else {
+ const TAnonMember* anon = shared->getAsAnonMember();
+ assert(anon);
+ TVariable* container = anon->getAnonContainer().clone();
+ container->changeName(NewPoolTString(""));
+ container->setUniqueId(anon->getAnonContainer().getUniqueId());
+ return container;
+ }
+ }
+
+ TSymbol* copyUp(TSymbol* shared)
+ {
+ TSymbol* copy = copyUpDeferredInsert(shared);
+ table[globalLevel]->insert(*copy, separateNameSpaces);
+ if (shared->getAsVariable())
+ return copy;
+ else {
+ // return the copy of the anonymous member
+ return table[globalLevel]->find(shared->getName());
+ }
+ }
+
+ // Normal find of a symbol, that can optionally say whether the symbol was found
+ // at a built-in level or the current top-scope level.
+ TSymbol* find(const TString& name, bool* builtIn = 0, bool* currentScope = 0, int* thisDepthP = 0)
+ {
+ int level = currentLevel();
+ TSymbol* symbol;
+ int thisDepth = 0;
+ do {
+ if (table[level]->isThisLevel())
+ ++thisDepth;
+ symbol = table[level]->find(name);
+ --level;
+ } while (symbol == nullptr && level >= 0);
+ level++;
+ if (builtIn)
+ *builtIn = isBuiltInLevel(level);
+ if (currentScope)
+ *currentScope = isGlobalLevel(currentLevel()) || level == currentLevel(); // consider shared levels as "current scope" WRT user globals
+ if (thisDepthP != nullptr) {
+ if (! table[level]->isThisLevel())
+ thisDepth = 0;
+ *thisDepthP = thisDepth;
+ }
+
+ return symbol;
+ }
+
+ // Find of a symbol that returns how many layers deep of nested
+ // structures-with-member-functions ('this' scopes) deep the symbol was
+ // found in.
+ TSymbol* find(const TString& name, int& thisDepth)
+ {
+ int level = currentLevel();
+ TSymbol* symbol;
+ thisDepth = 0;
+ do {
+ if (table[level]->isThisLevel())
+ ++thisDepth;
+ symbol = table[level]->find(name);
+ --level;
+ } while (symbol == 0 && level >= 0);
+
+ if (! table[level + 1]->isThisLevel())
+ thisDepth = 0;
+
+ return symbol;
+ }
+
+ bool isFunctionNameVariable(const TString& name) const
+ {
+ if (separateNameSpaces)
+ return false;
+
+ int level = currentLevel();
+ do {
+ bool variable;
+ bool found = table[level]->findFunctionVariableName(name, variable);
+ if (found)
+ return variable;
+ --level;
+ } while (level >= 0);
+
+ return false;
+ }
+
+ void findFunctionNameList(const TString& name, TVector<const TFunction*>& list, bool& builtIn)
+ {
+ // For user levels, return the set found in the first scope with a match
+ builtIn = false;
+ int level = currentLevel();
+ do {
+ table[level]->findFunctionNameList(name, list);
+ --level;
+ } while (list.empty() && level >= globalLevel);
+
+ if (! list.empty())
+ return;
+
+ // Gather across all built-in levels; they don't hide each other
+ builtIn = true;
+ do {
+ table[level]->findFunctionNameList(name, list);
+ --level;
+ } while (level >= 0);
+ }
+
+ void relateToOperator(const char* name, TOperator op)
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->relateToOperator(name, op);
+ }
+
+ void setFunctionExtensions(const char* name, int num, const char* const extensions[])
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->setFunctionExtensions(name, num, extensions);
+ }
+
+ void setVariableExtensions(const char* name, int numExts, const char* const extensions[])
+ {
+ TSymbol* symbol = find(TString(name));
+ if (symbol == nullptr)
+ return;
+
+ symbol->setExtensions(numExts, extensions);
+ }
+
+ void setVariableExtensions(const char* blockName, const char* name, int numExts, const char* const extensions[])
+ {
+ TSymbol* symbol = find(TString(blockName));
+ if (symbol == nullptr)
+ return;
+ TVariable* variable = symbol->getAsVariable();
+ assert(variable != nullptr);
+
+ const TTypeList& structure = *variable->getAsVariable()->getType().getStruct();
+ for (int member = 0; member < (int)structure.size(); ++member) {
+ if (structure[member].type->getFieldName().compare(name) == 0) {
+ variable->setMemberExtensions(member, numExts, extensions);
+ return;
+ }
+ }
+ }
+
+ int getMaxSymbolId() { return uniqueId; }
+ void dump(TInfoSink &infoSink) const;
+ void copyTable(const TSymbolTable& copyOf);
+
+ void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }
+
+ void readOnly()
+ {
+ for (unsigned int level = 0; level < table.size(); ++level)
+ table[level]->readOnly();
+ }
+
+protected:
+ TSymbolTable(TSymbolTable&);
+    TSymbolTable& operator=(TSymbolTable&);
+
+ int currentLevel() const { return static_cast<int>(table.size()) - 1; }
+
+ std::vector<TSymbolTableLevel*> table;
+ int uniqueId; // for unique identification in code generation
+ bool noBuiltInRedeclarations;
+ bool separateNameSpaces;
+ unsigned int adoptedLevels;
+};
+
+} // end namespace glslang
+
+#endif // _SYMBOL_TABLE_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Versions.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/Versions.cpp
new file mode 100644
index 0000000..0d4b994
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Versions.cpp
@@ -0,0 +1,1126 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Help manage multiple profiles, versions, extensions etc.
+//
+// These don't return error codes, as the presumption is parsing will
+// always continue as if the tested feature were enabled, and thus there
+// is no error recovery needed.
+//
+
+//
+// HOW TO add a feature enabled by an extension.
+//
+// To add a new hypothetical "Feature F" to the front end, where an extension
+// "XXX_extension_X" can be used to enable the feature, do the following.
+//
+// OVERVIEW: Specific features are what are error-checked for, not
+// extensions: A specific Feature F might be enabled by an extension, or a
+// particular version in a particular profile, or a stage, or combinations, etc.
+//
+// The basic mechanism is to use the following to "declare" all the things that
+// enable/disable Feature F, in a code path that implements Feature F:
+//
+// requireProfile()
+// profileRequires()
+// requireStage()
+// checkDeprecated()
+// requireNotRemoved()
+// requireExtensions()
+//
+// Typically, only the first two calls are needed. They go into a code path that
+// implements Feature F, and will log the proper error/warning messages. Parsing
+// will then always continue as if the tested feature was enabled.
+//
+// There is typically no if-testing or conditional parsing, just insertion of the calls above.
+// However, if symbols specific to the extension are added (step 5), they will
+// only be added under tests that the minimum version and profile are present.
+//
+// 1) Add a symbol name for the extension string at the bottom of Versions.h:
+//
+// const char* const XXX_extension_X = "XXX_extension_X";
+//
+// 2) Add extension initialization to TParseVersions::initializeExtensionBehavior(),
+// the first function below:
+//
+// extensionBehavior[XXX_extension_X] = EBhDisable;
+//
+// 3) Add any preprocessor directives etc. in the next function, TParseVersions::getPreamble():
+//
+// "#define XXX_extension_X 1\n"
+//
+// The new-line is important, as that ends preprocess tokens.
+//
+// 4) Insert a profile check in the feature's path (unless all profiles support the feature,
+// for some version level). That is, call requireProfile() to constrain the profiles, e.g.:
+//
+// // ... in a path specific to Feature F...
+// requireProfile(loc,
+// ECoreProfile | ECompatibilityProfile,
+// "Feature F");
+//
+// 5) For each profile that supports the feature, insert version/extension checks:
+//
+//     The most likely scenario is that Feature F can only be used with a
+// particular profile if XXX_extension_X is present or the version is
+// high enough that the core specification already incorporated it.
+//
+// // following the requireProfile() call...
+// profileRequires(loc,
+// ECoreProfile | ECompatibilityProfile,
+// 420, // 0 if no version incorporated the feature into the core spec.
+// XXX_extension_X, // can be a list of extensions that all add the feature
+// "Feature F Description");
+//
+// This allows the feature if either A) one of the extensions is enabled or
+// B) the version is high enough. If no version yet incorporates the feature
+// into core, pass in 0.
+//
+// This can be called multiple times, if different profiles support the
+// feature starting at different version numbers or with different
+// extensions.
+//
+// This must be called for each profile allowed by the initial call to requireProfile().
+//
+// Profiles are all masks, which can be "or"-ed together.
+//
+// ENoProfile
+// ECoreProfile
+// ECompatibilityProfile
+// EEsProfile
+//
+// The ENoProfile profile is only for desktop, before profiles showed up in version 150;
+//    all other #version with no profile default to either es or core, and so have profiles.
+//
+// You can select all but a particular profile using ~. The following basically means "desktop":
+//
+// ~EEsProfile
+//
+//  6) If built-in symbols are added by the extension, add them in Initialize.cpp: their use
+//     will be automatically error checked against the extensions enabled at that moment.
+//     See the comment at the top of Initialize.cpp for where to put them. Establish them at
+// the earliest release that supports the extension. Then, tag them with the
+// set of extensions that both enable them and are necessary, given the version of the symbol
+// table. (There is a different symbol table for each version.)
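+//
+// Putting steps 4) and 5) together, a minimal sketch for a hypothetical
+// Feature F (the extension name is a placeholder, not a real extension):
+//
+//     // ... in the code path implementing Feature F ...
+//     requireProfile(loc, ECoreProfile | ECompatibilityProfile, "Feature F");
+//     profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420,
+//                     XXX_extension_X, "Feature F");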
+//
+
+#include "parseVersions.h"
+#include "localintermediate.h"
+
+namespace glslang {
+
+//
+// Initialize all extensions, almost always to 'disable'; once a feature
+// is incorporated into a core version, it is supported by allowing that
+// core version, not through a pseudo-enablement of the extension.
+//
+void TParseVersions::initializeExtensionBehavior()
+{
+ extensionBehavior[E_GL_OES_texture_3D] = EBhDisable;
+ extensionBehavior[E_GL_OES_standard_derivatives] = EBhDisable;
+ extensionBehavior[E_GL_EXT_frag_depth] = EBhDisable;
+ extensionBehavior[E_GL_OES_EGL_image_external] = EBhDisable;
+ extensionBehavior[E_GL_OES_EGL_image_external_essl3] = EBhDisable;
+ extensionBehavior[E_GL_EXT_YUV_target] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_texture_lod] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shadow_samplers] = EBhDisable;
+ extensionBehavior[E_GL_ARB_texture_rectangle] = EBhDisable;
+ extensionBehavior[E_GL_3DL_array_objects] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shading_language_420pack] = EBhDisable;
+ extensionBehavior[E_GL_ARB_texture_gather] = EBhDisable;
+ extensionBehavior[E_GL_ARB_gpu_shader5] = EBhDisablePartial;
+ extensionBehavior[E_GL_ARB_separate_shader_objects] = EBhDisable;
+ extensionBehavior[E_GL_ARB_compute_shader] = EBhDisable;
+ extensionBehavior[E_GL_ARB_tessellation_shader] = EBhDisable;
+ extensionBehavior[E_GL_ARB_enhanced_layouts] = EBhDisable;
+ extensionBehavior[E_GL_ARB_texture_cube_map_array] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_texture_lod] = EBhDisable;
+ extensionBehavior[E_GL_ARB_explicit_attrib_location] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_image_load_store] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_atomic_counters] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_draw_parameters] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_group_vote] = EBhDisable;
+ extensionBehavior[E_GL_ARB_derivative_control] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_texture_image_samples] = EBhDisable;
+ extensionBehavior[E_GL_ARB_viewport_array] = EBhDisable;
+ extensionBehavior[E_GL_ARB_gpu_shader_int64] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_ballot] = EBhDisable;
+ extensionBehavior[E_GL_ARB_sparse_texture2] = EBhDisable;
+ extensionBehavior[E_GL_ARB_sparse_texture_clamp] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_stencil_export] = EBhDisable;
+// extensionBehavior[E_GL_ARB_cull_distance] = EBhDisable; // present for 4.5, but need extension control over block members
+ extensionBehavior[E_GL_ARB_post_depth_coverage] = EBhDisable;
+ extensionBehavior[E_GL_ARB_shader_viewport_layer_array] = EBhDisable;
+
+ extensionBehavior[E_GL_KHR_shader_subgroup_basic] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_vote] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_arithmetic] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_ballot] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_shuffle] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_shuffle_relative] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_clustered] = EBhDisable;
+ extensionBehavior[E_GL_KHR_shader_subgroup_quad] = EBhDisable;
+ extensionBehavior[E_GL_KHR_memory_scope_semantics] = EBhDisable;
+
+ extensionBehavior[E_GL_EXT_shader_atomic_int64] = EBhDisable;
+
+ extensionBehavior[E_GL_EXT_shader_non_constant_global_initializers] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_image_load_formatted] = EBhDisable;
+ extensionBehavior[E_GL_EXT_post_depth_coverage] = EBhDisable;
+ extensionBehavior[E_GL_EXT_control_flow_attributes] = EBhDisable;
+ extensionBehavior[E_GL_EXT_nonuniform_qualifier] = EBhDisable;
+ extensionBehavior[E_GL_EXT_samplerless_texture_functions] = EBhDisable;
+ extensionBehavior[E_GL_EXT_scalar_block_layout] = EBhDisable;
+ extensionBehavior[E_GL_EXT_fragment_invocation_density] = EBhDisable;
+ extensionBehavior[E_GL_EXT_buffer_reference] = EBhDisable;
+
+ extensionBehavior[E_GL_EXT_shader_16bit_storage] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_8bit_storage] = EBhDisable;
+
+ // #line and #include
+ extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhDisable;
+ extensionBehavior[E_GL_GOOGLE_include_directive] = EBhDisable;
+
+#ifdef AMD_EXTENSIONS
+ extensionBehavior[E_GL_AMD_shader_ballot] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gcn_shader] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gpu_shader_half_float] = EBhDisable;
+ extensionBehavior[E_GL_AMD_texture_gather_bias_lod] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gpu_shader_int16] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_image_load_store_lod] = EBhDisable;
+ extensionBehavior[E_GL_AMD_shader_fragment_mask] = EBhDisable;
+ extensionBehavior[E_GL_AMD_gpu_shader_half_float_fetch] = EBhDisable;
+#endif
+
+#ifdef NV_EXTENSIONS
+ extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable;
+ extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable;
+ extensionBehavior[E_GL_NV_viewport_array2] = EBhDisable;
+ extensionBehavior[E_GL_NV_stereo_view_rendering] = EBhDisable;
+ extensionBehavior[E_GL_NVX_multiview_per_view_attributes] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_atomic_int64] = EBhDisable;
+ extensionBehavior[E_GL_NV_conservative_raster_underestimation] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_noperspective_interpolation] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_subgroup_partitioned] = EBhDisable;
+ extensionBehavior[E_GL_NV_shading_rate_image] = EBhDisable;
+ extensionBehavior[E_GL_NV_ray_tracing] = EBhDisable;
+ extensionBehavior[E_GL_NV_fragment_shader_barycentric] = EBhDisable;
+ extensionBehavior[E_GL_NV_compute_shader_derivatives] = EBhDisable;
+ extensionBehavior[E_GL_NV_shader_texture_footprint] = EBhDisable;
+ extensionBehavior[E_GL_NV_mesh_shader] = EBhDisable;
+#endif
+
+ extensionBehavior[E_GL_NV_cooperative_matrix] = EBhDisable;
+
+ // AEP
+ extensionBehavior[E_GL_ANDROID_extension_pack_es31a] = EBhDisable;
+ extensionBehavior[E_GL_KHR_blend_equation_advanced] = EBhDisable;
+ extensionBehavior[E_GL_OES_sample_variables] = EBhDisable;
+ extensionBehavior[E_GL_OES_shader_image_atomic] = EBhDisable;
+ extensionBehavior[E_GL_OES_shader_multisample_interpolation] = EBhDisable;
+ extensionBehavior[E_GL_OES_texture_storage_multisample_2d_array] = EBhDisable;
+ extensionBehavior[E_GL_EXT_geometry_shader] = EBhDisable;
+ extensionBehavior[E_GL_EXT_geometry_point_size] = EBhDisable;
+ extensionBehavior[E_GL_EXT_gpu_shader5] = EBhDisable;
+ extensionBehavior[E_GL_EXT_primitive_bounding_box] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_io_blocks] = EBhDisable;
+ extensionBehavior[E_GL_EXT_tessellation_shader] = EBhDisable;
+ extensionBehavior[E_GL_EXT_tessellation_point_size] = EBhDisable;
+ extensionBehavior[E_GL_EXT_texture_buffer] = EBhDisable;
+ extensionBehavior[E_GL_EXT_texture_cube_map_array] = EBhDisable;
+
+ // OES matching AEP
+ extensionBehavior[E_GL_OES_geometry_shader] = EBhDisable;
+ extensionBehavior[E_GL_OES_geometry_point_size] = EBhDisable;
+ extensionBehavior[E_GL_OES_gpu_shader5] = EBhDisable;
+ extensionBehavior[E_GL_OES_primitive_bounding_box] = EBhDisable;
+ extensionBehavior[E_GL_OES_shader_io_blocks] = EBhDisable;
+ extensionBehavior[E_GL_OES_tessellation_shader] = EBhDisable;
+ extensionBehavior[E_GL_OES_tessellation_point_size] = EBhDisable;
+ extensionBehavior[E_GL_OES_texture_buffer] = EBhDisable;
+ extensionBehavior[E_GL_OES_texture_cube_map_array] = EBhDisable;
+
+ // EXT extensions
+ extensionBehavior[E_GL_EXT_device_group] = EBhDisable;
+ extensionBehavior[E_GL_EXT_multiview] = EBhDisable;
+
+ // OVR extensions
+ extensionBehavior[E_GL_OVR_multiview] = EBhDisable;
+ extensionBehavior[E_GL_OVR_multiview2] = EBhDisable;
+
+ // explicit types
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int8] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int16] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int32] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int64] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float16] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float32] = EBhDisable;
+ extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float64] = EBhDisable;
+}
+
+// Get code that is not part of a shared symbol table, is specific to this shader,
+// or is needed by the preprocessor (which does not use a shared symbol table).
+void TParseVersions::getPreamble(std::string& preamble)
+{
+ if (profile == EEsProfile) {
+ preamble =
+ "#define GL_ES 1\n"
+ "#define GL_FRAGMENT_PRECISION_HIGH 1\n"
+ "#define GL_OES_texture_3D 1\n"
+ "#define GL_OES_standard_derivatives 1\n"
+ "#define GL_EXT_frag_depth 1\n"
+ "#define GL_OES_EGL_image_external 1\n"
+ "#define GL_OES_EGL_image_external_essl3 1\n"
+ "#define GL_EXT_YUV_target 1\n"
+ "#define GL_EXT_shader_texture_lod 1\n"
+ "#define GL_EXT_shadow_samplers 1\n"
+
+ // AEP
+ "#define GL_ANDROID_extension_pack_es31a 1\n"
+ "#define GL_KHR_blend_equation_advanced 1\n"
+ "#define GL_OES_sample_variables 1\n"
+ "#define GL_OES_shader_image_atomic 1\n"
+ "#define GL_OES_shader_multisample_interpolation 1\n"
+ "#define GL_OES_texture_storage_multisample_2d_array 1\n"
+ "#define GL_EXT_geometry_shader 1\n"
+ "#define GL_EXT_geometry_point_size 1\n"
+ "#define GL_EXT_gpu_shader5 1\n"
+ "#define GL_EXT_primitive_bounding_box 1\n"
+ "#define GL_EXT_shader_io_blocks 1\n"
+ "#define GL_EXT_tessellation_shader 1\n"
+ "#define GL_EXT_tessellation_point_size 1\n"
+ "#define GL_EXT_texture_buffer 1\n"
+ "#define GL_EXT_texture_cube_map_array 1\n"
+
+ // OES matching AEP
+ "#define GL_OES_geometry_shader 1\n"
+ "#define GL_OES_geometry_point_size 1\n"
+ "#define GL_OES_gpu_shader5 1\n"
+ "#define GL_OES_primitive_bounding_box 1\n"
+ "#define GL_OES_shader_io_blocks 1\n"
+ "#define GL_OES_tessellation_shader 1\n"
+ "#define GL_OES_tessellation_point_size 1\n"
+ "#define GL_OES_texture_buffer 1\n"
+ "#define GL_OES_texture_cube_map_array 1\n"
+ "#define GL_EXT_shader_non_constant_global_initializers 1\n"
+ ;
+
+#ifdef NV_EXTENSIONS
+        if (version >= 300) { // the enclosing branch already established EEsProfile
+ preamble += "#define GL_NV_shader_noperspective_interpolation 1\n";
+ }
+#endif
+
+ } else {
+ preamble =
+ "#define GL_FRAGMENT_PRECISION_HIGH 1\n"
+ "#define GL_ARB_texture_rectangle 1\n"
+ "#define GL_ARB_shading_language_420pack 1\n"
+ "#define GL_ARB_texture_gather 1\n"
+ "#define GL_ARB_gpu_shader5 1\n"
+ "#define GL_ARB_separate_shader_objects 1\n"
+ "#define GL_ARB_compute_shader 1\n"
+ "#define GL_ARB_tessellation_shader 1\n"
+ "#define GL_ARB_enhanced_layouts 1\n"
+ "#define GL_ARB_texture_cube_map_array 1\n"
+ "#define GL_ARB_shader_texture_lod 1\n"
+ "#define GL_ARB_explicit_attrib_location 1\n"
+ "#define GL_ARB_shader_image_load_store 1\n"
+ "#define GL_ARB_shader_atomic_counters 1\n"
+ "#define GL_ARB_shader_draw_parameters 1\n"
+ "#define GL_ARB_shader_group_vote 1\n"
+ "#define GL_ARB_derivative_control 1\n"
+ "#define GL_ARB_shader_texture_image_samples 1\n"
+ "#define GL_ARB_viewport_array 1\n"
+ "#define GL_ARB_gpu_shader_int64 1\n"
+ "#define GL_ARB_shader_ballot 1\n"
+ "#define GL_ARB_sparse_texture2 1\n"
+ "#define GL_ARB_sparse_texture_clamp 1\n"
+ "#define GL_ARB_shader_stencil_export 1\n"
+// "#define GL_ARB_cull_distance 1\n" // present for 4.5, but need extension control over block members
+ "#define GL_ARB_post_depth_coverage 1\n"
+ "#define GL_EXT_shader_non_constant_global_initializers 1\n"
+ "#define GL_EXT_shader_image_load_formatted 1\n"
+ "#define GL_EXT_post_depth_coverage 1\n"
+ "#define GL_EXT_control_flow_attributes 1\n"
+ "#define GL_EXT_nonuniform_qualifier 1\n"
+ "#define GL_EXT_shader_16bit_storage 1\n"
+ "#define GL_EXT_shader_8bit_storage 1\n"
+ "#define GL_EXT_samplerless_texture_functions 1\n"
+ "#define GL_EXT_scalar_block_layout 1\n"
+ "#define GL_EXT_fragment_invocation_density 1\n"
+ "#define GL_EXT_buffer_reference 1\n"
+
+ // GL_KHR_shader_subgroup
+ "#define GL_KHR_shader_subgroup_basic 1\n"
+ "#define GL_KHR_shader_subgroup_vote 1\n"
+ "#define GL_KHR_shader_subgroup_arithmetic 1\n"
+ "#define GL_KHR_shader_subgroup_ballot 1\n"
+ "#define GL_KHR_shader_subgroup_shuffle 1\n"
+ "#define GL_KHR_shader_subgroup_shuffle_relative 1\n"
+ "#define GL_KHR_shader_subgroup_clustered 1\n"
+ "#define GL_KHR_shader_subgroup_quad 1\n"
+
+            "#define GL_EXT_shader_atomic_int64 1\n"
+
+#ifdef AMD_EXTENSIONS
+ "#define GL_AMD_shader_ballot 1\n"
+ "#define GL_AMD_shader_trinary_minmax 1\n"
+ "#define GL_AMD_shader_explicit_vertex_parameter 1\n"
+ "#define GL_AMD_gcn_shader 1\n"
+ "#define GL_AMD_gpu_shader_half_float 1\n"
+ "#define GL_AMD_texture_gather_bias_lod 1\n"
+ "#define GL_AMD_gpu_shader_int16 1\n"
+ "#define GL_AMD_shader_image_load_store_lod 1\n"
+ "#define GL_AMD_shader_fragment_mask 1\n"
+ "#define GL_AMD_gpu_shader_half_float_fetch 1\n"
+#endif
+
+#ifdef NV_EXTENSIONS
+ "#define GL_NV_sample_mask_override_coverage 1\n"
+ "#define GL_NV_geometry_shader_passthrough 1\n"
+ "#define GL_NV_viewport_array2 1\n"
+ "#define GL_NV_shader_atomic_int64 1\n"
+ "#define GL_NV_conservative_raster_underestimation 1\n"
+ "#define GL_NV_shader_subgroup_partitioned 1\n"
+ "#define GL_NV_shading_rate_image 1\n"
+ "#define GL_NV_ray_tracing 1\n"
+ "#define GL_NV_fragment_shader_barycentric 1\n"
+ "#define GL_NV_compute_shader_derivatives 1\n"
+ "#define GL_NV_shader_texture_footprint 1\n"
+ "#define GL_NV_mesh_shader 1\n"
+#endif
+ "#define GL_NV_cooperative_matrix 1\n"
+
+ "#define GL_EXT_shader_explicit_arithmetic_types 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int8 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int16 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int32 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_int64 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_float16 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_float32 1\n"
+ "#define GL_EXT_shader_explicit_arithmetic_types_float64 1\n"
+ ;
+
+ if (version >= 150) {
+ // define GL_core_profile and GL_compatibility_profile
+ preamble += "#define GL_core_profile 1\n";
+
+ if (profile == ECompatibilityProfile)
+ preamble += "#define GL_compatibility_profile 1\n";
+ }
+ }
+
+ if ((profile != EEsProfile && version >= 140) ||
+ (profile == EEsProfile && version >= 310)) {
+ preamble +=
+ "#define GL_EXT_device_group 1\n"
+ "#define GL_EXT_multiview 1\n"
+ ;
+ }
+
+ if (version >= 300 /* both ES and non-ES */) {
+ preamble +=
+ "#define GL_OVR_multiview 1\n"
+ "#define GL_OVR_multiview2 1\n"
+ ;
+ }
+
+ // #line and #include
+ preamble +=
+ "#define GL_GOOGLE_cpp_style_line_directive 1\n"
+ "#define GL_GOOGLE_include_directive 1\n"
+ ;
+
+ // #define VULKAN XXXX
+ const int numberBufSize = 12;
+ char numberBuf[numberBufSize];
+ if (spvVersion.vulkanGlsl > 0) {
+ preamble += "#define VULKAN ";
+ snprintf(numberBuf, numberBufSize, "%d", spvVersion.vulkanGlsl);
+ preamble += numberBuf;
+ preamble += "\n";
+ }
+ // #define GL_SPIRV XXXX
+ if (spvVersion.openGl > 0) {
+ preamble += "#define GL_SPIRV ";
+ snprintf(numberBuf, numberBufSize, "%d", spvVersion.openGl);
+ preamble += numberBuf;
+ preamble += "\n";
+ }
+
+}
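+
+// Illustrative result (a sketch, not exercised by this file): for a Vulkan GLSL
+// compile with spvVersion.vulkanGlsl == 100 (and openGl == 0), the desktop branch
+// above produces a preamble ending in:
+//
+//     #define GL_GOOGLE_cpp_style_line_directive 1
+//     #define GL_GOOGLE_include_directive 1
+//     #define VULKAN 100
+//
+// The preprocessor consumes this text ahead of the user's shader source.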
+
+//
+// When to use requireProfile():
+//
+// Use if only some profiles support a feature. However, if within a profile the feature
+// is version or extension specific, follow this call with calls to profileRequires().
+//
+// Operation: If the current profile is not one of the profileMask,
+// give an error message.
+//
+void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
+{
+ if (! (profile & profileMask))
+ error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
+}
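+
+// Illustrative pairing, per the comment above (doubleCheck() later in this file
+// is a real instance): restrict the profile first, then gate on version:
+//
+//     requireProfile(loc, ECoreProfile | ECompatibilityProfile, "double");
+//     profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, "double");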
+
+//
+// Map from stage enum to externally readable text name.
+//
+const char* StageName(EShLanguage stage)
+{
+ switch(stage) {
+ case EShLangVertex: return "vertex";
+ case EShLangTessControl: return "tessellation control";
+ case EShLangTessEvaluation: return "tessellation evaluation";
+ case EShLangGeometry: return "geometry";
+ case EShLangFragment: return "fragment";
+ case EShLangCompute: return "compute";
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV: return "ray-generation";
+ case EShLangIntersectNV: return "intersection";
+ case EShLangAnyHitNV: return "any-hit";
+ case EShLangClosestHitNV: return "closest-hit";
+ case EShLangMissNV: return "miss";
+ case EShLangCallableNV: return "callable";
+ case EShLangMeshNV: return "mesh";
+ case EShLangTaskNV: return "task";
+#endif
+ default: return "unknown stage";
+ }
+}
+
+//
+// When to use profileRequires():
+//
+// If a set of profiles have the same requirements for what version or extensions
+// are needed to support a feature.
+//
+// It must be called for each profile that needs protection. Use requireProfile() first
+// to reduce that set of profiles.
+//
+// Operation: Will issue warnings/errors based on the current profile, version, and extension
+// behaviors. It only checks extensions when the current profile is one of the profileMask.
+//
+// A minVersion of 0 means no version in the profileMask supports this in core;
+// the extension must be present.
+//
+
+// entry point that takes multiple extensions
+void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ if (profile & profileMask) {
+ bool okay = false;
+ if (minVersion > 0 && version >= minVersion)
+ okay = true;
+ for (int i = 0; i < numExtensions; ++i) {
+ switch (getExtensionBehavior(extensions[i])) {
+ case EBhWarn:
+ infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
+ // fall through
+ case EBhRequire:
+ case EBhEnable:
+ okay = true;
+ break;
+ default: break; // some compilers want this
+ }
+ }
+
+ if (! okay)
+ error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
+ }
+}
+
+// entry point for the above that takes a single extension
+void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, const char* featureDesc)
+{
+ profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
+}
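+
+// Illustrative call (the feature name is hypothetical): in core from ES 3.10,
+// otherwise the extension must be enabled:
+//
+//     profileRequires(loc, EEsProfile, 310, E_GL_OES_gpu_shader5, "hypothetical op");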
+
+//
+// When to use requireStage()
+//
+// If only some stages support a feature.
+//
+// Operation: If the current stage is not present, give an error message.
+//
+void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguageMask languageMask, const char* featureDesc)
+{
+ if (((1 << language) & languageMask) == 0)
+ error(loc, "not supported in this stage:", featureDesc, StageName(language));
+}
+
+// Convenience overload for when only one stage supports a feature. If several
+// stages support it, they must all be specified in a single call to the mask
+// overload above.
+void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguage stage, const char* featureDesc)
+{
+ requireStage(loc, static_cast<EShLanguageMask>(1 << stage), featureDesc);
+}
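+
+// Illustrative call (hypothetical feature): gating a fragment-only feature:
+//
+//     requireStage(loc, EShLangFragment, "hypothetical fragment-only builtin");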
+
+//
+// Within a set of profiles, see if a feature is deprecated and give an error or warning based on whether
+// a forward-compatible context is in use.
+//
+void TParseVersions::checkDeprecated(const TSourceLoc& loc, int profileMask, int depVersion, const char* featureDesc)
+{
+ if (profile & profileMask) {
+ if (version >= depVersion) {
+ if (forwardCompatible)
+ error(loc, "deprecated, may be removed in future release", featureDesc, "");
+ else if (! suppressWarnings())
+ infoSink.info.message(EPrefixWarning, (TString(featureDesc) + " deprecated in version " +
+ String(depVersion) + "; may be removed in future release").c_str(), loc);
+ }
+ }
+}
+
+//
+// Within a set of profiles, see if a feature has now been removed and if so, give an error.
+// The version argument is the first version no longer having the feature.
+//
+void TParseVersions::requireNotRemoved(const TSourceLoc& loc, int profileMask, int removedVersion, const char* featureDesc)
+{
+ if (profile & profileMask) {
+ if (version >= removedVersion) {
+ const int maxSize = 60;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%s profile; removed in version %d", ProfileName(profile), removedVersion);
+ error(loc, "no longer supported in", featureDesc, buf);
+ }
+ }
+}
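+
+// Illustrative call (hypothetical feature removed from core in 420): in a
+// version-450 core compile this reports an error mentioning
+// "removed in version 420":
+//
+//     requireNotRemoved(loc, ECoreProfile, 420, "hypothetical feature");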
+
+void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
+{
+ error(loc, "feature not yet implemented", featureDesc, "");
+}
+
+// Returns true if at least one of the extensions in the extensions parameter is requested. Otherwise, returns false.
+// Warns appropriately if the requested behavior of an extension is "warn".
+bool TParseVersions::checkExtensionsRequested(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ // First, see if any of the extensions are enabled
+ for (int i = 0; i < numExtensions; ++i) {
+ TExtensionBehavior behavior = getExtensionBehavior(extensions[i]);
+ if (behavior == EBhEnable || behavior == EBhRequire)
+ return true;
+ }
+
+ // See if any extensions want to give a warning on use; give warnings for all such extensions
+ bool warned = false;
+ for (int i = 0; i < numExtensions; ++i) {
+ TExtensionBehavior behavior = getExtensionBehavior(extensions[i]);
+ if (behavior == EBhDisable && relaxedErrors()) {
+ infoSink.info.message(EPrefixWarning, "The following extension must be enabled to use this feature:", loc);
+ behavior = EBhWarn;
+ }
+ if (behavior == EBhWarn) {
+ infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
+ warned = true;
+ }
+ }
+ if (warned)
+ return true;
+ return false;
+}
+
+//
+// Use when there is no profile/version to check; it is simply an error if none
+// of the listed extensions is enabled.
+//
+void TParseVersions::requireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc))
+ return;
+
+ // If we get this far, give errors explaining what extensions are needed
+ if (numExtensions == 1)
+ error(loc, "required extension not requested:", featureDesc, extensions[0]);
+ else {
+ error(loc, "required extension not requested:", featureDesc, "Possible extensions include:");
+ for (int i = 0; i < numExtensions; ++i)
+ infoSink.info.message(EPrefixNone, extensions[i]);
+ }
+}
+
+//
+// Used by the preprocessor when there is no profile/version to check; it is
+// simply an error if none of the listed extensions is enabled.
+//
+void TParseVersions::ppRequireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
+{
+ if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc))
+ return;
+
+ // If we get this far, give errors explaining what extensions are needed
+ if (numExtensions == 1)
+ ppError(loc, "required extension not requested:", featureDesc, extensions[0]);
+ else {
+ ppError(loc, "required extension not requested:", featureDesc, "Possible extensions include:");
+ for (int i = 0; i < numExtensions; ++i)
+ infoSink.info.message(EPrefixNone, extensions[i]);
+ }
+}
+
+TExtensionBehavior TParseVersions::getExtensionBehavior(const char* extension)
+{
+ auto iter = extensionBehavior.find(TString(extension));
+ if (iter == extensionBehavior.end())
+ return EBhMissing;
+ else
+ return iter->second;
+}
+
+// Returns true if the given extension is set to enable, require, or warn.
+bool TParseVersions::extensionTurnedOn(const char* const extension)
+{
+ switch (getExtensionBehavior(extension)) {
+ case EBhEnable:
+ case EBhRequire:
+ case EBhWarn:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+// See if any of the extensions are set to enable, require, or warn.
+bool TParseVersions::extensionsTurnedOn(int numExtensions, const char* const extensions[])
+{
+ for (int i = 0; i < numExtensions; ++i) {
+ if (extensionTurnedOn(extensions[i]))
+ return true;
+ }
+ return false;
+}
+
+//
+// Change the current state of an extension's behavior.
+//
+void TParseVersions::updateExtensionBehavior(int line, const char* extension, const char* behaviorString)
+{
+ // Translate from text string of extension's behavior to an enum.
+ TExtensionBehavior behavior = EBhDisable;
+ if (! strcmp("require", behaviorString))
+ behavior = EBhRequire;
+ else if (! strcmp("enable", behaviorString))
+ behavior = EBhEnable;
+ else if (! strcmp("disable", behaviorString))
+ behavior = EBhDisable;
+ else if (! strcmp("warn", behaviorString))
+ behavior = EBhWarn;
+ else {
+ error(getCurrentLoc(), "behavior not supported:", "#extension", behaviorString);
+ return;
+ }
+
+ // check if extension is used with correct shader stage
+ checkExtensionStage(getCurrentLoc(), extension);
+
+ // update the requested extension
+ updateExtensionBehavior(extension, behavior);
+
+    // see if this extension needs to propagate to implicitly modified extensions
+ if (strcmp(extension, "GL_ANDROID_extension_pack_es31a") == 0) {
+ // to everything in AEP
+ updateExtensionBehavior(line, "GL_KHR_blend_equation_advanced", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_sample_variables", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_shader_image_atomic", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_shader_multisample_interpolation", behaviorString);
+ updateExtensionBehavior(line, "GL_OES_texture_storage_multisample_2d_array", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_geometry_shader", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_gpu_shader5", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_primitive_bounding_box", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_tessellation_shader", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_texture_buffer", behaviorString);
+ updateExtensionBehavior(line, "GL_EXT_texture_cube_map_array", behaviorString);
+ }
+ // geometry to io_blocks
+ else if (strcmp(extension, "GL_EXT_geometry_shader") == 0)
+ updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString);
+ else if (strcmp(extension, "GL_OES_geometry_shader") == 0)
+ updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString);
+ // tessellation to io_blocks
+ else if (strcmp(extension, "GL_EXT_tessellation_shader") == 0)
+ updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString);
+ else if (strcmp(extension, "GL_OES_tessellation_shader") == 0)
+ updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString);
+ else if (strcmp(extension, "GL_GOOGLE_include_directive") == 0)
+ updateExtensionBehavior(line, "GL_GOOGLE_cpp_style_line_directive", behaviorString);
+ // subgroup_* to subgroup_basic
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_vote") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_arithmetic") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_ballot") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_shuffle") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_shuffle_relative") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_clustered") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+ else if (strcmp(extension, "GL_KHR_shader_subgroup_quad") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+#ifdef NV_EXTENSIONS
+ else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0)
+ updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
+#endif
+}
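+
+// Illustrative trace (a sketch): a shader line
+//
+//     #extension GL_KHR_shader_subgroup_ballot : enable
+//
+// arrives here with behaviorString == "enable"; besides updating the ballot
+// entry itself, the cascade above also enables GL_KHR_shader_subgroup_basic.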
+
+void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBehavior behavior)
+{
+ // Update the current behavior
+ if (strcmp(extension, "all") == 0) {
+ // special case for the 'all' extension; apply it to every extension present
+ if (behavior == EBhRequire || behavior == EBhEnable) {
+ error(getCurrentLoc(), "extension 'all' cannot have 'require' or 'enable' behavior", "#extension", "");
+ return;
+ } else {
+ for (auto iter = extensionBehavior.begin(); iter != extensionBehavior.end(); ++iter)
+ iter->second = behavior;
+ }
+ } else {
+ // Do the update for this single extension
+ auto iter = extensionBehavior.find(TString(extension));
+ if (iter == extensionBehavior.end()) {
+ switch (behavior) {
+ case EBhRequire:
+ error(getCurrentLoc(), "extension not supported:", "#extension", extension);
+ break;
+ case EBhEnable:
+ case EBhWarn:
+ case EBhDisable:
+ warn(getCurrentLoc(), "extension not supported:", "#extension", extension);
+ break;
+ default:
+ assert(0 && "unexpected behavior");
+ }
+
+ return;
+ } else {
+ if (iter->second == EBhDisablePartial)
+ warn(getCurrentLoc(), "extension is only partially supported:", "#extension", extension);
+ if (behavior == EBhEnable || behavior == EBhRequire)
+ intermediate.addRequestedExtension(extension);
+ iter->second = behavior;
+ }
+ }
+}
+
+// Check if extension is used with correct shader stage.
+void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * const extension)
+{
+#ifdef NV_EXTENSIONS
+ // GL_NV_mesh_shader extension is only allowed in task/mesh shaders
+ if (strcmp(extension, "GL_NV_mesh_shader") == 0) {
+ requireStage(loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask | EShLangFragmentMask),
+ "#extension GL_NV_mesh_shader");
+        profileRequires(loc, ECoreProfile, 450, nullptr, "#extension GL_NV_mesh_shader");
+        profileRequires(loc, EEsProfile, 320, nullptr, "#extension GL_NV_mesh_shader");
+ }
+#endif
+}
+
+// Call for any operation needing full GLSL integer data-type support.
+void TParseVersions::fullIntegerCheck(const TSourceLoc& loc, const char* op)
+{
+ profileRequires(loc, ENoProfile, 130, nullptr, op);
+ profileRequires(loc, EEsProfile, 300, nullptr, op);
+}
+
+// Call for any operation needing GLSL double data-type support.
+void TParseVersions::doubleCheck(const TSourceLoc& loc, const char* op)
+{
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+}
+
+// Call for any operation needing GLSL float16 data-type support.
+void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+bool TParseVersions::float16Arithmetic()
+{
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
+}
+
+bool TParseVersions::int16Arithmetic()
+{
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
+}
+
+bool TParseVersions::int8Arithmetic()
+{
+ const char* const extensions[] = {
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
+}
+
+void TParseVersions::requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
+{
+ TString combined;
+ combined = op;
+ combined += ": ";
+ combined += featureDesc;
+
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
+}
+
+void TParseVersions::requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
+{
+ TString combined;
+ combined = op;
+ combined += ": ";
+ combined += featureDesc;
+
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
+}
+
+void TParseVersions::requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc)
+{
+ TString combined;
+ combined = op;
+ combined += ": ";
+ combined += featureDesc;
+
+ const char* const extensions[] = {
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
+}
+
+void TParseVersions::float16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_half_float,
+#endif
+ E_GL_EXT_shader_16bit_storage,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL float32 data-type support.
+void TParseVersions::explicitFloat32Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float32};
+ requireExtensions(loc, 2, extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL float64 data-type support.
+void TParseVersions::explicitFloat64Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_float64};
+ requireExtensions(loc, 2, extensions, op);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+ }
+}
+
+// Call for any operation needing GLSL explicit int8 data-type support.
+void TParseVersions::explicitInt8Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ requireExtensions(loc, 2, extensions, op);
+ }
+}
+
+#ifdef AMD_EXTENSIONS
+// Call for any operation needing GLSL float16 opaque-type support
+void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ requireExtensions(loc, 1, &E_GL_AMD_gpu_shader_half_float_fetch, op);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+ }
+}
+#endif
+
+// Call for any operation needing GLSL explicit int16 data-type support.
+void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+void TParseVersions::int16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_16bit_storage,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+void TParseVersions::int8ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[] = {
+ E_GL_EXT_shader_8bit_storage,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int8};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL explicit int32 data-type support.
+void TParseVersions::explicitInt32Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int32};
+ requireExtensions(loc, 2, extensions, op);
+ }
+}
+
+// Call for any operation needing GLSL 64-bit integer data-type support.
+void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (! builtIn) {
+ const char* const extensions[3] = {E_GL_ARB_gpu_shader_int64,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int64};
+ requireExtensions(loc, 3, extensions, op);
+ requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
+ profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
+ }
+}
+
+void TParseVersions::fcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn)
+{
+ if (!builtIn) {
+ const char* const extensions[] = {E_GL_NV_cooperative_matrix};
+ requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
+ }
+}
+
+// Call for any operation removed because SPIR-V is in use.
+void TParseVersions::spvRemoved(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.spv != 0)
+ error(loc, "not allowed when generating SPIR-V", op, "");
+}
+
+// Call for any operation removed because Vulkan SPIR-V is being generated.
+void TParseVersions::vulkanRemoved(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.vulkan > 0)
+ error(loc, "not allowed when using GLSL for Vulkan", op, "");
+}
+
+// Call for any operation that requires Vulkan.
+void TParseVersions::requireVulkan(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.vulkan == 0)
+ error(loc, "only allowed when using GLSL for Vulkan", op, "");
+}
+
+// Call for any operation that requires SPIR-V.
+void TParseVersions::requireSpv(const TSourceLoc& loc, const char* op)
+{
+ if (spvVersion.spv == 0)
+ error(loc, "only allowed when generating SPIR-V", op, "");
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/Versions.h b/src/3rdparty/glslang/glslang/MachineIndependent/Versions.h
new file mode 100644
index 0000000..e571b51
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/Versions.h
@@ -0,0 +1,299 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef _VERSIONS_INCLUDED_
+#define _VERSIONS_INCLUDED_
+
+//
+// Help manage multiple profiles, versions, extensions etc.
+//
+
+//
+// Profiles are set up for masking operations, so queries can be done on multiple
+// profiles at the same time.
+//
+// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible
+// defects from mixing the two different forms.
+//
+typedef enum {
+ EBadProfile = 0,
+ ENoProfile = (1 << 0), // only for desktop, before profiles showed up
+ ECoreProfile = (1 << 1),
+ ECompatibilityProfile = (1 << 2),
+ EEsProfile = (1 << 3)
+} EProfile;
+
+namespace glslang {
+
+//
+// Map from profile enum to externally readable text name.
+//
+inline const char* ProfileName(EProfile profile)
+{
+ switch (profile) {
+ case ENoProfile: return "none";
+ case ECoreProfile: return "core";
+ case ECompatibilityProfile: return "compatibility";
+ case EEsProfile: return "es";
+ default: return "unknown profile";
+ }
+}
+
+//
+// What source rules, validation rules, target language, etc. are needed or
+// desired for SPIR-V?
+//
+// 0 means a target or rule set is not enabled (ignore rules from that entity).
+// Non-0 means to apply semantic rules arising from that version of its rule set.
+// The union of all requested rule sets will be applied.
+//
+struct SpvVersion {
+ SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0) {}
+ unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header
+ int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX"
+ int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use
+ int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX"
+};
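+
+// Illustrative setup (a sketch; the concrete values below are assumptions,
+// normally chosen by the client API layer rather than this header):
+//
+//     SpvVersion spv;
+//     spv.spv = 0x00010000;  // SPIR-V 1.0, encoded as in word 1 of the binary header
+//     spv.vulkanGlsl = 100;  // GL_KHR_vulkan_glsl revision; yields "#define VULKAN 100"
+//     spv.vulkan = 100;      // any non-0 value turns on the Vulkan rule checks in Versions.cpp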
+
+//
+// The behaviors from the GLSL "#extension extension_name : behavior"
+//
+typedef enum {
+ EBhMissing = 0,
+ EBhRequire,
+ EBhEnable,
+ EBhWarn,
+ EBhDisable,
+ EBhDisablePartial // use as initial state of an extension that is only partially implemented
+} TExtensionBehavior;
+
+//
+// Symbolic names for extensions. Strings may be directly used when calling the
+// functions, but better to have the compiler do spelling checks.
+//
+const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D";
+const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives";
+const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth";
+const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external";
+const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3";
+const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target";
+const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod";
+const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers";
+
+const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle";
+const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects";
+const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack";
+const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather";
+const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5";
+const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects";
+const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader";
+const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
+const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
+const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
+const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
+const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
+const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
+const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
+const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
+const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote";
+const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control";
+const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
+const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
+const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
+const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
+const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
+const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
+const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export";
+// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
+const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
+const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
+
+const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
+const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
+const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic";
+const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot";
+const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle";
+const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
+const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
+const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
+const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics";
+
+const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64";
+
+const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
+const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";
+
+const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage";
+const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage";
+
+// EXT extensions
+const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
+const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
+const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
+const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
+const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
+const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions";
+const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
+const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
+const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
+
+// Arrays of extensions for the above post_depth_coverage duplications
+
+const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage };
+const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]);
+
+// OVR extensions
+const char* const E_GL_OVR_multiview = "GL_OVR_multiview";
+const char* const E_GL_OVR_multiview2 = "GL_OVR_multiview2";
+
+const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 };
+const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]);
+
+// #line and #include
+const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
+const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
+
+#ifdef AMD_EXTENSIONS
+const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
+const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
+const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
+const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
+const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
+const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod";
+const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16";
+const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
+const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
+const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
+#endif
+
+#ifdef NV_EXTENSIONS
+
+const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
+const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
+const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2";
+const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering";
+const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes";
+const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64";
+const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
+const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
+const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
+const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image";
+const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing";
+const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric";
+const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives";
+const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint";
+const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader";
+
+// Arrays of extensions for the above viewportEXTs duplications
+
+const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
+const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
+#endif
+
+const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
+
+// AEP
+const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
+const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced";
+const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables";
+const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic";
+const char* const E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation";
+const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array";
+const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader";
+const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size";
+const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5";
+const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box";
+const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks";
+const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader";
+const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size";
+const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer";
+const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array";
+
+// OES matching AEP
+const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader";
+const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size";
+const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5";
+const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box";
+const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks";
+const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader";
+const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size";
+const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
+const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
+
+// explicit types
+const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32";
+const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64";
+
+// Arrays of extensions for the above AEP duplications
+
+const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };
+const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]);
+
+const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, E_GL_OES_geometry_point_size };
+const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]);
+
+const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 };
+const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]);
+
+const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box };
+const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]);
+
+const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks };
+const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]);
+
+const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader };
+const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]);
+
+const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size };
+const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]);
+
+const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer };
+const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]);
+
+const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array };
+const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]);
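+
+// These EXT/OES pairs feed the multi-extension helpers in Versions.cpp, e.g.
+// (illustrative):
+//
+//     requireExtensions(loc, Num_AEP_tessellation_shader, AEP_tessellation_shader,
+//                       "tessellation shaders");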
+
+} // end namespace glslang
+
+#endif // _VERSIONS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/attribute.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/attribute.cpp
new file mode 100644
index 0000000..73b665d
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/attribute.cpp
@@ -0,0 +1,257 @@
+//
+// Copyright (C) 2017 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google, Inc., nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "attribute.h"
+#include "../Include/intermediate.h"
+#include "ParseHelper.h"
+
+namespace glslang {
+
+// extract integers out of attribute arguments stored in attribute aggregate
+bool TAttributeArgs::getInt(int& value, int argNum) const
+{
+ const TConstUnion* intConst = getConstUnion(EbtInt, argNum);
+
+ if (intConst == nullptr)
+ return false;
+
+ value = intConst->getIConst();
+ return true;
+}
+
+// extract strings out of attribute arguments stored in attribute aggregate.
+// convert to lower case if convertToLower is true (for case-insensitive compare convenience)
+bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
+{
+ const TConstUnion* stringConst = getConstUnion(EbtString, argNum);
+
+ if (stringConst == nullptr)
+ return false;
+
+ value = *stringConst->getSConst();
+
+ // Convenience.
+ if (convertToLower)
+ std::transform(value.begin(), value.end(), value.begin(), ::tolower);
+
+ return true;
+}
+
+// How many arguments were supplied?
+int TAttributeArgs::size() const
+{
+ return args == nullptr ? 0 : (int)args->getSequence().size();
+}
+
+// Helper to get attribute const union. Returns nullptr on failure.
+const TConstUnion* TAttributeArgs::getConstUnion(TBasicType basicType, int argNum) const
+{
+ if (args == nullptr)
+ return nullptr;
+
+ if (argNum >= (int)args->getSequence().size())
+ return nullptr;
+
+    // getAsConstantUnion() returns nullptr for non-constant arguments;
+    // guard it before dereferencing.
+    const TIntermConstantUnion* argConst = args->getSequence()[argNum]->getAsConstantUnion();
+    if (argConst == nullptr)
+        return nullptr;
+
+    const TConstUnion* constVal = &argConst->getConstArray()[0];
+    if (constVal->getType() != basicType)
+        return nullptr;
+
+    return constVal;
+}
+
+// Implementation of TParseContext parts of attributes
+TAttributeType TParseContext::attributeFromName(const TString& name) const
+{
+ if (name == "branch" || name == "dont_flatten")
+ return EatBranch;
+ else if (name == "flatten")
+ return EatFlatten;
+ else if (name == "unroll")
+ return EatUnroll;
+ else if (name == "loop" || name == "dont_unroll")
+ return EatLoop;
+ else if (name == "dependency_infinite")
+ return EatDependencyInfinite;
+ else if (name == "dependency_length")
+ return EatDependencyLength;
+ else
+ return EatNone;
+}
+
+// Make an initial leaf for the grammar from a no-argument attribute
+TAttributes* TParseContext::makeAttributes(const TString& identifier) const
+{
+ TAttributes *attributes = nullptr;
+ attributes = NewPoolObject(attributes);
+ TAttributeArgs args = { attributeFromName(identifier), nullptr };
+ attributes->push_back(args);
+ return attributes;
+}
+
+// Make an initial leaf for the grammar from a one-argument attribute
+TAttributes* TParseContext::makeAttributes(const TString& identifier, TIntermNode* node) const
+{
+ TAttributes *attributes = nullptr;
+ attributes = NewPoolObject(attributes);
+
+ // for now, node is always a simple single expression, but other code expects
+ // a list, so make it so
+ TIntermAggregate* agg = intermediate.makeAggregate(node);
+ TAttributeArgs args = { attributeFromName(identifier), agg };
+ attributes->push_back(args);
+ return attributes;
+}
+
+// Merge two sets of attributes into a single set.
+// The second argument is destructively consumed.
+TAttributes* TParseContext::mergeAttributes(TAttributes* attr1, TAttributes* attr2) const
+{
+ attr1->splice(attr1->end(), *attr2);
+ return attr1;
+}
+
+//
+// Selection attributes
+//
+void TParseContext::handleSelectionAttributes(const TAttributes& attributes, TIntermNode* node)
+{
+ TIntermSelection* selection = node->getAsSelectionNode();
+ if (selection == nullptr)
+ return;
+
+ for (auto it = attributes.begin(); it != attributes.end(); ++it) {
+ if (it->size() > 0) {
+ warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
+ continue;
+ }
+
+ switch (it->name) {
+ case EatFlatten:
+ selection->setFlatten();
+ break;
+ case EatBranch:
+ selection->setDontFlatten();
+ break;
+ default:
+ warn(node->getLoc(), "attribute does not apply to a selection", "", "");
+ break;
+ }
+ }
+}
+
+//
+// Switch attributes
+//
+void TParseContext::handleSwitchAttributes(const TAttributes& attributes, TIntermNode* node)
+{
+ TIntermSwitch* selection = node->getAsSwitchNode();
+ if (selection == nullptr)
+ return;
+
+ for (auto it = attributes.begin(); it != attributes.end(); ++it) {
+ if (it->size() > 0) {
+ warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
+ continue;
+ }
+
+ switch (it->name) {
+ case EatFlatten:
+ selection->setFlatten();
+ break;
+ case EatBranch:
+ selection->setDontFlatten();
+ break;
+ default:
+ warn(node->getLoc(), "attribute does not apply to a switch", "", "");
+ break;
+ }
+ }
+}
+
+//
+// Loop attributes
+//
+void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermNode* node)
+{
+ TIntermLoop* loop = node->getAsLoopNode();
+ if (loop == nullptr) {
+ // the actual loop might be part of a sequence
+ TIntermAggregate* agg = node->getAsAggregate();
+ if (agg == nullptr)
+ return;
+ for (auto it = agg->getSequence().begin(); it != agg->getSequence().end(); ++it) {
+ loop = (*it)->getAsLoopNode();
+ if (loop != nullptr)
+ break;
+ }
+ if (loop == nullptr)
+ return;
+ }
+
+ for (auto it = attributes.begin(); it != attributes.end(); ++it) {
+ if (it->name != EatDependencyLength && it->size() > 0) {
+ warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
+ continue;
+ }
+
+ int value;
+ switch (it->name) {
+ case EatUnroll:
+ loop->setUnroll();
+ break;
+ case EatLoop:
+ loop->setDontUnroll();
+ break;
+ case EatDependencyInfinite:
+ loop->setLoopDependency(TIntermLoop::dependencyInfinite);
+ break;
+ case EatDependencyLength:
+ if (it->size() == 1 && it->getInt(value)) {
+ if (value <= 0)
+ error(node->getLoc(), "must be positive", "dependency_length", "");
+ loop->setLoopDependency(value);
+ } else
+ warn(node->getLoc(), "expected a single integer argument", "dependency_length", "");
+ break;
+ default:
+ warn(node->getLoc(), "attribute does not apply to a loop", "", "");
+ break;
+ }
+ }
+}
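+// Illustrative GLSL, assuming GL_EXT_control_flow_attributes:
+//   [[unroll]]               for (int i = 0; i < 4; ++i) a[i] = b[i];
+//   [[dependency_length(4)]] for (int i = k; i < n; ++i) x[i] = x[i - 4];
+// The second form carries a single integer argument, read via getInt() above.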
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/attribute.h b/src/3rdparty/glslang/glslang/MachineIndependent/attribute.h
new file mode 100644
index 0000000..8d0c5bc
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/attribute.h
@@ -0,0 +1,102 @@
+//
+// Copyright (C) 2017 LunarG, Inc.
+// Copyright (C) 2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _ATTRIBUTE_INCLUDED_
+#define _ATTRIBUTE_INCLUDED_
+
+#include "../Include/Common.h"
+#include "../Include/ConstantUnion.h"
+
+namespace glslang {
+
+ enum TAttributeType {
+ EatNone,
+ EatAllow_uav_condition,
+ EatBranch,
+ EatCall,
+ EatDomain,
+ EatEarlyDepthStencil,
+ EatFastOpt,
+ EatFlatten,
+ EatForceCase,
+ EatInstance,
+ EatMaxTessFactor,
+ EatNumThreads,
+ EatMaxVertexCount,
+ EatOutputControlPoints,
+ EatOutputTopology,
+ EatPartitioning,
+ EatPatchConstantFunc,
+ EatPatchSize,
+ EatUnroll,
+ EatLoop,
+ EatBinding,
+ EatGlobalBinding,
+ EatLocation,
+ EatInputAttachment,
+ EatBuiltIn,
+ EatPushConstant,
+ EatConstantId,
+ EatDependencyInfinite,
+ EatDependencyLength
+ };
+
+ class TIntermAggregate;
+
+ struct TAttributeArgs {
+ TAttributeType name;
+ const TIntermAggregate* args;
+
+ // Obtain the attribute argument as an integer.
+ // Returns false if it cannot be obtained.
+ bool getInt(int& value, int argNum = 0) const;
+
+ // Obtain the attribute argument as a string, optionally converted to lower case.
+ // Returns false if it cannot be obtained.
+ bool getString(TString& value, int argNum = 0, bool convertToLower = true) const;
+
+ // How many arguments were provided to the attribute?
+ int size() const;
+
+ protected:
+ const TConstUnion* getConstUnion(TBasicType basicType, int argNum) const;
+ };
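+ // Illustrative use (not upstream code): for an attribute parsed from
+ // [[dependency_length(4)]],
+ //   int v;
+ //   attr.getInt(v);     // returns true, v == 4
+ //   attr.getInt(v, 1);  // returns false: only one argument was given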
+
+ typedef TList<TAttributeArgs> TAttributes;
+
+} // end namespace glslang
+
+#endif // _ATTRIBUTE_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/gl_types.h b/src/3rdparty/glslang/glslang/MachineIndependent/gl_types.h
new file mode 100644
index 0000000..c9fee9e
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/gl_types.h
@@ -0,0 +1,214 @@
+/*
+** Copyright (c) 2013 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+#pragma once
+
+#define GL_FLOAT 0x1406
+#define GL_FLOAT_VEC2 0x8B50
+#define GL_FLOAT_VEC3 0x8B51
+#define GL_FLOAT_VEC4 0x8B52
+
+#define GL_DOUBLE 0x140A
+#define GL_DOUBLE_VEC2 0x8FFC
+#define GL_DOUBLE_VEC3 0x8FFD
+#define GL_DOUBLE_VEC4 0x8FFE
+
+#define GL_INT 0x1404
+#define GL_INT_VEC2 0x8B53
+#define GL_INT_VEC3 0x8B54
+#define GL_INT_VEC4 0x8B55
+
+#define GL_UNSIGNED_INT 0x1405
+#define GL_UNSIGNED_INT_VEC2 0x8DC6
+#define GL_UNSIGNED_INT_VEC3 0x8DC7
+#define GL_UNSIGNED_INT_VEC4 0x8DC8
+
+#define GL_INT64_ARB 0x140E
+#define GL_INT64_VEC2_ARB 0x8FE9
+#define GL_INT64_VEC3_ARB 0x8FEA
+#define GL_INT64_VEC4_ARB 0x8FEB
+
+#define GL_UNSIGNED_INT64_ARB 0x140F
+#define GL_UNSIGNED_INT64_VEC2_ARB 0x8FE5
+#define GL_UNSIGNED_INT64_VEC3_ARB 0x8FE6
+#define GL_UNSIGNED_INT64_VEC4_ARB 0x8FE7
+
+#define GL_BOOL 0x8B56
+#define GL_BOOL_VEC2 0x8B57
+#define GL_BOOL_VEC3 0x8B58
+#define GL_BOOL_VEC4 0x8B59
+
+#define GL_FLOAT_MAT2 0x8B5A
+#define GL_FLOAT_MAT3 0x8B5B
+#define GL_FLOAT_MAT4 0x8B5C
+#define GL_FLOAT_MAT2x3 0x8B65
+#define GL_FLOAT_MAT2x4 0x8B66
+#define GL_FLOAT_MAT3x2 0x8B67
+#define GL_FLOAT_MAT3x4 0x8B68
+#define GL_FLOAT_MAT4x2 0x8B69
+#define GL_FLOAT_MAT4x3 0x8B6A
+
+#define GL_DOUBLE_MAT2 0x8F46
+#define GL_DOUBLE_MAT3 0x8F47
+#define GL_DOUBLE_MAT4 0x8F48
+#define GL_DOUBLE_MAT2x3 0x8F49
+#define GL_DOUBLE_MAT2x4 0x8F4A
+#define GL_DOUBLE_MAT3x2 0x8F4B
+#define GL_DOUBLE_MAT3x4 0x8F4C
+#define GL_DOUBLE_MAT4x2 0x8F4D
+#define GL_DOUBLE_MAT4x3 0x8F4E
+
+#ifdef AMD_EXTENSIONS
+// These constants are borrowed from the NV_gpu_shader5 extension.
+#define GL_FLOAT16_NV 0x8FF8
+#define GL_FLOAT16_VEC2_NV 0x8FF9
+#define GL_FLOAT16_VEC3_NV 0x8FFA
+#define GL_FLOAT16_VEC4_NV 0x8FFB
+
+#define GL_FLOAT16_MAT2_AMD 0x91C5
+#define GL_FLOAT16_MAT3_AMD 0x91C6
+#define GL_FLOAT16_MAT4_AMD 0x91C7
+#define GL_FLOAT16_MAT2x3_AMD 0x91C8
+#define GL_FLOAT16_MAT2x4_AMD 0x91C9
+#define GL_FLOAT16_MAT3x2_AMD 0x91CA
+#define GL_FLOAT16_MAT3x4_AMD 0x91CB
+#define GL_FLOAT16_MAT4x2_AMD 0x91CC
+#define GL_FLOAT16_MAT4x3_AMD 0x91CD
+#endif
+
+#define GL_SAMPLER_1D 0x8B5D
+#define GL_SAMPLER_2D 0x8B5E
+#define GL_SAMPLER_3D 0x8B5F
+#define GL_SAMPLER_CUBE 0x8B60
+#define GL_SAMPLER_BUFFER 0x8DC2
+#define GL_SAMPLER_1D_ARRAY 0x8DC0
+#define GL_SAMPLER_2D_ARRAY 0x8DC1
+#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3
+#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4
+#define GL_SAMPLER_CUBE_SHADOW 0x8DC5
+#define GL_SAMPLER_1D_SHADOW 0x8B61
+#define GL_SAMPLER_2D_SHADOW 0x8B62
+#define GL_SAMPLER_2D_RECT 0x8B63
+#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64
+#define GL_SAMPLER_2D_MULTISAMPLE 0x9108
+#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B
+#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D
+#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
+
+#ifdef AMD_EXTENSIONS
+#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE
+#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF
+#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0
+#define GL_FLOAT16_SAMPLER_CUBE_AMD 0x91D1
+#define GL_FLOAT16_SAMPLER_2D_RECT_AMD 0x91D2
+#define GL_FLOAT16_SAMPLER_1D_ARRAY_AMD 0x91D3
+#define GL_FLOAT16_SAMPLER_2D_ARRAY_AMD 0x91D4
+#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD 0x91D5
+#define GL_FLOAT16_SAMPLER_BUFFER_AMD 0x91D6
+#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD 0x91D7
+#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD 0x91D8
+
+#define GL_FLOAT16_SAMPLER_1D_SHADOW_AMD 0x91D9
+#define GL_FLOAT16_SAMPLER_2D_SHADOW_AMD 0x91DA
+#define GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD 0x91DB
+#define GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD 0x91DC
+#define GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD 0x91DD
+#define GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD 0x91DE
+#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD 0x91DF
+
+#define GL_FLOAT16_IMAGE_1D_AMD 0x91E0
+#define GL_FLOAT16_IMAGE_2D_AMD 0x91E1
+#define GL_FLOAT16_IMAGE_3D_AMD 0x91E2
+#define GL_FLOAT16_IMAGE_2D_RECT_AMD 0x91E3
+#define GL_FLOAT16_IMAGE_CUBE_AMD 0x91E4
+#define GL_FLOAT16_IMAGE_1D_ARRAY_AMD 0x91E5
+#define GL_FLOAT16_IMAGE_2D_ARRAY_AMD 0x91E6
+#define GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD 0x91E7
+#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8
+#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9
+#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA
+#endif
+
+#define GL_INT_SAMPLER_1D 0x8DC9
+#define GL_INT_SAMPLER_2D 0x8DCA
+#define GL_INT_SAMPLER_3D 0x8DCB
+#define GL_INT_SAMPLER_CUBE 0x8DCC
+#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE
+#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF
+#define GL_INT_SAMPLER_2D_RECT 0x8DCD
+#define GL_INT_SAMPLER_BUFFER 0x8DD0
+#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109
+#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E
+
+#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1
+#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2
+#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3
+#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4
+#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6
+#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7
+#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A
+
+#define GL_IMAGE_1D 0x904C
+#define GL_IMAGE_2D 0x904D
+#define GL_IMAGE_3D 0x904E
+#define GL_IMAGE_2D_RECT 0x904F
+#define GL_IMAGE_CUBE 0x9050
+#define GL_IMAGE_BUFFER 0x9051
+#define GL_IMAGE_1D_ARRAY 0x9052
+#define GL_IMAGE_2D_ARRAY 0x9053
+#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054
+#define GL_IMAGE_2D_MULTISAMPLE 0x9055
+#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056
+#define GL_INT_IMAGE_1D 0x9057
+#define GL_INT_IMAGE_2D 0x9058
+#define GL_INT_IMAGE_3D 0x9059
+#define GL_INT_IMAGE_2D_RECT 0x905A
+#define GL_INT_IMAGE_CUBE 0x905B
+#define GL_INT_IMAGE_BUFFER 0x905C
+#define GL_INT_IMAGE_1D_ARRAY 0x905D
+#define GL_INT_IMAGE_2D_ARRAY 0x905E
+#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F
+#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060
+#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061
+#define GL_UNSIGNED_INT_IMAGE_1D 0x9062
+#define GL_UNSIGNED_INT_IMAGE_2D 0x9063
+#define GL_UNSIGNED_INT_IMAGE_3D 0x9064
+#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065
+#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066
+#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067
+#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068
+#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069
+#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C
+
+#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/glslang.y b/src/3rdparty/glslang/glslang/MachineIndependent/glslang.y
new file mode 100644
index 0000000..b5691a2
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/glslang.y
@@ -0,0 +1,3796 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+/**
+ * This is the bison grammar and its productions for parsing all versions of
+ * the GLSL shading language.
+ */
+%{
+
+/* Based on:
+ANSI C Yacc grammar
+
+In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a
+matching Lex specification) for the April 30, 1985 draft version of the
+ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that
+original, as mentioned in the answer to question 17.25 of the comp.lang.c
+FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z.
+
+I intend to keep this version as close to the current C Standard grammar as
+possible; please let me know if you discover discrepancies.
+
+Jutta Degener, 1995
+*/
+
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "../Public/ShaderLang.h"
+#include "attribute.h"
+
+using namespace glslang;
+
+%}
+
+%define parse.error verbose
+
+%union {
+ struct {
+ glslang::TSourceLoc loc;
+ union {
+ glslang::TString *string;
+ int i;
+ unsigned int u;
+ long long i64;
+ unsigned long long u64;
+ bool b;
+ double d;
+ };
+ glslang::TSymbol* symbol;
+ } lex;
+ struct {
+ glslang::TSourceLoc loc;
+ glslang::TOperator op;
+ union {
+ TIntermNode* intermNode;
+ glslang::TIntermNodePair nodePair;
+ glslang::TIntermTyped* intermTypedNode;
+ glslang::TAttributes* attributes;
+ };
+ union {
+ glslang::TPublicType type;
+ glslang::TFunction* function;
+ glslang::TParameter param;
+ glslang::TTypeLoc typeLine;
+ glslang::TTypeList* typeList;
+ glslang::TArraySizes* arraySizes;
+ glslang::TIdentifierList* identifierList;
+ };
+ glslang::TArraySizes* typeParameters;
+ } interm;
+}
+
+%{
+
+/* Windows-only pragmas */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4065)
+ #pragma warning(disable : 4127)
+ #pragma warning(disable : 4244)
+#endif
+
+#define parseContext (*pParseContext)
+#define yyerror(context, msg) context->parserError(msg)
+
+extern int yylex(YYSTYPE*, TParseContext&);
+
+%}
+
+%parse-param {glslang::TParseContext* pParseContext}
+%lex-param {parseContext}
+%pure-parser // enable thread safety
+%expect 1 // One shift/reduce conflict, from the classic dangling-else ambiguity
+
+%token <lex> ATTRIBUTE VARYING
+%token <lex> FLOAT16_T FLOAT FLOAT32_T DOUBLE FLOAT64_T
+%token <lex> CONST BOOL INT UINT INT64_T UINT64_T INT32_T UINT32_T INT16_T UINT16_T INT8_T UINT8_T
+%token <lex> BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT SUBROUTINE
+%token <lex> BVEC2 BVEC3 BVEC4
+%token <lex> IVEC2 IVEC3 IVEC4
+%token <lex> UVEC2 UVEC3 UVEC4
+%token <lex> I64VEC2 I64VEC3 I64VEC4
+%token <lex> U64VEC2 U64VEC3 U64VEC4
+%token <lex> I32VEC2 I32VEC3 I32VEC4
+%token <lex> U32VEC2 U32VEC3 U32VEC4
+%token <lex> I16VEC2 I16VEC3 I16VEC4
+%token <lex> U16VEC2 U16VEC3 U16VEC4
+%token <lex> I8VEC2 I8VEC3 I8VEC4
+%token <lex> U8VEC2 U8VEC3 U8VEC4
+%token <lex> VEC2 VEC3 VEC4
+%token <lex> MAT2 MAT3 MAT4 CENTROID IN OUT INOUT
+%token <lex> UNIFORM PATCH SAMPLE BUFFER SHARED NONUNIFORM PAYLOADNV PAYLOADINNV HITATTRNV CALLDATANV CALLDATAINNV
+%token <lex> COHERENT VOLATILE RESTRICT READONLY WRITEONLY DEVICECOHERENT QUEUEFAMILYCOHERENT WORKGROUPCOHERENT SUBGROUPCOHERENT NONPRIVATE
+%token <lex> DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4
+%token <lex> F16VEC2 F16VEC3 F16VEC4 F16MAT2 F16MAT3 F16MAT4
+%token <lex> F32VEC2 F32VEC3 F32VEC4 F32MAT2 F32MAT3 F32MAT4
+%token <lex> F64VEC2 F64VEC3 F64VEC4 F64MAT2 F64MAT3 F64MAT4
+%token <lex> NOPERSPECTIVE FLAT SMOOTH LAYOUT EXPLICITINTERPAMD PERVERTEXNV PERPRIMITIVENV PERVIEWNV PERTASKNV
+
+%token <lex> MAT2X2 MAT2X3 MAT2X4
+%token <lex> MAT3X2 MAT3X3 MAT3X4
+%token <lex> MAT4X2 MAT4X3 MAT4X4
+%token <lex> DMAT2X2 DMAT2X3 DMAT2X4
+%token <lex> DMAT3X2 DMAT3X3 DMAT3X4
+%token <lex> DMAT4X2 DMAT4X3 DMAT4X4
+%token <lex> F16MAT2X2 F16MAT2X3 F16MAT2X4
+%token <lex> F16MAT3X2 F16MAT3X3 F16MAT3X4
+%token <lex> F16MAT4X2 F16MAT4X3 F16MAT4X4
+%token <lex> F32MAT2X2 F32MAT2X3 F32MAT2X4
+%token <lex> F32MAT3X2 F32MAT3X3 F32MAT3X4
+%token <lex> F32MAT4X2 F32MAT4X3 F32MAT4X4
+%token <lex> F64MAT2X2 F64MAT2X3 F64MAT2X4
+%token <lex> F64MAT3X2 F64MAT3X3 F64MAT3X4
+%token <lex> F64MAT4X2 F64MAT4X3 F64MAT4X4
+%token <lex> ATOMIC_UINT
+%token <lex> ACCSTRUCTNV
+%token <lex> FCOOPMATNV
+
+// combined image/sampler
+%token <lex> SAMPLER1D SAMPLER2D SAMPLER3D SAMPLERCUBE SAMPLER1DSHADOW SAMPLER2DSHADOW
+%token <lex> SAMPLERCUBESHADOW SAMPLER1DARRAY SAMPLER2DARRAY SAMPLER1DARRAYSHADOW
+%token <lex> SAMPLER2DARRAYSHADOW ISAMPLER1D ISAMPLER2D ISAMPLER3D ISAMPLERCUBE
+%token <lex> ISAMPLER1DARRAY ISAMPLER2DARRAY USAMPLER1D USAMPLER2D USAMPLER3D
+%token <lex> USAMPLERCUBE USAMPLER1DARRAY USAMPLER2DARRAY
+%token <lex> SAMPLER2DRECT SAMPLER2DRECTSHADOW ISAMPLER2DRECT USAMPLER2DRECT
+%token <lex> SAMPLERBUFFER ISAMPLERBUFFER USAMPLERBUFFER
+%token <lex> SAMPLERCUBEARRAY SAMPLERCUBEARRAYSHADOW
+%token <lex> ISAMPLERCUBEARRAY USAMPLERCUBEARRAY
+%token <lex> SAMPLER2DMS ISAMPLER2DMS USAMPLER2DMS
+%token <lex> SAMPLER2DMSARRAY ISAMPLER2DMSARRAY USAMPLER2DMSARRAY
+%token <lex> SAMPLEREXTERNALOES
+%token <lex> SAMPLEREXTERNAL2DY2YEXT
+
+%token <lex> F16SAMPLER1D F16SAMPLER2D F16SAMPLER3D F16SAMPLER2DRECT F16SAMPLERCUBE
+%token <lex> F16SAMPLER1DARRAY F16SAMPLER2DARRAY F16SAMPLERCUBEARRAY
+%token <lex> F16SAMPLERBUFFER F16SAMPLER2DMS F16SAMPLER2DMSARRAY
+%token <lex> F16SAMPLER1DSHADOW F16SAMPLER2DSHADOW F16SAMPLER1DARRAYSHADOW F16SAMPLER2DARRAYSHADOW
+%token <lex> F16SAMPLER2DRECTSHADOW F16SAMPLERCUBESHADOW F16SAMPLERCUBEARRAYSHADOW
+
+// pure sampler
+%token <lex> SAMPLER SAMPLERSHADOW
+
+// texture without sampler
+%token <lex> TEXTURE1D TEXTURE2D TEXTURE3D TEXTURECUBE
+%token <lex> TEXTURE1DARRAY TEXTURE2DARRAY
+%token <lex> ITEXTURE1D ITEXTURE2D ITEXTURE3D ITEXTURECUBE
+%token <lex> ITEXTURE1DARRAY ITEXTURE2DARRAY UTEXTURE1D UTEXTURE2D UTEXTURE3D
+%token <lex> UTEXTURECUBE UTEXTURE1DARRAY UTEXTURE2DARRAY
+%token <lex> TEXTURE2DRECT ITEXTURE2DRECT UTEXTURE2DRECT
+%token <lex> TEXTUREBUFFER ITEXTUREBUFFER UTEXTUREBUFFER
+%token <lex> TEXTURECUBEARRAY ITEXTURECUBEARRAY UTEXTURECUBEARRAY
+%token <lex> TEXTURE2DMS ITEXTURE2DMS UTEXTURE2DMS
+%token <lex> TEXTURE2DMSARRAY ITEXTURE2DMSARRAY UTEXTURE2DMSARRAY
+
+%token <lex> F16TEXTURE1D F16TEXTURE2D F16TEXTURE3D F16TEXTURE2DRECT F16TEXTURECUBE
+%token <lex> F16TEXTURE1DARRAY F16TEXTURE2DARRAY F16TEXTURECUBEARRAY
+%token <lex> F16TEXTUREBUFFER F16TEXTURE2DMS F16TEXTURE2DMSARRAY
+
+// input attachments
+%token <lex> SUBPASSINPUT SUBPASSINPUTMS ISUBPASSINPUT ISUBPASSINPUTMS USUBPASSINPUT USUBPASSINPUTMS
+%token <lex> F16SUBPASSINPUT F16SUBPASSINPUTMS
+
+%token <lex> IMAGE1D IIMAGE1D UIMAGE1D IMAGE2D IIMAGE2D
+%token <lex> UIMAGE2D IMAGE3D IIMAGE3D UIMAGE3D
+%token <lex> IMAGE2DRECT IIMAGE2DRECT UIMAGE2DRECT
+%token <lex> IMAGECUBE IIMAGECUBE UIMAGECUBE
+%token <lex> IMAGEBUFFER IIMAGEBUFFER UIMAGEBUFFER
+%token <lex> IMAGE1DARRAY IIMAGE1DARRAY UIMAGE1DARRAY
+%token <lex> IMAGE2DARRAY IIMAGE2DARRAY UIMAGE2DARRAY
+%token <lex> IMAGECUBEARRAY IIMAGECUBEARRAY UIMAGECUBEARRAY
+%token <lex> IMAGE2DMS IIMAGE2DMS UIMAGE2DMS
+%token <lex> IMAGE2DMSARRAY IIMAGE2DMSARRAY UIMAGE2DMSARRAY
+
+%token <lex> F16IMAGE1D F16IMAGE2D F16IMAGE3D F16IMAGE2DRECT
+%token <lex> F16IMAGECUBE F16IMAGE1DARRAY F16IMAGE2DARRAY F16IMAGECUBEARRAY
+%token <lex> F16IMAGEBUFFER F16IMAGE2DMS F16IMAGE2DMSARRAY
+
+%token <lex> STRUCT VOID WHILE
+
+%token <lex> IDENTIFIER TYPE_NAME
+%token <lex> FLOATCONSTANT DOUBLECONSTANT INT16CONSTANT UINT16CONSTANT INT32CONSTANT UINT32CONSTANT INTCONSTANT UINTCONSTANT INT64CONSTANT UINT64CONSTANT BOOLCONSTANT FLOAT16CONSTANT
+%token <lex> LEFT_OP RIGHT_OP
+%token <lex> INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP
+%token <lex> AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN
+%token <lex> MOD_ASSIGN LEFT_ASSIGN RIGHT_ASSIGN AND_ASSIGN XOR_ASSIGN OR_ASSIGN
+%token <lex> SUB_ASSIGN
+
+%token <lex> LEFT_PAREN RIGHT_PAREN LEFT_BRACKET RIGHT_BRACKET LEFT_BRACE RIGHT_BRACE DOT
+%token <lex> COMMA COLON EQUAL SEMICOLON BANG DASH TILDE PLUS STAR SLASH PERCENT
+%token <lex> LEFT_ANGLE RIGHT_ANGLE VERTICAL_BAR CARET AMPERSAND QUESTION
+
+%token <lex> INVARIANT PRECISE
+%token <lex> HIGH_PRECISION MEDIUM_PRECISION LOW_PRECISION PRECISION
+
+%token <lex> PACKED RESOURCE SUPERP
+
+%type <interm> assignment_operator unary_operator
+%type <interm.intermTypedNode> variable_identifier primary_expression postfix_expression
+%type <interm.intermTypedNode> expression integer_expression assignment_expression
+%type <interm.intermTypedNode> unary_expression multiplicative_expression additive_expression
+%type <interm.intermTypedNode> relational_expression equality_expression
+%type <interm.intermTypedNode> conditional_expression constant_expression
+%type <interm.intermTypedNode> logical_or_expression logical_xor_expression logical_and_expression
+%type <interm.intermTypedNode> shift_expression and_expression exclusive_or_expression inclusive_or_expression
+%type <interm.intermTypedNode> function_call initializer initializer_list condition conditionopt
+
+%type <interm.intermNode> translation_unit function_definition
+%type <interm.intermNode> statement simple_statement
+%type <interm.intermNode> statement_list switch_statement_list compound_statement
+%type <interm.intermNode> declaration_statement selection_statement selection_statement_nonattributed expression_statement
+%type <interm.intermNode> switch_statement switch_statement_nonattributed case_label
+%type <interm.intermNode> declaration external_declaration
+%type <interm.intermNode> for_init_statement compound_statement_no_new_scope
+%type <interm.nodePair> selection_rest_statement for_rest_statement
+%type <interm.intermNode> iteration_statement iteration_statement_nonattributed jump_statement statement_no_new_scope statement_scoped
+%type <interm> single_declaration init_declarator_list
+
+%type <interm> parameter_declaration parameter_declarator parameter_type_specifier
+
+%type <interm> array_specifier
+%type <interm.type> precise_qualifier invariant_qualifier interpolation_qualifier storage_qualifier precision_qualifier
+%type <interm.type> layout_qualifier layout_qualifier_id_list layout_qualifier_id
+%type <interm.type> non_uniform_qualifier
+
+%type <interm.typeParameters> type_parameter_specifier
+%type <interm.typeParameters> type_parameter_specifier_opt
+%type <interm.typeParameters> type_parameter_specifier_list
+
+%type <interm.type> type_qualifier fully_specified_type type_specifier
+%type <interm.type> single_type_qualifier
+%type <interm.type> type_specifier_nonarray
+%type <interm.type> struct_specifier
+%type <interm.typeLine> struct_declarator
+%type <interm.typeList> struct_declarator_list struct_declaration struct_declaration_list type_name_list
+%type <interm> block_structure
+%type <interm.function> function_header function_declarator
+%type <interm.function> function_header_with_parameters
+%type <interm> function_call_header_with_parameters function_call_header_no_parameters function_call_generic function_prototype
+%type <interm> function_call_or_method function_identifier function_call_header
+
+%type <interm.identifierList> identifier_list
+
+%type <interm.attributes> attribute attribute_list single_attribute
+
+%start translation_unit
+%%
+
+variable_identifier
+ : IDENTIFIER {
+ $$ = parseContext.handleVariable($1.loc, $1.symbol, $1.string);
+ }
+ ;
+
+primary_expression
+ : variable_identifier {
+ $$ = $1;
+ }
+ | INT32CONSTANT {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true);
+ }
+ | UINT32CONSTANT {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true);
+ }
+ | INTCONSTANT {
+ $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true);
+ }
+ | UINTCONSTANT {
+ parseContext.fullIntegerCheck($1.loc, "unsigned literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true);
+ }
+ | INT64CONSTANT {
+ parseContext.int64Check($1.loc, "64-bit integer literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.i64, $1.loc, true);
+ }
+ | UINT64CONSTANT {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.u64, $1.loc, true);
+ }
+ | INT16CONSTANT {
+ parseContext.explicitInt16Check($1.loc, "16-bit integer literal");
+ $$ = parseContext.intermediate.addConstantUnion((short)$1.i, $1.loc, true);
+ }
+ | UINT16CONSTANT {
+ parseContext.explicitInt16Check($1.loc, "16-bit unsigned integer literal");
+ $$ = parseContext.intermediate.addConstantUnion((unsigned short)$1.u, $1.loc, true);
+ }
+ | FLOATCONSTANT {
+ $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat, $1.loc, true);
+ }
+ | DOUBLECONSTANT {
+ parseContext.doubleCheck($1.loc, "double literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true);
+ }
+ | FLOAT16CONSTANT {
+ parseContext.float16Check($1.loc, "half float literal");
+ $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat16, $1.loc, true);
+ }
+ | BOOLCONSTANT {
+ $$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true);
+ }
+ | LEFT_PAREN expression RIGHT_PAREN {
+ $$ = $2;
+ if ($$->getAsConstantUnion())
+ $$->getAsConstantUnion()->setExpression();
+ }
+ ;
+
+postfix_expression
+ : primary_expression {
+ $$ = $1;
+ }
+ | postfix_expression LEFT_BRACKET integer_expression RIGHT_BRACKET {
+ $$ = parseContext.handleBracketDereference($2.loc, $1, $3);
+ }
+ | function_call {
+ $$ = $1;
+ }
+ | postfix_expression DOT IDENTIFIER {
+ $$ = parseContext.handleDotDereference($3.loc, $1, *$3.string);
+ }
+ | postfix_expression INC_OP {
+ parseContext.variableCheck($1);
+ parseContext.lValueErrorCheck($2.loc, "++", $1);
+ $$ = parseContext.handleUnaryMath($2.loc, "++", EOpPostIncrement, $1);
+ }
+ | postfix_expression DEC_OP {
+ parseContext.variableCheck($1);
+ parseContext.lValueErrorCheck($2.loc, "--", $1);
+ $$ = parseContext.handleUnaryMath($2.loc, "--", EOpPostDecrement, $1);
+ }
+ ;
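+// Grammar note (illustrative): postfix expressions cover forms such as
+// a[i], s.member, f(x), and x++ / x--.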
+
+integer_expression
+ : expression {
+ parseContext.integerCheck($1, "[]");
+ $$ = $1;
+ }
+ ;
+
+function_call
+ : function_call_or_method {
+ $$ = parseContext.handleFunctionCall($1.loc, $1.function, $1.intermNode);
+ delete $1.function;
+ }
+ ;
+
+function_call_or_method
+ : function_call_generic {
+ $$ = $1;
+ }
+ ;
+
+function_call_generic
+ : function_call_header_with_parameters RIGHT_PAREN {
+ $$ = $1;
+ $$.loc = $2.loc;
+ }
+ | function_call_header_no_parameters RIGHT_PAREN {
+ $$ = $1;
+ $$.loc = $2.loc;
+ }
+ ;
+
+function_call_header_no_parameters
+ : function_call_header VOID {
+ $$ = $1;
+ }
+ | function_call_header {
+ $$ = $1;
+ }
+ ;
+
+function_call_header_with_parameters
+ : function_call_header assignment_expression {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy($2->getType());
+ $1.function->addParameter(param);
+ $$.function = $1.function;
+ $$.intermNode = $2;
+ }
+ | function_call_header_with_parameters COMMA assignment_expression {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy($3->getType());
+ $1.function->addParameter(param);
+ $$.function = $1.function;
+ $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, $3, $2.loc);
+ }
+ ;
+
+function_call_header
+ : function_identifier LEFT_PAREN {
+ $$ = $1;
+ }
+ ;
+
+// Grammar Note: Constructors look like functions, but are recognized as types.
+
+function_identifier
+ : type_specifier {
+ // Constructor
+ $$.intermNode = 0;
+ $$.function = parseContext.handleConstructorCall($1.loc, $1);
+ }
+ | postfix_expression {
+ //
+ // Should be a method or subroutine call, but we haven't recognized the arguments yet.
+ //
+ $$.function = 0;
+ $$.intermNode = 0;
+
+ TIntermMethod* method = $1->getAsMethodNode();
+ if (method) {
+ $$.function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength);
+ $$.intermNode = method->getObject();
+ } else {
+ TIntermSymbol* symbol = $1->getAsSymbolNode();
+ if (symbol) {
+ parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName());
+ TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid));
+ $$.function = function;
+ } else
+ parseContext.error($1->getLoc(), "function call, method, or subroutine call expected", "", "");
+ }
+
+ if ($$.function == 0) {
+ // error recovery
+ TString* empty = NewPoolTString("");
+ $$.function = new TFunction(empty, TType(EbtVoid), EOpNull);
+ }
+ }
+ | non_uniform_qualifier {
+ // Constructor
+ $$.intermNode = 0;
+ $$.function = parseContext.handleConstructorCall($1.loc, $1);
+ }
+ ;
+
+unary_expression
+ : postfix_expression {
+ parseContext.variableCheck($1);
+ $$ = $1;
+ if (TIntermMethod* method = $1->getAsMethodNode())
+ parseContext.error($1->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), "");
+ }
+ | INC_OP unary_expression {
+ parseContext.lValueErrorCheck($1.loc, "++", $2);
+ $$ = parseContext.handleUnaryMath($1.loc, "++", EOpPreIncrement, $2);
+ }
+ | DEC_OP unary_expression {
+ parseContext.lValueErrorCheck($1.loc, "--", $2);
+ $$ = parseContext.handleUnaryMath($1.loc, "--", EOpPreDecrement, $2);
+ }
+ | unary_operator unary_expression {
+ if ($1.op != EOpNull) {
+ char errorOp[2] = {0, 0};
+ switch($1.op) {
+ case EOpNegative: errorOp[0] = '-'; break;
+ case EOpLogicalNot: errorOp[0] = '!'; break;
+ case EOpBitwiseNot: errorOp[0] = '~'; break;
+ default: break; // some compilers want this
+ }
+ $$ = parseContext.handleUnaryMath($1.loc, errorOp, $1.op, $2);
+ } else {
+ $$ = $2;
+ if ($$->getAsConstantUnion())
+ $$->getAsConstantUnion()->setExpression();
+ }
+ }
+ ;
+// Grammar Note: No traditional style type casts.
+
+unary_operator
+ : PLUS { $$.loc = $1.loc; $$.op = EOpNull; }
+ | DASH { $$.loc = $1.loc; $$.op = EOpNegative; }
+ | BANG { $$.loc = $1.loc; $$.op = EOpLogicalNot; }
+ | TILDE { $$.loc = $1.loc; $$.op = EOpBitwiseNot;
+ parseContext.fullIntegerCheck($1.loc, "bitwise not"); }
+ ;
+// Grammar Note: No '*' or '&' unary ops. Pointers are not supported.
+
+multiplicative_expression
+ : unary_expression { $$ = $1; }
+ | multiplicative_expression STAR unary_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "*", EOpMul, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | multiplicative_expression SLASH unary_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "/", EOpDiv, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | multiplicative_expression PERCENT unary_expression {
+ parseContext.fullIntegerCheck($2.loc, "%");
+ $$ = parseContext.handleBinaryMath($2.loc, "%", EOpMod, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+additive_expression
+ : multiplicative_expression { $$ = $1; }
+ | additive_expression PLUS multiplicative_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "+", EOpAdd, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | additive_expression DASH multiplicative_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "-", EOpSub, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+shift_expression
+ : additive_expression { $$ = $1; }
+ | shift_expression LEFT_OP additive_expression {
+ parseContext.fullIntegerCheck($2.loc, "bit shift left");
+ $$ = parseContext.handleBinaryMath($2.loc, "<<", EOpLeftShift, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ | shift_expression RIGHT_OP additive_expression {
+ parseContext.fullIntegerCheck($2.loc, "bit shift right");
+ $$ = parseContext.handleBinaryMath($2.loc, ">>", EOpRightShift, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+relational_expression
+ : shift_expression { $$ = $1; }
+ | relational_expression LEFT_ANGLE shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "<", EOpLessThan, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | relational_expression RIGHT_ANGLE shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, ">", EOpGreaterThan, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | relational_expression LE_OP shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "<=", EOpLessThanEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | relational_expression GE_OP shift_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, ">=", EOpGreaterThanEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+equality_expression
+ : relational_expression { $$ = $1; }
+ | equality_expression EQ_OP relational_expression {
+ parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison");
+ parseContext.opaqueCheck($2.loc, $1->getType(), "==");
+ parseContext.specializationCheck($2.loc, $1->getType(), "==");
+ parseContext.referenceCheck($2.loc, $1->getType(), "==");
+ $$ = parseContext.handleBinaryMath($2.loc, "==", EOpEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ | equality_expression NE_OP relational_expression {
+ parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison");
+ parseContext.opaqueCheck($2.loc, $1->getType(), "!=");
+ parseContext.specializationCheck($2.loc, $1->getType(), "!=");
+ parseContext.referenceCheck($2.loc, $1->getType(), "!=");
+ $$ = parseContext.handleBinaryMath($2.loc, "!=", EOpNotEqual, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+and_expression
+ : equality_expression { $$ = $1; }
+ | and_expression AMPERSAND equality_expression {
+ parseContext.fullIntegerCheck($2.loc, "bitwise and");
+ $$ = parseContext.handleBinaryMath($2.loc, "&", EOpAnd, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+exclusive_or_expression
+ : and_expression { $$ = $1; }
+ | exclusive_or_expression CARET and_expression {
+ parseContext.fullIntegerCheck($2.loc, "bitwise exclusive or");
+ $$ = parseContext.handleBinaryMath($2.loc, "^", EOpExclusiveOr, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+inclusive_or_expression
+ : exclusive_or_expression { $$ = $1; }
+ | inclusive_or_expression VERTICAL_BAR exclusive_or_expression {
+ parseContext.fullIntegerCheck($2.loc, "bitwise inclusive or");
+ $$ = parseContext.handleBinaryMath($2.loc, "|", EOpInclusiveOr, $1, $3);
+ if ($$ == 0)
+ $$ = $1;
+ }
+ ;
+
+logical_and_expression
+ : inclusive_or_expression { $$ = $1; }
+ | logical_and_expression AND_OP inclusive_or_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "&&", EOpLogicalAnd, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+logical_xor_expression
+ : logical_and_expression { $$ = $1; }
+ | logical_xor_expression XOR_OP logical_and_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "^^", EOpLogicalXor, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+logical_or_expression
+ : logical_xor_expression { $$ = $1; }
+ | logical_or_expression OR_OP logical_xor_expression {
+ $$ = parseContext.handleBinaryMath($2.loc, "||", EOpLogicalOr, $1, $3);
+ if ($$ == 0)
+ $$ = parseContext.intermediate.addConstantUnion(false, $2.loc);
+ }
+ ;
+
+conditional_expression
+ : logical_or_expression { $$ = $1; }
+ | logical_or_expression QUESTION {
+ ++parseContext.controlFlowNestingLevel;
+ }
+ expression COLON assignment_expression {
+ --parseContext.controlFlowNestingLevel;
+ parseContext.boolCheck($2.loc, $1);
+ parseContext.rValueErrorCheck($2.loc, "?", $1);
+ parseContext.rValueErrorCheck($5.loc, ":", $4);
+ parseContext.rValueErrorCheck($5.loc, ":", $6);
+ $$ = parseContext.intermediate.addSelection($1, $4, $6, $2.loc);
+ if ($$ == 0) {
+ parseContext.binaryOpError($2.loc, ":", $4->getCompleteString(), $6->getCompleteString());
+ $$ = $6;
+ }
+ }
+ ;
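+// Grammar note (illustrative): the mid-rule action around '?' brackets the
+// true/false expressions of 'cond ? a : b' so they are parsed at an increased
+// control-flow nesting level.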
+
+assignment_expression
+ : conditional_expression { $$ = $1; }
+ | unary_expression assignment_operator assignment_expression {
+ parseContext.arrayObjectCheck($2.loc, $1->getType(), "array assignment");
+ parseContext.opaqueCheck($2.loc, $1->getType(), "=");
+ parseContext.storage16BitAssignmentCheck($2.loc, $1->getType(), "=");
+ parseContext.specializationCheck($2.loc, $1->getType(), "=");
+ parseContext.lValueErrorCheck($2.loc, "assign", $1);
+ parseContext.rValueErrorCheck($2.loc, "assign", $3);
+ $$ = parseContext.intermediate.addAssign($2.op, $1, $3, $2.loc);
+ if ($$ == 0) {
+ parseContext.assignError($2.loc, "assign", $1->getCompleteString(), $3->getCompleteString());
+ $$ = $1;
+ }
+ }
+ ;
+
+assignment_operator
+ : EQUAL {
+ $$.loc = $1.loc;
+ $$.op = EOpAssign;
+ }
+ | MUL_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpMulAssign;
+ }
+ | DIV_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpDivAssign;
+ }
+ | MOD_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "%=");
+ $$.loc = $1.loc;
+ $$.op = EOpModAssign;
+ }
+ | ADD_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpAddAssign;
+ }
+ | SUB_ASSIGN {
+ $$.loc = $1.loc;
+ $$.op = EOpSubAssign;
+ }
+ | LEFT_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bit-shift left assign");
+ $$.loc = $1.loc; $$.op = EOpLeftShiftAssign;
+ }
+ | RIGHT_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bit-shift right assign");
+ $$.loc = $1.loc; $$.op = EOpRightShiftAssign;
+ }
+ | AND_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bitwise-and assign");
+ $$.loc = $1.loc; $$.op = EOpAndAssign;
+ }
+ | XOR_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bitwise-xor assign");
+ $$.loc = $1.loc; $$.op = EOpExclusiveOrAssign;
+ }
+ | OR_ASSIGN {
+ parseContext.fullIntegerCheck($1.loc, "bitwise-or assign");
+ $$.loc = $1.loc; $$.op = EOpInclusiveOrAssign;
+ }
+ ;
+
+expression
+ : assignment_expression {
+ $$ = $1;
+ }
+ | expression COMMA assignment_expression {
+ parseContext.samplerConstructorLocationCheck($2.loc, ",", $3);
+ $$ = parseContext.intermediate.addComma($1, $3, $2.loc);
+ if ($$ == 0) {
+ parseContext.binaryOpError($2.loc, ",", $1->getCompleteString(), $3->getCompleteString());
+ $$ = $3;
+ }
+ }
+ ;
+
+constant_expression
+ : conditional_expression {
+ parseContext.constantValueCheck($1, "");
+ $$ = $1;
+ }
+ ;
+
+declaration
+ : function_prototype SEMICOLON {
+ parseContext.handleFunctionDeclarator($1.loc, *$1.function, true /* prototype */);
+ $$ = 0;
+ // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature
+ }
+ | init_declarator_list SEMICOLON {
+ if ($1.intermNode && $1.intermNode->getAsAggregate())
+ $1.intermNode->getAsAggregate()->setOperator(EOpSequence);
+ $$ = $1.intermNode;
+ }
+ | PRECISION precision_qualifier type_specifier SEMICOLON {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "precision statement");
+
+ // Lazily set the previous scope's defaults; this has an effect only the first time it is called in a particular scope.
+ parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]);
+ parseContext.setDefaultPrecision($1.loc, $3, $2.qualifier.precision);
+ $$ = 0;
+ }
+ | block_structure SEMICOLON {
+ parseContext.declareBlock($1.loc, *$1.typeList);
+ $$ = 0;
+ }
+ | block_structure IDENTIFIER SEMICOLON {
+ parseContext.declareBlock($1.loc, *$1.typeList, $2.string);
+ $$ = 0;
+ }
+ | block_structure IDENTIFIER array_specifier SEMICOLON {
+ parseContext.declareBlock($1.loc, *$1.typeList, $2.string, $3.arraySizes);
+ $$ = 0;
+ }
+ | type_qualifier SEMICOLON {
+ parseContext.globalQualifierFixCheck($1.loc, $1.qualifier);
+ parseContext.updateStandaloneQualifierDefaults($1.loc, $1);
+ $$ = 0;
+ }
+ | type_qualifier IDENTIFIER SEMICOLON {
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$2.string);
+ $$ = 0;
+ }
+ | type_qualifier IDENTIFIER identifier_list SEMICOLON {
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ $3->push_back($2.string);
+ parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$3);
+ $$ = 0;
+ }
+ ;
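+// Grammar note (illustrative): declarations include prototypes like
+// 'float f(int);', lists like 'int a = 1, b;', 'precision highp float;',
+// and blocks like 'layout(std140) uniform B { vec4 v; };'.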
+
+block_structure
+ : type_qualifier IDENTIFIER LEFT_BRACE { parseContext.nestedBlockCheck($1.loc); } struct_declaration_list RIGHT_BRACE {
+ --parseContext.structNestingLevel;
+ parseContext.blockName = $2.string;
+ parseContext.globalQualifierFixCheck($1.loc, $1.qualifier);
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.currentBlockQualifier = $1.qualifier;
+ $$.loc = $1.loc;
+ $$.typeList = $5;
+ }
+ ;
+
+identifier_list
+ : COMMA IDENTIFIER {
+ $$ = new TIdentifierList;
+ $$->push_back($2.string);
+ }
+ | identifier_list COMMA IDENTIFIER {
+ $$ = $1;
+ $$->push_back($3.string);
+ }
+ ;
+
+function_prototype
+ : function_declarator RIGHT_PAREN {
+ $$.function = $1;
+ $$.loc = $2.loc;
+ }
+ ;
+
+function_declarator
+ : function_header {
+ $$ = $1;
+ }
+ | function_header_with_parameters {
+ $$ = $1;
+ }
+ ;
+
+
+function_header_with_parameters
+ : function_header parameter_declaration {
+ // Add the parameter
+ $$ = $1;
+ if ($2.param.type->getBasicType() != EbtVoid)
+ $1->addParameter($2.param);
+ else
+ delete $2.param.type;
+ }
+ | function_header_with_parameters COMMA parameter_declaration {
+ //
+ // Only the first parameter of a one-parameter function can be void;
+ // the check that named parameters are not void is done in parameter_declarator.
+ //
+ if ($3.param.type->getBasicType() == EbtVoid) {
+ //
+ // A parameter after the first one is void
+ //
+ parseContext.error($2.loc, "cannot be an argument type except for '(void)'", "void", "");
+ delete $3.param.type;
+ } else {
+ // Add the parameter
+ $$ = $1;
+ $1->addParameter($3.param);
+ }
+ }
+ ;
+
+function_header
+ : fully_specified_type IDENTIFIER LEFT_PAREN {
+ if ($1.qualifier.storage != EvqGlobal && $1.qualifier.storage != EvqTemporary) {
+ parseContext.error($2.loc, "no qualifiers allowed for function return",
+ GetStorageQualifierString($1.qualifier.storage), "");
+ }
+ if ($1.arraySizes)
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+
+ // Add the function as a prototype after parsing it (we do not support recursion)
+ TFunction *function;
+ TType type($1);
+
+ // Potentially rename shader entry point function. No-op most of the time.
+ parseContext.renameShaderFunction($2.string);
+
+ // Make the function
+ function = new TFunction($2.string, type);
+ $$ = function;
+ }
+ ;
+
+parameter_declarator
+ // Type + name
+ : type_specifier IDENTIFIER {
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+ if ($1.basicType == EbtVoid) {
+ parseContext.error($2.loc, "illegal use of type 'void'", $2.string->c_str(), "");
+ }
+ parseContext.reservedErrorCheck($2.loc, *$2.string);
+
+ TParameter param = {$2.string, new TType($1)};
+ $$.loc = $2.loc;
+ $$.param = param;
+ }
+ | type_specifier IDENTIFIER array_specifier {
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+ TType* type = new TType($1);
+ type->transferArraySizes($3.arraySizes);
+ type->copyArrayInnerSizes($1.arraySizes);
+
+ parseContext.arrayOfArrayVersionCheck($2.loc, type->getArraySizes());
+ parseContext.arraySizeRequiredCheck($3.loc, *$3.arraySizes);
+ parseContext.reservedErrorCheck($2.loc, *$2.string);
+
+ TParameter param = { $2.string, type };
+
+ $$.loc = $2.loc;
+ $$.param = param;
+ }
+ ;
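+// Grammar note (illustrative): matches a named parameter such as 'int x' or
+// 'float v[3]' inside a function header.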
+
+parameter_declaration
+ //
+ // With name
+ //
+ : type_qualifier parameter_declarator {
+ $$ = $2;
+ if ($1.qualifier.precision != EpqNone)
+ $$.param.type->getQualifier().precision = $1.qualifier.precision;
+ parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type);
+ parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type);
+
+ }
+ | parameter_declarator {
+ $$ = $1;
+
+ parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type);
+ parseContext.paramCheckFixStorage($1.loc, EvqTemporary, *$$.param.type);
+ parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+ }
+ //
+ // Without name
+ //
+ | type_qualifier parameter_type_specifier {
+ $$ = $2;
+ if ($1.qualifier.precision != EpqNone)
+ $$.param.type->getQualifier().precision = $1.qualifier.precision;
+ parseContext.precisionQualifierCheck($1.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
+ parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type);
+ parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type);
+ }
+ | parameter_type_specifier {
+ $$ = $1;
+
+ parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type);
+ parseContext.paramCheckFixStorage($1.loc, EvqTemporary, *$$.param.type);
+ parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier());
+ }
+ ;
+
+parameter_type_specifier
+ : type_specifier {
+ TParameter param = { 0, new TType($1) };
+ $$.param = param;
+ if ($1.arraySizes)
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+ ;
+
+init_declarator_list
+ : single_declaration {
+ $$ = $1;
+ }
+ | init_declarator_list COMMA IDENTIFIER {
+ $$ = $1;
+ parseContext.declareVariable($3.loc, *$3.string, $1.type);
+ }
+ | init_declarator_list COMMA IDENTIFIER array_specifier {
+ $$ = $1;
+ parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes);
+ }
+ | init_declarator_list COMMA IDENTIFIER array_specifier EQUAL initializer {
+ $$.type = $1.type;
+ TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes, $6);
+ $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $5.loc);
+ }
+ | init_declarator_list COMMA IDENTIFIER EQUAL initializer {
+ $$.type = $1.type;
+ TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, 0, $5);
+ $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $4.loc);
+ }
+ ;
+
+single_declaration
+ : fully_specified_type {
+ $$.type = $1;
+ $$.intermNode = 0;
+ parseContext.declareTypeDefaults($$.loc, $$.type);
+ }
+ | fully_specified_type IDENTIFIER {
+ $$.type = $1;
+ $$.intermNode = 0;
+ parseContext.declareVariable($2.loc, *$2.string, $1);
+ }
+ | fully_specified_type IDENTIFIER array_specifier {
+ $$.type = $1;
+ $$.intermNode = 0;
+ parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes);
+ }
+ | fully_specified_type IDENTIFIER array_specifier EQUAL initializer {
+ $$.type = $1;
+ TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes, $5);
+ $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $4.loc);
+ }
+ | fully_specified_type IDENTIFIER EQUAL initializer {
+ $$.type = $1;
+ TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4);
+ $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $3.loc);
+ }
+ ;
+
+// Grammar Note: No 'enum' or 'typedef'.
+
+fully_specified_type
+ : type_specifier {
+ $$ = $1;
+
+ parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $$);
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ parseContext.precisionQualifierCheck($$.loc, $$.basicType, $$.qualifier);
+ }
+ | type_qualifier type_specifier {
+ parseContext.globalQualifierFixCheck($1.loc, $1.qualifier);
+ parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $2);
+
+ if ($2.arraySizes) {
+ parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ if ($2.arraySizes && parseContext.arrayQualifierError($2.loc, $1.qualifier))
+ $2.arraySizes = nullptr;
+
+ parseContext.checkNoShaderLayouts($2.loc, $1.shaderQualifiers);
+ $2.shaderQualifiers.merge($1.shaderQualifiers);
+ parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true);
+ parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier);
+
+ $$ = $2;
+
+ if (! $$.qualifier.isInterpolation() &&
+ ((parseContext.language == EShLangVertex && $$.qualifier.storage == EvqVaryingOut) ||
+ (parseContext.language == EShLangFragment && $$.qualifier.storage == EvqVaryingIn)))
+ $$.qualifier.smooth = true;
+ }
+ ;
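+// Grammar note (illustrative): covers forms such as 'vec4' and
+// 'flat in highp vec4', merging any leading qualifiers into the type.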
+
+invariant_qualifier
+ : INVARIANT {
+ parseContext.globalCheck($1.loc, "invariant");
+ parseContext.profileRequires($1.loc, ENoProfile, 120, 0, "invariant");
+ $$.init($1.loc);
+ $$.qualifier.invariant = true;
+ }
+ ;
+
+interpolation_qualifier
+ : SMOOTH {
+ parseContext.globalCheck($1.loc, "smooth");
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "smooth");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "smooth");
+ $$.init($1.loc);
+ $$.qualifier.smooth = true;
+ }
+ | FLAT {
+ parseContext.globalCheck($1.loc, "flat");
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "flat");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "flat");
+ $$.init($1.loc);
+ $$.qualifier.flat = true;
+ }
+ | NOPERSPECTIVE {
+ parseContext.globalCheck($1.loc, "noperspective");
+#ifdef NV_EXTENSIONS
+ parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective");
+#else
+ parseContext.requireProfile($1.loc, ~EEsProfile, "noperspective");
+#endif
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "noperspective");
+ $$.init($1.loc);
+ $$.qualifier.nopersp = true;
+ }
+ | EXPLICITINTERPAMD {
+#ifdef AMD_EXTENSIONS
+ parseContext.globalCheck($1.loc, "__explicitInterpAMD");
+ parseContext.profileRequires($1.loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ parseContext.profileRequires($1.loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ $$.init($1.loc);
+ $$.qualifier.explicitInterp = true;
+#endif
+ }
+ | PERVERTEXNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "pervertexNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires($1.loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ $$.init($1.loc);
+ $$.qualifier.pervertexNV = true;
+#endif
+ }
+ | PERPRIMITIVENV {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck($1.loc, "perprimitiveNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV");
+ // Fragment shader stage doesn't check for extension. So we explicitly add below extension check.
+ if (parseContext.language == EShLangFragment)
+ parseContext.requireExtensions($1.loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV");
+ $$.init($1.loc);
+ $$.qualifier.perPrimitiveNV = true;
+#endif
+ }
+ | PERVIEWNV {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck($1.loc, "perviewNV");
+ parseContext.requireStage($1.loc, EShLangMeshNV, "perviewNV");
+ $$.init($1.loc);
+ $$.qualifier.perViewNV = true;
+#endif
+ }
+ | PERTASKNV {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck($1.loc, "taskNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV");
+ $$.init($1.loc);
+ $$.qualifier.perTaskNV = true;
+#endif
+ }
+ ;
+
+layout_qualifier
+ : LAYOUT LEFT_PAREN layout_qualifier_id_list RIGHT_PAREN {
+ $$ = $3;
+ }
+ ;
+
+layout_qualifier_id_list
+ : layout_qualifier_id {
+ $$ = $1;
+ }
+ | layout_qualifier_id_list COMMA layout_qualifier_id {
+ $$ = $1;
+ $$.shaderQualifiers.merge($3.shaderQualifiers);
+ parseContext.mergeObjectLayoutQualifiers($$.qualifier, $3.qualifier, false);
+ }
+ ;
+
+layout_qualifier_id
+ : IDENTIFIER {
+ $$.init($1.loc);
+ parseContext.setLayoutQualifier($1.loc, $$, *$1.string);
+ }
+ | IDENTIFIER EQUAL constant_expression {
+ $$.init($1.loc);
+ parseContext.setLayoutQualifier($1.loc, $$, *$1.string, $3);
+ }
+ | SHARED { // because "shared" is both an identifier and a keyword
+ $$.init($1.loc);
+ TString strShared("shared");
+ parseContext.setLayoutQualifier($1.loc, $$, strShared);
+ }
+ ;
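+// Grammar note (illustrative): matches single entries such as
+// 'location = 2', 'std140', or the keyword 'shared' inside layout(...).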
+
+precise_qualifier
+ : PRECISE {
+ parseContext.profileRequires($1.loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise");
+ parseContext.profileRequires($1.loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise");
+ $$.init($1.loc);
+ $$.qualifier.noContraction = true;
+ }
+ ;
+
+type_qualifier
+ : single_type_qualifier {
+ $$ = $1;
+ }
+ | type_qualifier single_type_qualifier {
+ $$ = $1;
+ if ($$.basicType == EbtVoid)
+ $$.basicType = $2.basicType;
+
+ $$.shaderQualifiers.merge($2.shaderQualifiers);
+ parseContext.mergeQualifiers($$.loc, $$.qualifier, $2.qualifier, false);
+ }
+ ;
+
+single_type_qualifier
+ : storage_qualifier {
+ $$ = $1;
+ }
+ | layout_qualifier {
+ $$ = $1;
+ }
+ | precision_qualifier {
+ parseContext.checkPrecisionQualifier($1.loc, $1.qualifier.precision);
+ $$ = $1;
+ }
+ | interpolation_qualifier {
+ // allow inheritance of storage qualifier from block declaration
+ $$ = $1;
+ }
+ | invariant_qualifier {
+ // allow inheritance of storage qualifier from block declaration
+ $$ = $1;
+ }
+ | precise_qualifier {
+ // allow inheritance of storage qualifier from block declaration
+ $$ = $1;
+ }
+ | non_uniform_qualifier {
+ $$ = $1;
+ }
+ ;
+
+storage_qualifier
+ : CONST {
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant
+ }
+ | ATTRIBUTE {
+ parseContext.requireStage($1.loc, EShLangVertex, "attribute");
+ parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "attribute");
+ parseContext.checkDeprecated($1.loc, ENoProfile, 130, "attribute");
+ parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "attribute");
+ parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "attribute");
+
+ parseContext.globalCheck($1.loc, "attribute");
+
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqVaryingIn;
+ }
+ | VARYING {
+ parseContext.checkDeprecated($1.loc, ENoProfile, 130, "varying");
+ parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "varying");
+ parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "varying");
+ parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "varying");
+
+ parseContext.globalCheck($1.loc, "varying");
+
+ $$.init($1.loc);
+ if (parseContext.language == EShLangVertex)
+ $$.qualifier.storage = EvqVaryingOut;
+ else
+ $$.qualifier.storage = EvqVaryingIn;
+ }
+ | INOUT {
+ parseContext.globalCheck($1.loc, "inout");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqInOut;
+ }
+ | IN {
+ parseContext.globalCheck($1.loc, "in");
+ $$.init($1.loc);
+ // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later
+ $$.qualifier.storage = EvqIn;
+ }
+ | OUT {
+ parseContext.globalCheck($1.loc, "out");
+ $$.init($1.loc);
+ // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later
+ $$.qualifier.storage = EvqOut;
+ }
+ | CENTROID {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, 0, "centroid");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "centroid");
+ parseContext.globalCheck($1.loc, "centroid");
+ $$.init($1.loc);
+ $$.qualifier.centroid = true;
+ }
+ | PATCH {
+ parseContext.globalCheck($1.loc, "patch");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch");
+ $$.init($1.loc);
+ $$.qualifier.patch = true;
+ }
+ | SAMPLE {
+ parseContext.globalCheck($1.loc, "sample");
+ $$.init($1.loc);
+ $$.qualifier.sample = true;
+ }
+ | UNIFORM {
+ parseContext.globalCheck($1.loc, "uniform");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqUniform;
+ }
+ | BUFFER {
+ parseContext.globalCheck($1.loc, "buffer");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqBuffer;
+ }
+ | HITATTRNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "hitAttributeNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectNVMask | EShLangClosestHitNVMask
+ | EShLangAnyHitNVMask), "hitAttributeNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqHitAttrNV;
+#endif
+ }
+ | PAYLOADNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "rayPayloadNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqPayloadNV;
+#endif
+ }
+ | PAYLOADINNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "rayPayloadInNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadInNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqPayloadInNV;
+#endif
+ }
+ | CALLDATANV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "callableDataNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenNVMask |
+ EShLangClosestHitNVMask | EShLangMissNVMask | EShLangCallableNVMask), "callableDataNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqCallableDataNV;
+#endif
+ }
+ | CALLDATAINNV {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck($1.loc, "callableDataInNV");
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableNVMask), "callableDataInNV");
+ parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV");
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqCallableDataInNV;
+#endif
+ }
+ | SHARED {
+ parseContext.globalCheck($1.loc, "shared");
+ parseContext.profileRequires($1.loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared");
+ parseContext.profileRequires($1.loc, EEsProfile, 310, 0, "shared");
+#ifdef NV_EXTENSIONS
+ parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared");
+#else
+ parseContext.requireStage($1.loc, EShLangCompute, "shared");
+#endif
+ $$.init($1.loc);
+ $$.qualifier.storage = EvqShared;
+ }
+ | COHERENT {
+ $$.init($1.loc);
+ $$.qualifier.coherent = true;
+ }
+ | DEVICECOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent");
+ $$.qualifier.devicecoherent = true;
+ }
+ | QUEUEFAMILYCOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent");
+ $$.qualifier.queuefamilycoherent = true;
+ }
+ | WORKGROUPCOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent");
+ $$.qualifier.workgroupcoherent = true;
+ }
+ | SUBGROUPCOHERENT {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent");
+ $$.qualifier.subgroupcoherent = true;
+ }
+ | NONPRIVATE {
+ $$.init($1.loc);
+ parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate");
+ $$.qualifier.nonprivate = true;
+ }
+ | VOLATILE {
+ $$.init($1.loc);
+ $$.qualifier.volatil = true;
+ }
+ | RESTRICT {
+ $$.init($1.loc);
+ $$.qualifier.restrict = true;
+ }
+ | READONLY {
+ $$.init($1.loc);
+ $$.qualifier.readonly = true;
+ }
+ | WRITEONLY {
+ $$.init($1.loc);
+ $$.qualifier.writeonly = true;
+ }
+ | SUBROUTINE {
+ parseContext.spvRemoved($1.loc, "subroutine");
+ parseContext.globalCheck($1.loc, "subroutine");
+ parseContext.unimplemented($1.loc, "subroutine");
+ $$.init($1.loc);
+ }
+ | SUBROUTINE LEFT_PAREN type_name_list RIGHT_PAREN {
+ parseContext.spvRemoved($1.loc, "subroutine");
+ parseContext.globalCheck($1.loc, "subroutine");
+ parseContext.unimplemented($1.loc, "subroutine");
+ $$.init($1.loc);
+ }
+ ;
+
+non_uniform_qualifier
+ : NONUNIFORM {
+ $$.init($1.loc);
+ $$.qualifier.nonUniform = true;
+ }
+ ;
+
+type_name_list
+ : IDENTIFIER {
+ // TODO
+ }
+ | type_name_list COMMA IDENTIFIER {
+ // TODO: 4.0 semantics: subroutines
+ // 1) make sure each identifier is a type declared earlier with SUBROUTINE
+ // 2) save all of the identifiers for future comparison with the declared function
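+        // e.g. the "TypeA, TypeB" list in "subroutine(TypeA, TypeB) vec4 impl(...);"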
+ }
+ ;
+
+type_specifier
+ : type_specifier_nonarray type_parameter_specifier_opt {
+ $$ = $1;
+ $$.qualifier.precision = parseContext.getDefaultPrecision($$);
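+        // pick up any default precision (set by a "precision" statement) for this type; mainly relevant to ES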
+ $$.typeParameters = $2;
+ }
+ | type_specifier_nonarray type_parameter_specifier_opt array_specifier {
+ parseContext.arrayOfArrayVersionCheck($3.loc, $3.arraySizes);
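+        // e.g. "float a[2][3]" declares an array of arrays, gated by the version check above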
+ $$ = $1;
+ $$.qualifier.precision = parseContext.getDefaultPrecision($$);
+ $$.typeParameters = $2;
+ $$.arraySizes = $3.arraySizes;
+ }
+ ;
+
+array_specifier
+ : LEFT_BRACKET RIGHT_BRACKET {
+ $$.loc = $1.loc;
+ $$.arraySizes = new TArraySizes;
+ $$.arraySizes->addInnerSize();
+ }
+ | LEFT_BRACKET conditional_expression RIGHT_BRACKET {
+ $$.loc = $1.loc;
+ $$.arraySizes = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($2->getLoc(), $2, size, "array size");
+ $$.arraySizes->addInnerSize(size);
+ }
+ | array_specifier LEFT_BRACKET RIGHT_BRACKET {
+ $$ = $1;
+ $$.arraySizes->addInnerSize();
+ }
+ | array_specifier LEFT_BRACKET conditional_expression RIGHT_BRACKET {
+ $$ = $1;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($3->getLoc(), $3, size, "array size");
+ $$.arraySizes->addInnerSize(size);
+ }
+ ;
+
+type_parameter_specifier_opt
+ : type_parameter_specifier {
+ $$ = $1;
+ }
+ | /* May be null */ {
+ $$ = 0;
+ }
+ ;
+
+type_parameter_specifier
+ : LEFT_ANGLE type_parameter_specifier_list RIGHT_ANGLE {
+ $$ = $2;
+ }
+ ;
+
+type_parameter_specifier_list
+ : unary_expression {
+ $$ = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($1->getLoc(), $1, size, "type parameter");
+ $$->addInnerSize(size);
+ }
+ | type_parameter_specifier_list COMMA unary_expression {
+ $$ = $1;
+
+ TArraySize size;
+ parseContext.arraySizeCheck($3->getLoc(), $3, size, "type parameter");
+ $$->addInnerSize(size);
+ }
+ ;
+
+type_specifier_nonarray
+ : VOID {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtVoid;
+ }
+ | FLOAT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ }
+ | DOUBLE {
+ parseContext.doubleCheck($1.loc, "double");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ }
+ | FLOAT16_T {
+ parseContext.float16ScalarVectorCheck($1.loc, "float16_t", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ }
+ | FLOAT32_T {
+ parseContext.explicitFloat32Check($1.loc, "float32_t", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ }
+ | FLOAT64_T {
+ parseContext.explicitFloat64Check($1.loc, "float64_t", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ }
+ | INT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ }
+ | UINT {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ }
+ | INT8_T {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ }
+ | UINT8_T {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ }
+ | INT16_T {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ }
+ | UINT16_T {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ }
+ | INT32_T {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ }
+ | UINT32_T {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ }
+ | INT64_T {
+ parseContext.int64Check($1.loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ }
+ | UINT64_T {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ }
+ | BOOL {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ }
+ | VEC2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(2);
+ }
+ | VEC3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(3);
+ }
+ | VEC4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(4);
+ }
+ | DVEC2 {
+ parseContext.doubleCheck($1.loc, "double vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(2);
+ }
+ | DVEC3 {
+ parseContext.doubleCheck($1.loc, "double vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(3);
+ }
+ | DVEC4 {
+ parseContext.doubleCheck($1.loc, "double vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(4);
+ }
+ | F16VEC2 {
+ parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setVector(2);
+ }
+ | F16VEC3 {
+ parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setVector(3);
+ }
+ | F16VEC4 {
+ parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setVector(4);
+ }
+ | F32VEC2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(2);
+ }
+ | F32VEC3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(3);
+ }
+ | F32VEC4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setVector(4);
+ }
+ | F64VEC2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(2);
+ }
+ | F64VEC3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(3);
+ }
+ | F64VEC4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setVector(4);
+ }
+ | BVEC2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ $$.setVector(2);
+ }
+ | BVEC3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ $$.setVector(3);
+ }
+ | BVEC4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtBool;
+ $$.setVector(4);
+ }
+ | IVEC2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(2);
+ }
+ | IVEC3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(3);
+ }
+ | IVEC4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(4);
+ }
+ | I8VEC2 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ $$.setVector(2);
+ }
+ | I8VEC3 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ $$.setVector(3);
+ }
+ | I8VEC4 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt8;
+ $$.setVector(4);
+ }
+ | I16VEC2 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ $$.setVector(2);
+ }
+ | I16VEC3 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ $$.setVector(3);
+ }
+ | I16VEC4 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt16;
+ $$.setVector(4);
+ }
+ | I32VEC2 {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(2);
+ }
+ | I32VEC3 {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(3);
+ }
+ | I32VEC4 {
+ parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt;
+ $$.setVector(4);
+ }
+ | I64VEC2 {
+ parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ $$.setVector(2);
+ }
+ | I64VEC3 {
+ parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ $$.setVector(3);
+ }
+ | I64VEC4 {
+ parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtInt64;
+ $$.setVector(4);
+ }
+ | UVEC2 {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(2);
+ }
+ | UVEC3 {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(3);
+ }
+ | UVEC4 {
+ parseContext.fullIntegerCheck($1.loc, "unsigned integer vector");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(4);
+ }
+ | U8VEC2 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ $$.setVector(2);
+ }
+ | U8VEC3 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ $$.setVector(3);
+ }
+ | U8VEC4 {
+ parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint8;
+ $$.setVector(4);
+ }
+ | U16VEC2 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ $$.setVector(2);
+ }
+ | U16VEC3 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ $$.setVector(3);
+ }
+ | U16VEC4 {
+ parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint16;
+ $$.setVector(4);
+ }
+ | U32VEC2 {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(2);
+ }
+ | U32VEC3 {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(3);
+ }
+ | U32VEC4 {
+ parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint;
+ $$.setVector(4);
+ }
+ | U64VEC2 {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ $$.setVector(2);
+ }
+ | U64VEC3 {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ $$.setVector(3);
+ }
+ | U64VEC4 {
+ parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtUint64;
+ $$.setVector(4);
+ }
+ | MAT2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | MAT3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | MAT4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
+ | MAT2X2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | MAT2X3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 3);
+ }
+ | MAT2X4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 4);
+ }
+ | MAT3X2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 2);
+ }
+ | MAT3X3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | MAT3X4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 4);
+ }
+ | MAT4X2 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 2);
+ }
+ | MAT4X3 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 3);
+ }
+ | MAT4X4 {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
+ | DMAT2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | DMAT3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | DMAT4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | DMAT2X2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | DMAT2X3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 3);
+ }
+ | DMAT2X4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 4);
+ }
+ | DMAT3X2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 2);
+ }
+ | DMAT3X3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | DMAT3X4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 4);
+ }
+ | DMAT4X2 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 2);
+ }
+ | DMAT4X3 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 3);
+ }
+ | DMAT4X4 {
+ parseContext.doubleCheck($1.loc, "double matrix");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | F16MAT2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 2);
+ }
+ | F16MAT3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 3);
+ }
+ | F16MAT4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 4);
+ }
+ | F16MAT2X2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 2);
+ }
+ | F16MAT2X3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 3);
+ }
+ | F16MAT2X4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(2, 4);
+ }
+ | F16MAT3X2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 2);
+ }
+ | F16MAT3X3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 3);
+ }
+ | F16MAT3X4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(3, 4);
+ }
+ | F16MAT4X2 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 2);
+ }
+ | F16MAT4X3 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 3);
+ }
+ | F16MAT4X4 {
+ parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat16;
+ $$.setMatrix(4, 4);
+ }
+ | F32MAT2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | F32MAT3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | F32MAT4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
+ | F32MAT2X2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 2);
+ }
+ | F32MAT2X3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 3);
+ }
+ | F32MAT2X4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(2, 4);
+ }
+ | F32MAT3X2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 2);
+ }
+ | F32MAT3X3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 3);
+ }
+ | F32MAT3X4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(3, 4);
+ }
+ | F32MAT4X2 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 2);
+ }
+ | F32MAT4X3 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 3);
+ }
+ | F32MAT4X4 {
+ parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.setMatrix(4, 4);
+ }
+ | F64MAT2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | F64MAT3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | F64MAT4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | F64MAT2X2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 2);
+ }
+ | F64MAT2X3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 3);
+ }
+ | F64MAT2X4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(2, 4);
+ }
+ | F64MAT3X2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 2);
+ }
+ | F64MAT3X3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 3);
+ }
+ | F64MAT3X4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(3, 4);
+ }
+ | F64MAT4X2 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 2);
+ }
+ | F64MAT4X3 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 3);
+ }
+ | F64MAT4X4 {
+ parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtDouble;
+ $$.setMatrix(4, 4);
+ }
+ | ACCSTRUCTNV {
+#ifdef NV_EXTENSIONS
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtAccStructNV;
+#endif
+ }
+ | ATOMIC_UINT {
+ parseContext.vulkanRemoved($1.loc, "atomic counter types");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtAtomicUint;
+ }
+ | SAMPLER1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D);
+ }
+ | SAMPLER2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D);
+ }
+ | SAMPLER3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd3D);
+ }
+ | SAMPLERCUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube);
+ }
+ | SAMPLER1DSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D, false, true);
+ }
+ | SAMPLER2DSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, false, true);
+ }
+ | SAMPLERCUBESHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube, false, true);
+ }
+ | SAMPLER1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D, true);
+ }
+ | SAMPLER2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, true);
+ }
+ | SAMPLER1DARRAYSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd1D, true, true);
+ }
+ | SAMPLER2DARRAYSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, true, true);
+ }
+ | SAMPLERCUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube, true);
+ }
+ | SAMPLERCUBEARRAYSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdCube, true, true);
+ }
+ | F16SAMPLER1D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D);
+#endif
+ }
+ | F16SAMPLER2D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D);
+#endif
+ }
+ | F16SAMPLER3D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd3D);
+#endif
+ }
+ | F16SAMPLERCUBE {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube);
+#endif
+ }
+ | F16SAMPLER1DSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D, false, true);
+#endif
+ }
+ | F16SAMPLER2DSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, false, true);
+#endif
+ }
+ | F16SAMPLERCUBESHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube, false, true);
+#endif
+ }
+ | F16SAMPLER1DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D, true);
+#endif
+ }
+ | F16SAMPLER2DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, true);
+#endif
+ }
+ | F16SAMPLER1DARRAYSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd1D, true, true);
+#endif
+ }
+ | F16SAMPLER2DARRAYSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, true, true);
+#endif
+ }
+ | F16SAMPLERCUBEARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube, true);
+#endif
+ }
+ | F16SAMPLERCUBEARRAYSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdCube, true, true);
+#endif
+ }
+ | ISAMPLER1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd1D);
+ }
+ | ISAMPLER2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D);
+ }
+ | ISAMPLER3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd3D);
+ }
+ | ISAMPLERCUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdCube);
+ }
+ | ISAMPLER1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd1D, true);
+ }
+ | ISAMPLER2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D, true);
+ }
+ | ISAMPLERCUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdCube, true);
+ }
+ | USAMPLER1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd1D);
+ }
+ | USAMPLER2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D);
+ }
+ | USAMPLER3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd3D);
+ }
+ | USAMPLERCUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdCube);
+ }
+ | USAMPLER1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd1D, true);
+ }
+ | USAMPLER2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D, true);
+ }
+ | USAMPLERCUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdCube, true);
+ }
+ | SAMPLER2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdRect);
+ }
+ | SAMPLER2DRECTSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdRect, false, true);
+ }
+ | F16SAMPLER2DRECT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdRect);
+#endif
+ }
+ | F16SAMPLER2DRECTSHADOW {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdRect, false, true);
+#endif
+ }
+ | ISAMPLER2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdRect);
+ }
+ | USAMPLER2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdRect);
+ }
+ | SAMPLERBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, EsdBuffer);
+ }
+ | F16SAMPLERBUFFER {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, EsdBuffer);
+#endif
+ }
+ | ISAMPLERBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, EsdBuffer);
+ }
+ | USAMPLERBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, EsdBuffer);
+ }
+ | SAMPLER2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, false, false, true);
+ }
+ | F16SAMPLER2DMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+ | ISAMPLER2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D, false, false, true);
+ }
+ | USAMPLER2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D, false, false, true);
+ }
+ | SAMPLER2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D, true, false, true);
+ }
+ | F16SAMPLER2DMSARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+ | ISAMPLER2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtInt, Esd2D, true, false, true);
+ }
+ | USAMPLER2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtUint, Esd2D, true, false, true);
+ }
+ | SAMPLER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setPureSampler(false);
+ }
+ | SAMPLERSHADOW {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setPureSampler(true);
+ }
+ | TEXTURE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd1D);
+ }
+ | F16TEXTURE1D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd1D);
+#endif
+ }
+ | TEXTURE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D);
+ }
+ | F16TEXTURE2D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D);
+#endif
+ }
+ | TEXTURE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd3D);
+ }
+ | F16TEXTURE3D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd3D);
+#endif
+ }
+ | TEXTURECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdCube);
+ }
+ | F16TEXTURECUBE {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdCube);
+#endif
+ }
+ | TEXTURE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd1D, true);
+ }
+ | F16TEXTURE1DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd1D, true);
+#endif
+ }
+ | TEXTURE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D, true);
+ }
+ | F16TEXTURE2DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D, true);
+#endif
+ }
+ | TEXTURECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdCube, true);
+ }
+ | F16TEXTURECUBEARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdCube, true);
+#endif
+ }
+ | ITEXTURE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd1D);
+ }
+ | ITEXTURE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D);
+ }
+ | ITEXTURE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd3D);
+ }
+ | ITEXTURECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdCube);
+ }
+ | ITEXTURE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd1D, true);
+ }
+ | ITEXTURE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D, true);
+ }
+ | ITEXTURECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdCube, true);
+ }
+ | UTEXTURE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd1D);
+ }
+ | UTEXTURE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D);
+ }
+ | UTEXTURE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd3D);
+ }
+ | UTEXTURECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdCube);
+ }
+ | UTEXTURE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd1D, true);
+ }
+ | UTEXTURE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D, true);
+ }
+ | UTEXTURECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdCube, true);
+ }
+ | TEXTURE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdRect);
+ }
+ | F16TEXTURE2DRECT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdRect);
+#endif
+ }
+ | ITEXTURE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdRect);
+ }
+ | UTEXTURE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdRect);
+ }
+ | TEXTUREBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, EsdBuffer);
+ }
+ | F16TEXTUREBUFFER {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, EsdBuffer);
+#endif
+ }
+ | ITEXTUREBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, EsdBuffer);
+ }
+ | UTEXTUREBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, EsdBuffer);
+ }
+ | TEXTURE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D, false, false, true);
+ }
+ | F16TEXTURE2DMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+ | ITEXTURE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D, false, false, true);
+ }
+ | UTEXTURE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D, false, false, true);
+ }
+ | TEXTURE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat, Esd2D, true, false, true);
+ }
+ | F16TEXTURE2DMSARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+ | ITEXTURE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtInt, Esd2D, true, false, true);
+ }
+ | UTEXTURE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setTexture(EbtUint, Esd2D, true, false, true);
+ }
+ | IMAGE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd1D);
+ }
+ | F16IMAGE1D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd1D);
+#endif
+ }
+ | IIMAGE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd1D);
+ }
+ | UIMAGE1D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd1D);
+ }
+ | IMAGE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D);
+ }
+ | F16IMAGE2D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D);
+#endif
+ }
+ | IIMAGE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D);
+ }
+ | UIMAGE2D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D);
+ }
+ | IMAGE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd3D);
+ }
+ | F16IMAGE3D {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd3D);
+#endif
+ }
+ | IIMAGE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd3D);
+ }
+ | UIMAGE3D {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd3D);
+ }
+ | IMAGE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdRect);
+ }
+ | F16IMAGE2DRECT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdRect);
+#endif
+ }
+ | IIMAGE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdRect);
+ }
+ | UIMAGE2DRECT {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdRect);
+ }
+ | IMAGECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdCube);
+ }
+ | F16IMAGECUBE {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdCube);
+#endif
+ }
+ | IIMAGECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdCube);
+ }
+ | UIMAGECUBE {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdCube);
+ }
+ | IMAGEBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdBuffer);
+ }
+ | F16IMAGEBUFFER {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdBuffer);
+#endif
+ }
+ | IIMAGEBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdBuffer);
+ }
+ | UIMAGEBUFFER {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdBuffer);
+ }
+ | IMAGE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd1D, true);
+ }
+ | F16IMAGE1DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd1D, true);
+#endif
+ }
+ | IIMAGE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd1D, true);
+ }
+ | UIMAGE1DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd1D, true);
+ }
+ | IMAGE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D, true);
+ }
+ | F16IMAGE2DARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D, true);
+#endif
+ }
+ | IIMAGE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D, true);
+ }
+ | UIMAGE2DARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D, true);
+ }
+ | IMAGECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, EsdCube, true);
+ }
+ | F16IMAGECUBEARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, EsdCube, true);
+#endif
+ }
+ | IIMAGECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, EsdCube, true);
+ }
+ | UIMAGECUBEARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, EsdCube, true);
+ }
+ | IMAGE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D, false, false, true);
+ }
+ | F16IMAGE2DMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+ | IIMAGE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D, false, false, true);
+ }
+ | UIMAGE2DMS {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D, false, false, true);
+ }
+ | IMAGE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat, Esd2D, true, false, true);
+ }
+ | F16IMAGE2DMSARRAY {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+ | IIMAGE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtInt, Esd2D, true, false, true);
+ }
+ | UIMAGE2DMSARRAY {
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setImage(EbtUint, Esd2D, true, false, true);
+ }
+ | SAMPLEREXTERNALOES { // GL_OES_EGL_image_external
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D);
+ $$.sampler.external = true;
+ }
+ | SAMPLEREXTERNAL2DY2YEXT { // GL_EXT_YUV_target
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.set(EbtFloat, Esd2D);
+ $$.sampler.yuv = true;
+ }
+ | SUBPASSINPUT {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat);
+ }
+ | SUBPASSINPUTMS {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat, true);
+ }
+ | F16SUBPASSINPUT {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat16);
+#endif
+ }
+ | F16SUBPASSINPUTMS {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtFloat16, true);
+#endif
+ }
+ | ISUBPASSINPUT {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtInt);
+ }
+ | ISUBPASSINPUTMS {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtInt, true);
+ }
+ | USUBPASSINPUT {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtUint);
+ }
+ | USUBPASSINPUTMS {
+ parseContext.requireStage($1.loc, EShLangFragment, "subpass input");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtSampler;
+ $$.sampler.setSubpass(EbtUint, true);
+ }
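+    // Illustration only (not part of the grammar): under Vulkan, subpass
+    // inputs are fragment-stage attachment reads, e.g.
+    //     layout(input_attachment_index = 0, set = 0, binding = 0)
+    //         uniform subpassInput color;
+    //     vec4 c = subpassLoad(color);
+    // hence the requireStage(EShLangFragment) call in every alternative above.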
+ | FCOOPMATNV {
+ parseContext.fcoopmatCheck($1.loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel());
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtFloat;
+ $$.coopmat = true;
+ }
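+    // Illustration only: with GL_NV_cooperative_matrix enabled, a hypothetical
+    //     fcoopmatNV<16, gl_ScopeSubgroup, 16, 8> m;
+    // reaches this production (the <...> type parameters are parsed elsewhere);
+    // fcoopmatCheck() above rejects the keyword when the extension is absent.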
+ | struct_specifier {
+ $$ = $1;
+ $$.qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary;
+ parseContext.structTypeCheck($$.loc, $$);
+ }
+ | TYPE_NAME {
+ //
+        // This is for user-defined type names: the lexical phase has already
+        // looked the type up in the symbol table.
+ //
+ if (const TVariable* variable = ($1.symbol)->getAsVariable()) {
+ const TType& structure = variable->getType();
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ $$.basicType = EbtStruct;
+ $$.userDef = &structure;
+ } else
+ parseContext.error($1.loc, "expected type name", $1.string->c_str(), "");
+ }
+ ;
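+// Illustration only, for the TYPE_NAME alternative above: after a hypothetical
+//     struct Light { vec3 pos; };
+// the lexer classifies a later "Light" as TYPE_NAME rather than IDENTIFIER,
+// and $1.symbol is the TVariable that struct_specifier put in the symbol table.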
+
+precision_qualifier
+ : HIGH_PRECISION {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "highp precision qualifier");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqHigh);
+ }
+ | MEDIUM_PRECISION {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "mediump precision qualifier");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqMedium);
+ }
+ | LOW_PRECISION {
+ parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "lowp precision qualifier");
+ $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqLow);
+ }
+ ;
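+// Illustration only: a declaration such as "highp float x;" is always legal in
+// ES, but on desktop profiles needs version 130+ (where precision qualifiers
+// are accepted for ES compatibility), matching the profileRequires() calls above.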
+
+struct_specifier
+ : STRUCT IDENTIFIER LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE {
+ TType* structure = new TType($5, *$2.string);
+ parseContext.structArrayCheck($2.loc, *structure);
+ TVariable* userTypeDef = new TVariable($2.string, *structure, true);
+ if (! parseContext.symbolTable.insert(*userTypeDef))
+ parseContext.error($2.loc, "redefinition", $2.string->c_str(), "struct");
+ $$.init($1.loc);
+ $$.basicType = EbtStruct;
+ $$.userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+ | STRUCT LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE {
+ TType* structure = new TType($4, TString(""));
+ $$.init($1.loc);
+ $$.basicType = EbtStruct;
+ $$.userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+ ;
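+// Illustration only: the two alternatives above cover, e.g.
+//     struct S { float f; };   // named: "S" becomes a user type (TYPE_NAME)
+//     struct { float f; } v;   // anonymous: the TType gets an empty name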
+
+struct_declaration_list
+ : struct_declaration {
+ $$ = $1;
+ }
+ | struct_declaration_list struct_declaration {
+ $$ = $1;
+ for (unsigned int i = 0; i < $2->size(); ++i) {
+ for (unsigned int j = 0; j < $$->size(); ++j) {
+ if ((*$$)[j].type->getFieldName() == (*$2)[i].type->getFieldName())
+ parseContext.error((*$2)[i].loc, "duplicate member name:", "", (*$2)[i].type->getFieldName().c_str());
+ }
+ $$->push_back((*$2)[i]);
+ }
+ }
+ ;
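+// Illustration only: the quadratic field-name scan above rejects, e.g.
+//     struct S { float a; int a; };   // error: duplicate member name "a"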
+
+struct_declaration
+ : type_specifier struct_declarator_list SEMICOLON {
+ if ($1.arraySizes) {
+ parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes);
+ }
+
+ $$ = $2;
+
+ parseContext.voidErrorCheck($1.loc, (*$2)[0].type->getFieldName(), $1.basicType);
+ parseContext.precisionQualifierCheck($1.loc, $1.basicType, $1.qualifier);
+
+ for (unsigned int i = 0; i < $$->size(); ++i) {
+ TType type($1);
+ type.setFieldName((*$$)[i].type->getFieldName());
+ type.transferArraySizes((*$$)[i].type->getArraySizes());
+ type.copyArrayInnerSizes($1.arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes());
+ (*$$)[i].type->shallowCopy(type);
+ }
+ }
+ | type_qualifier type_specifier struct_declarator_list SEMICOLON {
+ if ($2.arraySizes) {
+ parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck($2.loc, *$2.arraySizes);
+ }
+
+ $$ = $3;
+
+ parseContext.memberQualifierCheck($1);
+ parseContext.voidErrorCheck($2.loc, (*$3)[0].type->getFieldName(), $2.basicType);
+ parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true);
+ parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier);
+
+ for (unsigned int i = 0; i < $$->size(); ++i) {
+ TType type($2);
+ type.setFieldName((*$$)[i].type->getFieldName());
+ type.transferArraySizes((*$$)[i].type->getArraySizes());
+ type.copyArrayInnerSizes($2.arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes());
+ (*$$)[i].type->shallowCopy(type);
+ }
+ }
+ ;
+
+struct_declarator_list
+ : struct_declarator {
+ $$ = new TTypeList;
+ $$->push_back($1);
+ }
+ | struct_declarator_list COMMA struct_declarator {
+ $$->push_back($3);
+ }
+ ;
+
+struct_declarator
+ : IDENTIFIER {
+ $$.type = new TType(EbtVoid);
+ $$.loc = $1.loc;
+ $$.type->setFieldName(*$1.string);
+ }
+ | IDENTIFIER array_specifier {
+ parseContext.arrayOfArrayVersionCheck($1.loc, $2.arraySizes);
+
+ $$.type = new TType(EbtVoid);
+ $$.loc = $1.loc;
+ $$.type->setFieldName(*$1.string);
+ $$.type->transferArraySizes($2.arraySizes);
+ }
+ ;
+
+initializer
+ : assignment_expression {
+ $$ = $1;
+ }
+ | LEFT_BRACE initializer_list RIGHT_BRACE {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile($1.loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ $$ = $2;
+ }
+ | LEFT_BRACE initializer_list COMMA RIGHT_BRACE {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile($1.loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ $$ = $2;
+ }
+ ;
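+// Illustration only: brace initializers such as
+//     float a[3] = { 1.0, 2.0, 3.0 };
+// (a trailing comma is also accepted) require a non-ES profile and either
+// version 420 or GL_ARB_shading_language_420pack, per the checks above.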
+
+initializer_list
+ : initializer {
+ $$ = parseContext.intermediate.growAggregate(0, $1, $1->getLoc());
+ }
+ | initializer_list COMMA initializer {
+ $$ = parseContext.intermediate.growAggregate($1, $3);
+ }
+ ;
+
+declaration_statement
+ : declaration { $$ = $1; }
+ ;
+
+statement
+ : compound_statement { $$ = $1; }
+ | simple_statement { $$ = $1; }
+ ;
+
+// Grammar Note: labeled statements for switch statements only; 'goto' is not supported.
+
+simple_statement
+ : declaration_statement { $$ = $1; }
+ | expression_statement { $$ = $1; }
+ | selection_statement { $$ = $1; }
+ | switch_statement { $$ = $1; }
+ | case_label { $$ = $1; }
+ | iteration_statement { $$ = $1; }
+ | jump_statement { $$ = $1; }
+ ;
+
+compound_statement
+ : LEFT_BRACE RIGHT_BRACE { $$ = 0; }
+ | LEFT_BRACE {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ }
+ statement_list {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ }
+ RIGHT_BRACE {
+ if ($3 && $3->getAsAggregate())
+ $3->getAsAggregate()->setOperator(EOpSequence);
+ $$ = $3;
+ }
+ ;
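+// Illustration only: for "{ int x = 1; f(x); }" the braces push a scope for x,
+// pop it (restoring default precisions) at the closing brace, and the collected
+// statement_list aggregate is marked EOpSequence.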
+
+statement_no_new_scope
+ : compound_statement_no_new_scope { $$ = $1; }
+ | simple_statement { $$ = $1; }
+ ;
+
+statement_scoped
+ : {
+ ++parseContext.controlFlowNestingLevel;
+ }
+ compound_statement {
+ --parseContext.controlFlowNestingLevel;
+ $$ = $2;
+ }
+ | {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ simple_statement {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ $$ = $2;
+      }
+    ;
+
+compound_statement_no_new_scope
+    // Statement that doesn't create a new scope, for selection_statement and iteration_statement
+ : LEFT_BRACE RIGHT_BRACE {
+ $$ = 0;
+ }
+ | LEFT_BRACE statement_list RIGHT_BRACE {
+ if ($2 && $2->getAsAggregate())
+ $2->getAsAggregate()->setOperator(EOpSequence);
+ $$ = $2;
+ }
+ ;
+
+statement_list
+ : statement {
+ $$ = parseContext.intermediate.makeAggregate($1);
+ if ($1 && $1->getAsBranchNode() && ($1->getAsBranchNode()->getFlowOp() == EOpCase ||
+ $1->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence(0, $1);
+ $$ = 0; // start a fresh subsequence for what's after this case
+ }
+ }
+ | statement_list statement {
+ if ($2 && $2->getAsBranchNode() && ($2->getAsBranchNode()->getFlowOp() == EOpCase ||
+ $2->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence($1 ? $1->getAsAggregate() : 0, $2);
+ $$ = 0; // start a fresh subsequence for what's after this case
+ } else
+ $$ = parseContext.intermediate.growAggregate($1, $2);
+ }
+ ;
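+// Illustration only: in a hypothetical
+//     switch (x) { case 0: a(); b(); case 1: c(); }
+// reaching the "case 1" branch node makes wrapupSwitchSubsequence() close the
+// { a(); b(); } aggregate, and $$ is reset to start a fresh subsequence.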
+
+expression_statement
+ : SEMICOLON { $$ = 0; }
+ | expression SEMICOLON { $$ = static_cast<TIntermNode*>($1); }
+ ;
+
+selection_statement
+ : selection_statement_nonattributed {
+ $$ = $1;
+ }
+ | attribute selection_statement_nonattributed {
+ parseContext.handleSelectionAttributes(*$1, $2);
+ $$ = $2;
+    }
+    ;
+
+selection_statement_nonattributed
+ : IF LEFT_PAREN expression RIGHT_PAREN selection_rest_statement {
+ parseContext.boolCheck($1.loc, $3);
+ $$ = parseContext.intermediate.addSelection($3, $5, $1.loc);
+ }
+ ;
+
+selection_rest_statement
+ : statement_scoped ELSE statement_scoped {
+ $$.node1 = $1;
+ $$.node2 = $3;
+ }
+ | statement_scoped {
+ $$.node1 = $1;
+ $$.node2 = 0;
+ }
+ ;
+
+condition
+    // As in the 1996 C++ draft, conditions can include single declarations.
+ : expression {
+ $$ = $1;
+ parseContext.boolCheck($1->getLoc(), $1);
+ }
+ | fully_specified_type IDENTIFIER EQUAL initializer {
+ parseContext.boolCheck($2.loc, $1);
+
+ TType type($1);
+ TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4);
+ if (initNode)
+ $$ = initNode->getAsTyped();
+ else
+ $$ = 0;
+ }
+ ;
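+// Illustration only: the declaration form accepts, e.g.
+//     while (bool done = step()) { ... }
+// where declareVariable() builds the initializer node whose typed value then
+// serves as the loop condition.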
+
+switch_statement
+ : switch_statement_nonattributed {
+ $$ = $1;
+ }
+ | attribute switch_statement_nonattributed {
+ parseContext.handleSwitchAttributes(*$1, $2);
+ $$ = $2;
+    }
+    ;
+
+switch_statement_nonattributed
+ : SWITCH LEFT_PAREN expression RIGHT_PAREN {
+ // start new switch sequence on the switch stack
+ ++parseContext.controlFlowNestingLevel;
+ ++parseContext.statementNestingLevel;
+ parseContext.switchSequenceStack.push_back(new TIntermSequence);
+ parseContext.switchLevel.push_back(parseContext.statementNestingLevel);
+ parseContext.symbolTable.push();
+ }
+ LEFT_BRACE switch_statement_list RIGHT_BRACE {
+ $$ = parseContext.addSwitch($1.loc, $3, $7 ? $7->getAsAggregate() : 0);
+ delete parseContext.switchSequenceStack.back();
+ parseContext.switchSequenceStack.pop_back();
+ parseContext.switchLevel.pop_back();
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ ;
+
+switch_statement_list
+ : /* nothing */ {
+ $$ = 0;
+ }
+ | statement_list {
+ $$ = $1;
+ }
+ ;
+
+case_label
+ : CASE expression COLON {
+ $$ = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error($1.loc, "cannot appear outside switch statement", "case", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error($1.loc, "cannot be nested inside control flow", "case", "");
+ else {
+ parseContext.constantValueCheck($2, "case");
+ parseContext.integerCheck($2, "case");
+ $$ = parseContext.intermediate.addBranch(EOpCase, $2, $1.loc);
+ }
+ }
+ | DEFAULT COLON {
+ $$ = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error($1.loc, "cannot appear outside switch statement", "default", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error($1.loc, "cannot be nested inside control flow", "default", "");
+ else
+ $$ = parseContext.intermediate.addBranch(EOpDefault, $1.loc);
+ }
+ ;
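+// Illustration only: a "case 0:" outside any switch, or nested one statement
+// level deeper than the switch body (e.g. inside an if within the switch),
+// takes the error paths above; otherwise the label must be a constant integer.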
+
+iteration_statement
+ : iteration_statement_nonattributed {
+ $$ = $1;
+ }
+ | attribute iteration_statement_nonattributed {
+ parseContext.handleLoopAttributes(*$1, $2);
+ $$ = $2;
+    }
+    ;
+
+iteration_statement_nonattributed
+ : WHILE LEFT_PAREN {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error($1.loc, "while loops not available", "limitation", "");
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ condition RIGHT_PAREN statement_no_new_scope {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ $$ = parseContext.intermediate.addLoop($6, $4, 0, true, $1.loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ | DO {
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ statement WHILE LEFT_PAREN expression RIGHT_PAREN SEMICOLON {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error($1.loc, "do-while loops not available", "limitation", "");
+
+ parseContext.boolCheck($8.loc, $6);
+
+ $$ = parseContext.intermediate.addLoop($3, $6, 0, false, $4.loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ | FOR LEFT_PAREN {
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+ for_init_statement for_rest_statement RIGHT_PAREN statement_no_new_scope {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ $$ = parseContext.intermediate.makeAggregate($4, $2.loc);
+ TIntermLoop* forLoop = parseContext.intermediate.addLoop($7, reinterpret_cast<TIntermTyped*>($5.node1), reinterpret_cast<TIntermTyped*>($5.node2), true, $1.loc);
+ if (! parseContext.limits.nonInductiveForLoops)
+ parseContext.inductiveLoopCheck($1.loc, $4, forLoop);
+ $$ = parseContext.intermediate.growAggregate($$, forLoop, $1.loc);
+ $$->getAsAggregate()->setOperator(EOpSequence);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+ ;
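+// Illustration only: when limits.nonInductiveForLoops is false (as in GLSL ES
+// 1.00 Appendix A), inductiveLoopCheck() restricts FOR to the canonical
+//     for (int i = 0; i < N; ++i) { ... }
+// inductive form, and WHILE/DO are rejected outright when whileLoops is false.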
+
+for_init_statement
+ : expression_statement {
+ $$ = $1;
+ }
+ | declaration_statement {
+ $$ = $1;
+ }
+ ;
+
+conditionopt
+ : condition {
+ $$ = $1;
+ }
+ | /* May be null */ {
+ $$ = 0;
+ }
+ ;
+
+for_rest_statement
+ : conditionopt SEMICOLON {
+ $$.node1 = $1;
+ $$.node2 = 0;
+ }
+ | conditionopt SEMICOLON expression {
+ $$.node1 = $1;
+ $$.node2 = $3;
+ }
+ ;
+
+jump_statement
+ : CONTINUE SEMICOLON {
+ if (parseContext.loopNestingLevel <= 0)
+ parseContext.error($1.loc, "continue statement only allowed in loops", "", "");
+ $$ = parseContext.intermediate.addBranch(EOpContinue, $1.loc);
+ }
+ | BREAK SEMICOLON {
+ if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0)
+ parseContext.error($1.loc, "break statement only allowed in switch and loops", "", "");
+ $$ = parseContext.intermediate.addBranch(EOpBreak, $1.loc);
+ }
+ | RETURN SEMICOLON {
+ $$ = parseContext.intermediate.addBranch(EOpReturn, $1.loc);
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid)
+ parseContext.error($1.loc, "non-void function must return a value", "return", "");
+ if (parseContext.inMain)
+ parseContext.postEntryPointReturn = true;
+ }
+ | RETURN expression SEMICOLON {
+ $$ = parseContext.handleReturnValue($1.loc, $2);
+ }
+ | DISCARD SEMICOLON {
+ parseContext.requireStage($1.loc, EShLangFragment, "discard");
+ $$ = parseContext.intermediate.addBranch(EOpKill, $1.loc);
+ }
+ ;
+
+// Grammar Note: No 'goto'. Gotos are not supported.
+
+translation_unit
+ : external_declaration {
+ $$ = $1;
+ parseContext.intermediate.setTreeRoot($$);
+ }
+ | translation_unit external_declaration {
+ if ($2 != nullptr) {
+ $$ = parseContext.intermediate.growAggregate($1, $2);
+ parseContext.intermediate.setTreeRoot($$);
+ }
+ }
+ ;
+
+external_declaration
+ : function_definition {
+ $$ = $1;
+ }
+ | declaration {
+ $$ = $1;
+ }
+ | SEMICOLON {
+ parseContext.requireProfile($1.loc, ~EEsProfile, "extraneous semicolon");
+ parseContext.profileRequires($1.loc, ~EEsProfile, 460, nullptr, "extraneous semicolon");
+ $$ = nullptr;
+ }
+ ;
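+// Illustration only: a stray global-scope semicolon, e.g.
+//     void main() { };
+// is rejected in ES profiles and, before version 460, on desktop, per the
+// requireProfile()/profileRequires() calls above.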
+
+function_definition
+ : function_prototype {
+ $1.function = parseContext.handleFunctionDeclarator($1.loc, *$1.function, false /* not prototype */);
+ $1.intermNode = parseContext.handleFunctionDefinition($1.loc, *$1.function);
+ }
+ compound_statement_no_new_scope {
+        // This may be best done as a post-process phase on the intermediate code.
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! parseContext.functionReturnsValue)
+ parseContext.error($1.loc, "function does not return a value:", "", $1.function->getName().c_str());
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ $$ = parseContext.intermediate.growAggregate($1.intermNode, $3);
+ parseContext.intermediate.setAggregateOperator($$, EOpFunction, $1.function->getType(), $1.loc);
+ $$->getAsAggregate()->setName($1.function->getMangledName().c_str());
+
+        // Store the pragma information for debug, optimize, and other vendor-specific
+        // settings; this information can be queried from the parse tree.
+ $$->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize);
+ $$->getAsAggregate()->setDebug(parseContext.contextPragma.debug);
+ $$->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable);
+ }
+ ;
+
+attribute
+ : LEFT_BRACKET LEFT_BRACKET attribute_list RIGHT_BRACKET RIGHT_BRACKET {
+ $$ = $3;
+ parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_control_flow_attributes, "attribute");
+    }
+    ;
+
+attribute_list
+ : single_attribute {
+ $$ = $1;
+ }
+ | attribute_list COMMA single_attribute {
+ $$ = parseContext.mergeAttributes($1, $3);
+    }
+    ;
+
+single_attribute
+ : IDENTIFIER {
+ $$ = parseContext.makeAttributes(*$1.string);
+ }
+ | IDENTIFIER LEFT_PAREN constant_expression RIGHT_PAREN {
+ $$ = parseContext.makeAttributes(*$1.string, $3);
+    }
+    ;
+
+%%
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp
new file mode 100644
index 0000000..07feffe
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp
@@ -0,0 +1,10468 @@
+/* A Bison parser, made by GNU Bison 3.0.4. */
+
+/* Bison implementation for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "3.0.4"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 1
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+
+
+
+/* Copy the first part of user declarations. */
+#line 43 "MachineIndependent/glslang.y" /* yacc.c:339 */
+
+
+/* Based on:
+ANSI C Yacc grammar
+
+In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a
+matching Lex specification) for the April 30, 1985 draft version of the
+ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that
+original, as mentioned in the answer to question 17.25 of the comp.lang.c
+FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z.
+
+I intend to keep this version as close to the current C Standard grammar as
+possible; please let me know if you discover discrepancies.
+
+Jutta Degener, 1995
+*/
+
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "../Public/ShaderLang.h"
+#include "attribute.h"
+
+using namespace glslang;
+
+
+#line 92 "MachineIndependent/glslang_tab.cpp" /* yacc.c:339 */
+
+# ifndef YY_NULLPTR
+# if defined __cplusplus && 201103L <= __cplusplus
+# define YY_NULLPTR nullptr
+# else
+# define YY_NULLPTR 0
+# endif
+# endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 1
+#endif
+
+/* In a future release of Bison, this section will be replaced
+ by #include "glslang_tab.cpp.h". */
+#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int yydebug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ ATTRIBUTE = 258,
+ VARYING = 259,
+ FLOAT16_T = 260,
+ FLOAT = 261,
+ FLOAT32_T = 262,
+ DOUBLE = 263,
+ FLOAT64_T = 264,
+ CONST = 265,
+ BOOL = 266,
+ INT = 267,
+ UINT = 268,
+ INT64_T = 269,
+ UINT64_T = 270,
+ INT32_T = 271,
+ UINT32_T = 272,
+ INT16_T = 273,
+ UINT16_T = 274,
+ INT8_T = 275,
+ UINT8_T = 276,
+ BREAK = 277,
+ CONTINUE = 278,
+ DO = 279,
+ ELSE = 280,
+ FOR = 281,
+ IF = 282,
+ DISCARD = 283,
+ RETURN = 284,
+ SWITCH = 285,
+ CASE = 286,
+ DEFAULT = 287,
+ SUBROUTINE = 288,
+ BVEC2 = 289,
+ BVEC3 = 290,
+ BVEC4 = 291,
+ IVEC2 = 292,
+ IVEC3 = 293,
+ IVEC4 = 294,
+ UVEC2 = 295,
+ UVEC3 = 296,
+ UVEC4 = 297,
+ I64VEC2 = 298,
+ I64VEC3 = 299,
+ I64VEC4 = 300,
+ U64VEC2 = 301,
+ U64VEC3 = 302,
+ U64VEC4 = 303,
+ I32VEC2 = 304,
+ I32VEC3 = 305,
+ I32VEC4 = 306,
+ U32VEC2 = 307,
+ U32VEC3 = 308,
+ U32VEC4 = 309,
+ I16VEC2 = 310,
+ I16VEC3 = 311,
+ I16VEC4 = 312,
+ U16VEC2 = 313,
+ U16VEC3 = 314,
+ U16VEC4 = 315,
+ I8VEC2 = 316,
+ I8VEC3 = 317,
+ I8VEC4 = 318,
+ U8VEC2 = 319,
+ U8VEC3 = 320,
+ U8VEC4 = 321,
+ VEC2 = 322,
+ VEC3 = 323,
+ VEC4 = 324,
+ MAT2 = 325,
+ MAT3 = 326,
+ MAT4 = 327,
+ CENTROID = 328,
+ IN = 329,
+ OUT = 330,
+ INOUT = 331,
+ UNIFORM = 332,
+ PATCH = 333,
+ SAMPLE = 334,
+ BUFFER = 335,
+ SHARED = 336,
+ NONUNIFORM = 337,
+ PAYLOADNV = 338,
+ PAYLOADINNV = 339,
+ HITATTRNV = 340,
+ CALLDATANV = 341,
+ CALLDATAINNV = 342,
+ COHERENT = 343,
+ VOLATILE = 344,
+ RESTRICT = 345,
+ READONLY = 346,
+ WRITEONLY = 347,
+ DEVICECOHERENT = 348,
+ QUEUEFAMILYCOHERENT = 349,
+ WORKGROUPCOHERENT = 350,
+ SUBGROUPCOHERENT = 351,
+ NONPRIVATE = 352,
+ DVEC2 = 353,
+ DVEC3 = 354,
+ DVEC4 = 355,
+ DMAT2 = 356,
+ DMAT3 = 357,
+ DMAT4 = 358,
+ F16VEC2 = 359,
+ F16VEC3 = 360,
+ F16VEC4 = 361,
+ F16MAT2 = 362,
+ F16MAT3 = 363,
+ F16MAT4 = 364,
+ F32VEC2 = 365,
+ F32VEC3 = 366,
+ F32VEC4 = 367,
+ F32MAT2 = 368,
+ F32MAT3 = 369,
+ F32MAT4 = 370,
+ F64VEC2 = 371,
+ F64VEC3 = 372,
+ F64VEC4 = 373,
+ F64MAT2 = 374,
+ F64MAT3 = 375,
+ F64MAT4 = 376,
+ NOPERSPECTIVE = 377,
+ FLAT = 378,
+ SMOOTH = 379,
+ LAYOUT = 380,
+ EXPLICITINTERPAMD = 381,
+ PERVERTEXNV = 382,
+ PERPRIMITIVENV = 383,
+ PERVIEWNV = 384,
+ PERTASKNV = 385,
+ MAT2X2 = 386,
+ MAT2X3 = 387,
+ MAT2X4 = 388,
+ MAT3X2 = 389,
+ MAT3X3 = 390,
+ MAT3X4 = 391,
+ MAT4X2 = 392,
+ MAT4X3 = 393,
+ MAT4X4 = 394,
+ DMAT2X2 = 395,
+ DMAT2X3 = 396,
+ DMAT2X4 = 397,
+ DMAT3X2 = 398,
+ DMAT3X3 = 399,
+ DMAT3X4 = 400,
+ DMAT4X2 = 401,
+ DMAT4X3 = 402,
+ DMAT4X4 = 403,
+ F16MAT2X2 = 404,
+ F16MAT2X3 = 405,
+ F16MAT2X4 = 406,
+ F16MAT3X2 = 407,
+ F16MAT3X3 = 408,
+ F16MAT3X4 = 409,
+ F16MAT4X2 = 410,
+ F16MAT4X3 = 411,
+ F16MAT4X4 = 412,
+ F32MAT2X2 = 413,
+ F32MAT2X3 = 414,
+ F32MAT2X4 = 415,
+ F32MAT3X2 = 416,
+ F32MAT3X3 = 417,
+ F32MAT3X4 = 418,
+ F32MAT4X2 = 419,
+ F32MAT4X3 = 420,
+ F32MAT4X4 = 421,
+ F64MAT2X2 = 422,
+ F64MAT2X3 = 423,
+ F64MAT2X4 = 424,
+ F64MAT3X2 = 425,
+ F64MAT3X3 = 426,
+ F64MAT3X4 = 427,
+ F64MAT4X2 = 428,
+ F64MAT4X3 = 429,
+ F64MAT4X4 = 430,
+ ATOMIC_UINT = 431,
+ ACCSTRUCTNV = 432,
+ FCOOPMATNV = 433,
+ SAMPLER1D = 434,
+ SAMPLER2D = 435,
+ SAMPLER3D = 436,
+ SAMPLERCUBE = 437,
+ SAMPLER1DSHADOW = 438,
+ SAMPLER2DSHADOW = 439,
+ SAMPLERCUBESHADOW = 440,
+ SAMPLER1DARRAY = 441,
+ SAMPLER2DARRAY = 442,
+ SAMPLER1DARRAYSHADOW = 443,
+ SAMPLER2DARRAYSHADOW = 444,
+ ISAMPLER1D = 445,
+ ISAMPLER2D = 446,
+ ISAMPLER3D = 447,
+ ISAMPLERCUBE = 448,
+ ISAMPLER1DARRAY = 449,
+ ISAMPLER2DARRAY = 450,
+ USAMPLER1D = 451,
+ USAMPLER2D = 452,
+ USAMPLER3D = 453,
+ USAMPLERCUBE = 454,
+ USAMPLER1DARRAY = 455,
+ USAMPLER2DARRAY = 456,
+ SAMPLER2DRECT = 457,
+ SAMPLER2DRECTSHADOW = 458,
+ ISAMPLER2DRECT = 459,
+ USAMPLER2DRECT = 460,
+ SAMPLERBUFFER = 461,
+ ISAMPLERBUFFER = 462,
+ USAMPLERBUFFER = 463,
+ SAMPLERCUBEARRAY = 464,
+ SAMPLERCUBEARRAYSHADOW = 465,
+ ISAMPLERCUBEARRAY = 466,
+ USAMPLERCUBEARRAY = 467,
+ SAMPLER2DMS = 468,
+ ISAMPLER2DMS = 469,
+ USAMPLER2DMS = 470,
+ SAMPLER2DMSARRAY = 471,
+ ISAMPLER2DMSARRAY = 472,
+ USAMPLER2DMSARRAY = 473,
+ SAMPLEREXTERNALOES = 474,
+ SAMPLEREXTERNAL2DY2YEXT = 475,
+ F16SAMPLER1D = 476,
+ F16SAMPLER2D = 477,
+ F16SAMPLER3D = 478,
+ F16SAMPLER2DRECT = 479,
+ F16SAMPLERCUBE = 480,
+ F16SAMPLER1DARRAY = 481,
+ F16SAMPLER2DARRAY = 482,
+ F16SAMPLERCUBEARRAY = 483,
+ F16SAMPLERBUFFER = 484,
+ F16SAMPLER2DMS = 485,
+ F16SAMPLER2DMSARRAY = 486,
+ F16SAMPLER1DSHADOW = 487,
+ F16SAMPLER2DSHADOW = 488,
+ F16SAMPLER1DARRAYSHADOW = 489,
+ F16SAMPLER2DARRAYSHADOW = 490,
+ F16SAMPLER2DRECTSHADOW = 491,
+ F16SAMPLERCUBESHADOW = 492,
+ F16SAMPLERCUBEARRAYSHADOW = 493,
+ SAMPLER = 494,
+ SAMPLERSHADOW = 495,
+ TEXTURE1D = 496,
+ TEXTURE2D = 497,
+ TEXTURE3D = 498,
+ TEXTURECUBE = 499,
+ TEXTURE1DARRAY = 500,
+ TEXTURE2DARRAY = 501,
+ ITEXTURE1D = 502,
+ ITEXTURE2D = 503,
+ ITEXTURE3D = 504,
+ ITEXTURECUBE = 505,
+ ITEXTURE1DARRAY = 506,
+ ITEXTURE2DARRAY = 507,
+ UTEXTURE1D = 508,
+ UTEXTURE2D = 509,
+ UTEXTURE3D = 510,
+ UTEXTURECUBE = 511,
+ UTEXTURE1DARRAY = 512,
+ UTEXTURE2DARRAY = 513,
+ TEXTURE2DRECT = 514,
+ ITEXTURE2DRECT = 515,
+ UTEXTURE2DRECT = 516,
+ TEXTUREBUFFER = 517,
+ ITEXTUREBUFFER = 518,
+ UTEXTUREBUFFER = 519,
+ TEXTURECUBEARRAY = 520,
+ ITEXTURECUBEARRAY = 521,
+ UTEXTURECUBEARRAY = 522,
+ TEXTURE2DMS = 523,
+ ITEXTURE2DMS = 524,
+ UTEXTURE2DMS = 525,
+ TEXTURE2DMSARRAY = 526,
+ ITEXTURE2DMSARRAY = 527,
+ UTEXTURE2DMSARRAY = 528,
+ F16TEXTURE1D = 529,
+ F16TEXTURE2D = 530,
+ F16TEXTURE3D = 531,
+ F16TEXTURE2DRECT = 532,
+ F16TEXTURECUBE = 533,
+ F16TEXTURE1DARRAY = 534,
+ F16TEXTURE2DARRAY = 535,
+ F16TEXTURECUBEARRAY = 536,
+ F16TEXTUREBUFFER = 537,
+ F16TEXTURE2DMS = 538,
+ F16TEXTURE2DMSARRAY = 539,
+ SUBPASSINPUT = 540,
+ SUBPASSINPUTMS = 541,
+ ISUBPASSINPUT = 542,
+ ISUBPASSINPUTMS = 543,
+ USUBPASSINPUT = 544,
+ USUBPASSINPUTMS = 545,
+ F16SUBPASSINPUT = 546,
+ F16SUBPASSINPUTMS = 547,
+ IMAGE1D = 548,
+ IIMAGE1D = 549,
+ UIMAGE1D = 550,
+ IMAGE2D = 551,
+ IIMAGE2D = 552,
+ UIMAGE2D = 553,
+ IMAGE3D = 554,
+ IIMAGE3D = 555,
+ UIMAGE3D = 556,
+ IMAGE2DRECT = 557,
+ IIMAGE2DRECT = 558,
+ UIMAGE2DRECT = 559,
+ IMAGECUBE = 560,
+ IIMAGECUBE = 561,
+ UIMAGECUBE = 562,
+ IMAGEBUFFER = 563,
+ IIMAGEBUFFER = 564,
+ UIMAGEBUFFER = 565,
+ IMAGE1DARRAY = 566,
+ IIMAGE1DARRAY = 567,
+ UIMAGE1DARRAY = 568,
+ IMAGE2DARRAY = 569,
+ IIMAGE2DARRAY = 570,
+ UIMAGE2DARRAY = 571,
+ IMAGECUBEARRAY = 572,
+ IIMAGECUBEARRAY = 573,
+ UIMAGECUBEARRAY = 574,
+ IMAGE2DMS = 575,
+ IIMAGE2DMS = 576,
+ UIMAGE2DMS = 577,
+ IMAGE2DMSARRAY = 578,
+ IIMAGE2DMSARRAY = 579,
+ UIMAGE2DMSARRAY = 580,
+ F16IMAGE1D = 581,
+ F16IMAGE2D = 582,
+ F16IMAGE3D = 583,
+ F16IMAGE2DRECT = 584,
+ F16IMAGECUBE = 585,
+ F16IMAGE1DARRAY = 586,
+ F16IMAGE2DARRAY = 587,
+ F16IMAGECUBEARRAY = 588,
+ F16IMAGEBUFFER = 589,
+ F16IMAGE2DMS = 590,
+ F16IMAGE2DMSARRAY = 591,
+ STRUCT = 592,
+ VOID = 593,
+ WHILE = 594,
+ IDENTIFIER = 595,
+ TYPE_NAME = 596,
+ FLOATCONSTANT = 597,
+ DOUBLECONSTANT = 598,
+ INT16CONSTANT = 599,
+ UINT16CONSTANT = 600,
+ INT32CONSTANT = 601,
+ UINT32CONSTANT = 602,
+ INTCONSTANT = 603,
+ UINTCONSTANT = 604,
+ INT64CONSTANT = 605,
+ UINT64CONSTANT = 606,
+ BOOLCONSTANT = 607,
+ FLOAT16CONSTANT = 608,
+ LEFT_OP = 609,
+ RIGHT_OP = 610,
+ INC_OP = 611,
+ DEC_OP = 612,
+ LE_OP = 613,
+ GE_OP = 614,
+ EQ_OP = 615,
+ NE_OP = 616,
+ AND_OP = 617,
+ OR_OP = 618,
+ XOR_OP = 619,
+ MUL_ASSIGN = 620,
+ DIV_ASSIGN = 621,
+ ADD_ASSIGN = 622,
+ MOD_ASSIGN = 623,
+ LEFT_ASSIGN = 624,
+ RIGHT_ASSIGN = 625,
+ AND_ASSIGN = 626,
+ XOR_ASSIGN = 627,
+ OR_ASSIGN = 628,
+ SUB_ASSIGN = 629,
+ LEFT_PAREN = 630,
+ RIGHT_PAREN = 631,
+ LEFT_BRACKET = 632,
+ RIGHT_BRACKET = 633,
+ LEFT_BRACE = 634,
+ RIGHT_BRACE = 635,
+ DOT = 636,
+ COMMA = 637,
+ COLON = 638,
+ EQUAL = 639,
+ SEMICOLON = 640,
+ BANG = 641,
+ DASH = 642,
+ TILDE = 643,
+ PLUS = 644,
+ STAR = 645,
+ SLASH = 646,
+ PERCENT = 647,
+ LEFT_ANGLE = 648,
+ RIGHT_ANGLE = 649,
+ VERTICAL_BAR = 650,
+ CARET = 651,
+ AMPERSAND = 652,
+ QUESTION = 653,
+ INVARIANT = 654,
+ PRECISE = 655,
+ HIGH_PRECISION = 656,
+ MEDIUM_PRECISION = 657,
+ LOW_PRECISION = 658,
+ PRECISION = 659,
+ PACKED = 660,
+ RESOURCE = 661,
+ SUPERP = 662
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+
+union YYSTYPE
+{
+#line 71 "MachineIndependent/glslang.y" /* yacc.c:355 */
+
+ struct {
+ glslang::TSourceLoc loc;
+ union {
+ glslang::TString *string;
+ int i;
+ unsigned int u;
+ long long i64;
+ unsigned long long u64;
+ bool b;
+ double d;
+ };
+ glslang::TSymbol* symbol;
+ } lex;
+ struct {
+ glslang::TSourceLoc loc;
+ glslang::TOperator op;
+ union {
+ TIntermNode* intermNode;
+ glslang::TIntermNodePair nodePair;
+ glslang::TIntermTyped* intermTypedNode;
+ glslang::TAttributes* attributes;
+ };
+ union {
+ glslang::TPublicType type;
+ glslang::TFunction* function;
+ glslang::TParameter param;
+ glslang::TTypeLoc typeLine;
+ glslang::TTypeList* typeList;
+ glslang::TArraySizes* arraySizes;
+ glslang::TIdentifierList* identifierList;
+ };
+ glslang::TArraySizes* typeParameters;
+ } interm;
+
+#line 576 "MachineIndependent/glslang_tab.cpp" /* yacc.c:355 */
+};
+
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+
+int yyparse (glslang::TParseContext* pParseContext);
+
+#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */
+
+/* Copy the second part of user declarations. */
+#line 107 "MachineIndependent/glslang.y" /* yacc.c:358 */
+
+
+/* MSVC-only warning pragmas */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4065)
+ #pragma warning(disable : 4127)
+ #pragma warning(disable : 4244)
+#endif
+
+#define parseContext (*pParseContext)
+#define yyerror(context, msg) context->parserError(msg)
+
+extern int yylex(YYSTYPE*, TParseContext&);
+
+
+#line 607 "MachineIndependent/glslang_tab.cpp" /* yacc.c:358 */
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#else
+typedef signed char yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short int yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short int yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned int
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE
+# if (defined __GNUC__ \
+ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
+ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
+# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
+# else
+# define YY_ATTRIBUTE(Spec) /* empty */
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
+#endif
+
+#if !defined _Noreturn \
+ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
+# if defined _MSC_VER && 1200 <= _MSC_VER
+# define _Noreturn __declspec (noreturn)
+# else
+# define _Noreturn YY_ATTRIBUTE ((__noreturn__))
+# endif
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(E) ((void) (E))
+#else
+# define YYUSE(E) /* empty */
+#endif
+
+#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
+#else
+# define YY_INITIAL_VALUE(Value) Value
+#endif
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+ /* Use EXIT_SUCCESS as a witness for stdlib.h. */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss_alloc;
+ YYSTYPE yyvs_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
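+/* Illustration only (assuming the skeleton's usual YYINITDEPTH of 200):
+   YYSTACK_BYTES (200) reserves room for 200 yytype_int16 state entries plus
+   200 YYSTYPE semantic values, plus the worst-case alignment gap.  */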
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from SRC to DST. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(Dst, Src, Count) \
+ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src)))
+# else
+# define YYCOPY(Dst, Src, Count) \
+ do \
+ { \
+ YYSIZE_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (Dst)[yyi] = (Src)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 384
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 9348
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 408
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 110
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 578
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 722
+
+/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
+ by yylex, with out-of-bounds checking. */
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 662
+
+#define YYTRANSLATE(YYX) \
+ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
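+/* Illustration only: YYTRANSLATE (258) == 3, i.e. the first user token
+   (ATTRIBUTE = 258) maps to internal symbol 3, whose yytname entry is
+   "ATTRIBUTE"; anything above YYMAXUTOK (662) collapses to YYUNDEFTOK.  */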
+
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex, without out-of-bounds checking. */
+static const yytype_uint16 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
+ 345, 346, 347, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 361, 362, 363, 364,
+ 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394,
+ 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407
+};
+
+#if YYDEBUG
+ /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
+static const yytype_uint16 yyrline[] =
+{
+ 0, 302, 302, 308, 311, 315, 319, 322, 326, 330,
+ 334, 338, 342, 345, 349, 353, 356, 364, 367, 370,
+ 373, 376, 381, 389, 396, 403, 409, 413, 420, 423,
+ 429, 436, 446, 454, 459, 486, 494, 500, 504, 508,
+ 528, 529, 530, 531, 537, 538, 543, 548, 557, 558,
+ 563, 571, 572, 578, 587, 588, 593, 598, 603, 611,
+ 612, 621, 633, 634, 643, 644, 653, 654, 663, 664,
+ 672, 673, 681, 682, 690, 691, 691, 709, 710, 726,
+ 730, 734, 738, 743, 747, 751, 755, 759, 763, 767,
+ 774, 777, 788, 795, 800, 805, 813, 817, 821, 825,
+ 830, 835, 844, 844, 855, 859, 866, 873, 876, 883,
+ 891, 911, 934, 949, 974, 985, 995, 1005, 1015, 1024,
+ 1027, 1031, 1035, 1040, 1048, 1053, 1058, 1063, 1068, 1077,
+ 1088, 1115, 1124, 1131, 1138, 1149, 1158, 1168, 1180, 1189,
+ 1201, 1207, 1210, 1217, 1221, 1225, 1233, 1242, 1245, 1256,
+ 1259, 1262, 1266, 1270, 1274, 1278, 1284, 1288, 1300, 1314,
+ 1319, 1325, 1331, 1338, 1344, 1349, 1354, 1359, 1369, 1379,
+ 1389, 1399, 1408, 1420, 1424, 1429, 1434, 1439, 1444, 1449,
+ 1453, 1457, 1461, 1465, 1471, 1480, 1487, 1490, 1498, 1503,
+ 1513, 1518, 1526, 1530, 1540, 1543, 1549, 1555, 1562, 1572,
+ 1576, 1580, 1585, 1590, 1595, 1600, 1604, 1609, 1614, 1619,
+ 1624, 1629, 1634, 1639, 1644, 1649, 1653, 1658, 1663, 1668,
+ 1674, 1680, 1686, 1692, 1698, 1704, 1710, 1716, 1722, 1728,
+ 1734, 1740, 1745, 1750, 1755, 1760, 1765, 1770, 1776, 1782,
+ 1788, 1794, 1800, 1806, 1812, 1818, 1824, 1830, 1836, 1842,
+ 1848, 1854, 1860, 1866, 1872, 1878, 1884, 1890, 1896, 1902,
+ 1908, 1914, 1920, 1926, 1932, 1937, 1942, 1947, 1952, 1957,
+ 1962, 1967, 1972, 1977, 1982, 1987, 1992, 1998, 2004, 2010,
+ 2016, 2022, 2028, 2034, 2040, 2046, 2052, 2058, 2064, 2070,
+ 2076, 2082, 2088, 2094, 2100, 2106, 2112, 2118, 2124, 2130,
+ 2136, 2142, 2148, 2154, 2160, 2166, 2172, 2178, 2184, 2190,
+ 2196, 2202, 2208, 2214, 2220, 2226, 2232, 2238, 2244, 2250,
+ 2256, 2262, 2268, 2274, 2280, 2286, 2291, 2296, 2301, 2306,
+ 2311, 2316, 2321, 2326, 2331, 2336, 2341, 2346, 2351, 2356,
+ 2364, 2372, 2380, 2388, 2396, 2404, 2412, 2420, 2428, 2436,
+ 2444, 2452, 2460, 2465, 2470, 2475, 2480, 2485, 2490, 2495,
+ 2500, 2505, 2510, 2515, 2520, 2525, 2530, 2535, 2540, 2548,
+ 2556, 2561, 2566, 2571, 2579, 2584, 2589, 2594, 2602, 2607,
+ 2612, 2617, 2625, 2630, 2635, 2640, 2645, 2650, 2658, 2663,
+ 2671, 2676, 2684, 2689, 2697, 2702, 2710, 2715, 2723, 2728,
+ 2736, 2741, 2746, 2751, 2756, 2761, 2766, 2771, 2776, 2781,
+ 2786, 2791, 2796, 2801, 2806, 2811, 2819, 2824, 2829, 2834,
+ 2842, 2847, 2852, 2857, 2865, 2870, 2875, 2880, 2888, 2893,
+ 2898, 2903, 2911, 2916, 2921, 2926, 2934, 2939, 2944, 2949,
+ 2957, 2962, 2967, 2972, 2980, 2985, 2990, 2995, 3003, 3008,
+ 3013, 3018, 3026, 3031, 3036, 3041, 3049, 3054, 3059, 3064,
+ 3072, 3077, 3082, 3087, 3095, 3100, 3105, 3110, 3118, 3123,
+ 3128, 3133, 3141, 3146, 3151, 3157, 3163, 3169, 3175, 3184,
+ 3193, 3199, 3205, 3211, 3217, 3223, 3228, 3244, 3249, 3254,
+ 3262, 3262, 3273, 3273, 3283, 3286, 3299, 3321, 3348, 3352,
+ 3358, 3363, 3374, 3377, 3383, 3392, 3395, 3401, 3405, 3406,
+ 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3422, 3423, 3427,
+ 3423, 3439, 3440, 3444, 3444, 3451, 3451, 3465, 3468, 3476,
+ 3484, 3495, 3496, 3500, 3503, 3509, 3516, 3520, 3528, 3532,
+ 3545, 3548, 3554, 3554, 3574, 3577, 3583, 3595, 3607, 3610,
+ 3616, 3616, 3631, 3631, 3647, 3647, 3668, 3671, 3677, 3680,
+ 3686, 3690, 3697, 3702, 3707, 3714, 3717, 3726, 3730, 3739,
+ 3742, 3745, 3753, 3753, 3775, 3781, 3784, 3789, 3792
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || 1
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "ATTRIBUTE", "VARYING", "FLOAT16_T",
+ "FLOAT", "FLOAT32_T", "DOUBLE", "FLOAT64_T", "CONST", "BOOL", "INT",
+ "UINT", "INT64_T", "UINT64_T", "INT32_T", "UINT32_T", "INT16_T",
+ "UINT16_T", "INT8_T", "UINT8_T", "BREAK", "CONTINUE", "DO", "ELSE",
+ "FOR", "IF", "DISCARD", "RETURN", "SWITCH", "CASE", "DEFAULT",
+ "SUBROUTINE", "BVEC2", "BVEC3", "BVEC4", "IVEC2", "IVEC3", "IVEC4",
+ "UVEC2", "UVEC3", "UVEC4", "I64VEC2", "I64VEC3", "I64VEC4", "U64VEC2",
+ "U64VEC3", "U64VEC4", "I32VEC2", "I32VEC3", "I32VEC4", "U32VEC2",
+ "U32VEC3", "U32VEC4", "I16VEC2", "I16VEC3", "I16VEC4", "U16VEC2",
+ "U16VEC3", "U16VEC4", "I8VEC2", "I8VEC3", "I8VEC4", "U8VEC2", "U8VEC3",
+ "U8VEC4", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3", "MAT4", "CENTROID",
+ "IN", "OUT", "INOUT", "UNIFORM", "PATCH", "SAMPLE", "BUFFER", "SHARED",
+ "NONUNIFORM", "PAYLOADNV", "PAYLOADINNV", "HITATTRNV", "CALLDATANV",
+ "CALLDATAINNV", "COHERENT", "VOLATILE", "RESTRICT", "READONLY",
+ "WRITEONLY", "DEVICECOHERENT", "QUEUEFAMILYCOHERENT",
+ "WORKGROUPCOHERENT", "SUBGROUPCOHERENT", "NONPRIVATE", "DVEC2", "DVEC3",
+ "DVEC4", "DMAT2", "DMAT3", "DMAT4", "F16VEC2", "F16VEC3", "F16VEC4",
+ "F16MAT2", "F16MAT3", "F16MAT4", "F32VEC2", "F32VEC3", "F32VEC4",
+ "F32MAT2", "F32MAT3", "F32MAT4", "F64VEC2", "F64VEC3", "F64VEC4",
+ "F64MAT2", "F64MAT3", "F64MAT4", "NOPERSPECTIVE", "FLAT", "SMOOTH",
+ "LAYOUT", "EXPLICITINTERPAMD", "PERVERTEXNV", "PERPRIMITIVENV",
+ "PERVIEWNV", "PERTASKNV", "MAT2X2", "MAT2X3", "MAT2X4", "MAT3X2",
+ "MAT3X3", "MAT3X4", "MAT4X2", "MAT4X3", "MAT4X4", "DMAT2X2", "DMAT2X3",
+ "DMAT2X4", "DMAT3X2", "DMAT3X3", "DMAT3X4", "DMAT4X2", "DMAT4X3",
+ "DMAT4X4", "F16MAT2X2", "F16MAT2X3", "F16MAT2X4", "F16MAT3X2",
+ "F16MAT3X3", "F16MAT3X4", "F16MAT4X2", "F16MAT4X3", "F16MAT4X4",
+ "F32MAT2X2", "F32MAT2X3", "F32MAT2X4", "F32MAT3X2", "F32MAT3X3",
+ "F32MAT3X4", "F32MAT4X2", "F32MAT4X3", "F32MAT4X4", "F64MAT2X2",
+ "F64MAT2X3", "F64MAT2X4", "F64MAT3X2", "F64MAT3X3", "F64MAT3X4",
+ "F64MAT4X2", "F64MAT4X3", "F64MAT4X4", "ATOMIC_UINT", "ACCSTRUCTNV",
+ "FCOOPMATNV", "SAMPLER1D", "SAMPLER2D", "SAMPLER3D", "SAMPLERCUBE",
+ "SAMPLER1DSHADOW", "SAMPLER2DSHADOW", "SAMPLERCUBESHADOW",
+ "SAMPLER1DARRAY", "SAMPLER2DARRAY", "SAMPLER1DARRAYSHADOW",
+ "SAMPLER2DARRAYSHADOW", "ISAMPLER1D", "ISAMPLER2D", "ISAMPLER3D",
+ "ISAMPLERCUBE", "ISAMPLER1DARRAY", "ISAMPLER2DARRAY", "USAMPLER1D",
+ "USAMPLER2D", "USAMPLER3D", "USAMPLERCUBE", "USAMPLER1DARRAY",
+ "USAMPLER2DARRAY", "SAMPLER2DRECT", "SAMPLER2DRECTSHADOW",
+ "ISAMPLER2DRECT", "USAMPLER2DRECT", "SAMPLERBUFFER", "ISAMPLERBUFFER",
+ "USAMPLERBUFFER", "SAMPLERCUBEARRAY", "SAMPLERCUBEARRAYSHADOW",
+ "ISAMPLERCUBEARRAY", "USAMPLERCUBEARRAY", "SAMPLER2DMS", "ISAMPLER2DMS",
+ "USAMPLER2DMS", "SAMPLER2DMSARRAY", "ISAMPLER2DMSARRAY",
+ "USAMPLER2DMSARRAY", "SAMPLEREXTERNALOES", "SAMPLEREXTERNAL2DY2YEXT",
+ "F16SAMPLER1D", "F16SAMPLER2D", "F16SAMPLER3D", "F16SAMPLER2DRECT",
+ "F16SAMPLERCUBE", "F16SAMPLER1DARRAY", "F16SAMPLER2DARRAY",
+ "F16SAMPLERCUBEARRAY", "F16SAMPLERBUFFER", "F16SAMPLER2DMS",
+ "F16SAMPLER2DMSARRAY", "F16SAMPLER1DSHADOW", "F16SAMPLER2DSHADOW",
+ "F16SAMPLER1DARRAYSHADOW", "F16SAMPLER2DARRAYSHADOW",
+ "F16SAMPLER2DRECTSHADOW", "F16SAMPLERCUBESHADOW",
+ "F16SAMPLERCUBEARRAYSHADOW", "SAMPLER", "SAMPLERSHADOW", "TEXTURE1D",
+ "TEXTURE2D", "TEXTURE3D", "TEXTURECUBE", "TEXTURE1DARRAY",
+ "TEXTURE2DARRAY", "ITEXTURE1D", "ITEXTURE2D", "ITEXTURE3D",
+ "ITEXTURECUBE", "ITEXTURE1DARRAY", "ITEXTURE2DARRAY", "UTEXTURE1D",
+ "UTEXTURE2D", "UTEXTURE3D", "UTEXTURECUBE", "UTEXTURE1DARRAY",
+ "UTEXTURE2DARRAY", "TEXTURE2DRECT", "ITEXTURE2DRECT", "UTEXTURE2DRECT",
+ "TEXTUREBUFFER", "ITEXTUREBUFFER", "UTEXTUREBUFFER", "TEXTURECUBEARRAY",
+ "ITEXTURECUBEARRAY", "UTEXTURECUBEARRAY", "TEXTURE2DMS", "ITEXTURE2DMS",
+ "UTEXTURE2DMS", "TEXTURE2DMSARRAY", "ITEXTURE2DMSARRAY",
+ "UTEXTURE2DMSARRAY", "F16TEXTURE1D", "F16TEXTURE2D", "F16TEXTURE3D",
+ "F16TEXTURE2DRECT", "F16TEXTURECUBE", "F16TEXTURE1DARRAY",
+ "F16TEXTURE2DARRAY", "F16TEXTURECUBEARRAY", "F16TEXTUREBUFFER",
+ "F16TEXTURE2DMS", "F16TEXTURE2DMSARRAY", "SUBPASSINPUT",
+ "SUBPASSINPUTMS", "ISUBPASSINPUT", "ISUBPASSINPUTMS", "USUBPASSINPUT",
+ "USUBPASSINPUTMS", "F16SUBPASSINPUT", "F16SUBPASSINPUTMS", "IMAGE1D",
+ "IIMAGE1D", "UIMAGE1D", "IMAGE2D", "IIMAGE2D", "UIMAGE2D", "IMAGE3D",
+ "IIMAGE3D", "UIMAGE3D", "IMAGE2DRECT", "IIMAGE2DRECT", "UIMAGE2DRECT",
+ "IMAGECUBE", "IIMAGECUBE", "UIMAGECUBE", "IMAGEBUFFER", "IIMAGEBUFFER",
+ "UIMAGEBUFFER", "IMAGE1DARRAY", "IIMAGE1DARRAY", "UIMAGE1DARRAY",
+ "IMAGE2DARRAY", "IIMAGE2DARRAY", "UIMAGE2DARRAY", "IMAGECUBEARRAY",
+ "IIMAGECUBEARRAY", "UIMAGECUBEARRAY", "IMAGE2DMS", "IIMAGE2DMS",
+ "UIMAGE2DMS", "IMAGE2DMSARRAY", "IIMAGE2DMSARRAY", "UIMAGE2DMSARRAY",
+ "F16IMAGE1D", "F16IMAGE2D", "F16IMAGE3D", "F16IMAGE2DRECT",
+ "F16IMAGECUBE", "F16IMAGE1DARRAY", "F16IMAGE2DARRAY",
+ "F16IMAGECUBEARRAY", "F16IMAGEBUFFER", "F16IMAGE2DMS",
+ "F16IMAGE2DMSARRAY", "STRUCT", "VOID", "WHILE", "IDENTIFIER",
+ "TYPE_NAME", "FLOATCONSTANT", "DOUBLECONSTANT", "INT16CONSTANT",
+ "UINT16CONSTANT", "INT32CONSTANT", "UINT32CONSTANT", "INTCONSTANT",
+ "UINTCONSTANT", "INT64CONSTANT", "UINT64CONSTANT", "BOOLCONSTANT",
+ "FLOAT16CONSTANT", "LEFT_OP", "RIGHT_OP", "INC_OP", "DEC_OP", "LE_OP",
+ "GE_OP", "EQ_OP", "NE_OP", "AND_OP", "OR_OP", "XOR_OP", "MUL_ASSIGN",
+ "DIV_ASSIGN", "ADD_ASSIGN", "MOD_ASSIGN", "LEFT_ASSIGN", "RIGHT_ASSIGN",
+ "AND_ASSIGN", "XOR_ASSIGN", "OR_ASSIGN", "SUB_ASSIGN", "LEFT_PAREN",
+ "RIGHT_PAREN", "LEFT_BRACKET", "RIGHT_BRACKET", "LEFT_BRACE",
+ "RIGHT_BRACE", "DOT", "COMMA", "COLON", "EQUAL", "SEMICOLON", "BANG",
+ "DASH", "TILDE", "PLUS", "STAR", "SLASH", "PERCENT", "LEFT_ANGLE",
+ "RIGHT_ANGLE", "VERTICAL_BAR", "CARET", "AMPERSAND", "QUESTION",
+ "INVARIANT", "PRECISE", "HIGH_PRECISION", "MEDIUM_PRECISION",
+ "LOW_PRECISION", "PRECISION", "PACKED", "RESOURCE", "SUPERP", "$accept",
+ "variable_identifier", "primary_expression", "postfix_expression",
+ "integer_expression", "function_call", "function_call_or_method",
+ "function_call_generic", "function_call_header_no_parameters",
+ "function_call_header_with_parameters", "function_call_header",
+ "function_identifier", "unary_expression", "unary_operator",
+ "multiplicative_expression", "additive_expression", "shift_expression",
+ "relational_expression", "equality_expression", "and_expression",
+ "exclusive_or_expression", "inclusive_or_expression",
+ "logical_and_expression", "logical_xor_expression",
+ "logical_or_expression", "conditional_expression", "$@1",
+ "assignment_expression", "assignment_operator", "expression",
+ "constant_expression", "declaration", "block_structure", "$@2",
+ "identifier_list", "function_prototype", "function_declarator",
+ "function_header_with_parameters", "function_header",
+ "parameter_declarator", "parameter_declaration",
+ "parameter_type_specifier", "init_declarator_list", "single_declaration",
+ "fully_specified_type", "invariant_qualifier", "interpolation_qualifier",
+ "layout_qualifier", "layout_qualifier_id_list", "layout_qualifier_id",
+ "precise_qualifier", "type_qualifier", "single_type_qualifier",
+ "storage_qualifier", "non_uniform_qualifier", "type_name_list",
+ "type_specifier", "array_specifier", "type_parameter_specifier_opt",
+ "type_parameter_specifier", "type_parameter_specifier_list",
+ "type_specifier_nonarray", "precision_qualifier", "struct_specifier",
+ "$@3", "$@4", "struct_declaration_list", "struct_declaration",
+ "struct_declarator_list", "struct_declarator", "initializer",
+ "initializer_list", "declaration_statement", "statement",
+ "simple_statement", "compound_statement", "$@5", "$@6",
+ "statement_no_new_scope", "statement_scoped", "$@7", "$@8",
+ "compound_statement_no_new_scope", "statement_list",
+ "expression_statement", "selection_statement",
+ "selection_statement_nonattributed", "selection_rest_statement",
+ "condition", "switch_statement", "switch_statement_nonattributed", "$@9",
+ "switch_statement_list", "case_label", "iteration_statement",
+ "iteration_statement_nonattributed", "$@10", "$@11", "$@12",
+ "for_init_statement", "conditionopt", "for_rest_statement",
+ "jump_statement", "translation_unit", "external_declaration",
+ "function_definition", "$@13", "attribute", "attribute_list",
+ "single_attribute", YY_NULLPTR
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+ (internal) symbol number NUM (which must be that of a token). */
+static const yytype_uint16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
+ 345, 346, 347, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 361, 362, 363, 364,
+ 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394,
+ 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 409, 410, 411, 412, 413, 414,
+ 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 428, 429, 430, 431, 432, 433, 434,
+ 435, 436, 437, 438, 439, 440, 441, 442, 443, 444,
+ 445, 446, 447, 448, 449, 450, 451, 452, 453, 454,
+ 455, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 465, 466, 467, 468, 469, 470, 471, 472, 473, 474,
+ 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 487, 488, 489, 490, 491, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504,
+ 505, 506, 507, 508, 509, 510, 511, 512, 513, 514,
+ 515, 516, 517, 518, 519, 520, 521, 522, 523, 524,
+ 525, 526, 527, 528, 529, 530, 531, 532, 533, 534,
+ 535, 536, 537, 538, 539, 540, 541, 542, 543, 544,
+ 545, 546, 547, 548, 549, 550, 551, 552, 553, 554,
+ 555, 556, 557, 558, 559, 560, 561, 562, 563, 564,
+ 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 592, 593, 594,
+ 595, 596, 597, 598, 599, 600, 601, 602, 603, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 614,
+ 615, 616, 617, 618, 619, 620, 621, 622, 623, 624,
+ 625, 626, 627, 628, 629, 630, 631, 632, 633, 634,
+ 635, 636, 637, 638, 639, 640, 641, 642, 643, 644,
+ 645, 646, 647, 648, 649, 650, 651, 652, 653, 654,
+ 655, 656, 657, 658, 659, 660, 661, 662
+};
+# endif
+
+#define YYPACT_NINF -659
+
+#define yypact_value_is_default(Yystate) \
+ (!!((Yystate) == (-659)))
+
+#define YYTABLE_NINF -524
+
+#define yytable_value_is_error(Yytable_value) \
+ 0
+
+ /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+static const yytype_int16 yypact[] =
+{
+ 3535, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -331, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -324, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -319, -659, -659, -659, -659, -659,
+ -659, -659, -659, -256, -659, -314, -351, -309, -306, 5942,
+ -257, -659, -217, -659, -659, -659, -659, 4338, -659, -659,
+ -659, -659, -241, -659, -659, 721, -659, -659, -204, -71,
+ -219, -659, 9007, -349, -659, -659, -215, -659, 5942, -659,
+ -659, -659, 5942, -178, -172, -659, -337, -267, -659, -659,
+ -659, 8237, -207, -659, -659, -659, -659, -341, -659, -211,
+ -330, -659, -659, 5942, -210, 6697, -659, -322, 1123, -659,
+ -659, -659, -659, -207, -328, -659, 7082, -304, -659, -163,
+ -659, -252, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -659, -659, -659, 8237, 8237, 8237, -659, -659,
+ -659, -659, -659, -659, -303, -659, -659, -659, -196, -299,
+ 8622, -194, -659, 8237, -659, -659, -355, -195, -659, -157,
+ 8237, -659, -71, 5942, 5942, -155, 4739, -659, -659, -659,
+ -659, -242, -236, -249, -335, -206, -191, -187, -209, -149,
+ -150, -333, -162, 7467, -659, -170, -168, -659, -154, -153,
+ -167, 7852, -152, 8237, -159, -148, -151, -160, -659, -659,
+ -274, -659, -659, -251, -659, -351, -147, -144, -659, -659,
+ -659, -659, 1525, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -19, -195, 7082, -302, 7082, -659, -659, 7082,
+ 5942, -659, -115, -659, -659, -659, -292, -659, -659, 8237,
+ -108, -659, -659, 8237, -143, -659, -659, -659, 8237, -659,
+ -659, -659, -659, -659, 5140, -155, -207, -250, -659, -659,
+ -659, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237,
+ 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237, 8237,
+ -659, -659, -659, -142, -659, -659, 1927, -659, 8237, -659,
+ -659, -245, 8237, -226, -659, -659, -106, -659, 1927, -659,
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ 8237, 8237, -659, -659, -659, -659, -659, -659, -659, 7082,
+ -659, -238, -659, 5541, -659, -659, -141, -140, -659, -659,
+ -659, -659, -244, -195, -155, -659, -659, -659, -659, -242,
+ -242, -236, -236, -249, -249, -249, -249, -335, -335, -206,
+ -191, -187, -209, -149, -150, 8237, -659, -104, 3133, -263,
+ -659, -260, -659, 3937, -136, -297, -659, 1927, -659, -659,
+ -659, -659, 6312, -659, -659, -659, -659, -224, -135, -659,
+ -659, 3937, -138, -659, -140, -97, 5942, -132, 8237, -133,
+ -106, -134, -659, -659, 8237, 8237, -659, -137, -129, 224,
+ -128, 2731, -659, -127, -131, 2329, -126, -659, -659, -659,
+ -659, -255, 8237, 2329, -138, -659, -659, 1927, 7082, -659,
+ -659, -659, -659, -130, -140, -659, -659, 1927, -123, -659,
+ -659, -659
+};
+
+ /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE does not specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint16 yydefact[] =
+{
+ 0, 157, 158, 202, 200, 203, 201, 204, 156, 215,
+ 205, 206, 213, 214, 211, 212, 209, 210, 207, 208,
+ 183, 231, 232, 233, 234, 235, 236, 249, 250, 251,
+ 246, 247, 248, 261, 262, 263, 243, 244, 245, 258,
+ 259, 260, 240, 241, 242, 255, 256, 257, 237, 238,
+ 239, 252, 253, 254, 216, 217, 218, 264, 265, 266,
+ 162, 160, 161, 159, 165, 163, 164, 166, 172, 185,
+ 168, 169, 167, 170, 171, 173, 179, 180, 181, 182,
+ 174, 175, 176, 177, 178, 219, 220, 221, 276, 277,
+ 278, 222, 223, 224, 288, 289, 290, 225, 226, 227,
+ 300, 301, 302, 228, 229, 230, 312, 313, 314, 134,
+ 133, 132, 0, 135, 136, 137, 138, 139, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 315, 316, 317, 318, 319, 320,
+ 321, 322, 323, 325, 324, 484, 326, 327, 328, 329,
+ 330, 331, 332, 333, 334, 335, 336, 352, 353, 354,
+ 355, 356, 357, 359, 360, 361, 362, 363, 364, 366,
+ 367, 370, 371, 372, 374, 375, 337, 338, 358, 365,
+ 376, 378, 379, 380, 382, 383, 474, 475, 339, 340,
+ 341, 368, 342, 346, 347, 350, 373, 377, 381, 343,
+ 344, 348, 349, 369, 345, 351, 384, 385, 386, 388,
+ 390, 392, 394, 396, 400, 401, 402, 403, 404, 405,
+ 407, 408, 409, 410, 411, 412, 414, 416, 417, 418,
+ 420, 421, 398, 406, 413, 422, 424, 425, 426, 428,
+ 429, 387, 389, 391, 415, 393, 395, 397, 399, 419,
+ 423, 427, 476, 477, 480, 481, 482, 483, 478, 479,
+ 430, 432, 433, 434, 436, 437, 438, 440, 441, 442,
+ 444, 445, 446, 448, 449, 450, 452, 453, 454, 456,
+ 457, 458, 460, 461, 462, 464, 465, 466, 468, 469,
+ 470, 472, 473, 431, 435, 439, 443, 447, 455, 459,
+ 463, 451, 467, 471, 0, 199, 486, 571, 131, 146,
+ 487, 488, 489, 0, 570, 0, 572, 0, 108, 107,
+ 0, 119, 124, 153, 152, 150, 154, 0, 147, 149,
+ 155, 129, 195, 151, 485, 0, 567, 569, 0, 0,
+ 0, 492, 0, 0, 96, 93, 0, 106, 0, 115,
+ 109, 117, 0, 118, 0, 94, 125, 0, 99, 148,
+ 130, 0, 188, 194, 1, 568, 186, 0, 145, 143,
+ 0, 141, 490, 0, 0, 0, 97, 0, 0, 573,
+ 110, 114, 116, 112, 120, 111, 0, 126, 102, 0,
+ 100, 0, 2, 12, 13, 10, 11, 4, 5, 6,
+ 7, 8, 9, 15, 14, 0, 0, 0, 42, 41,
+ 43, 40, 3, 17, 36, 19, 24, 25, 0, 0,
+ 29, 0, 197, 0, 35, 33, 0, 189, 184, 0,
+ 0, 140, 0, 0, 0, 0, 0, 494, 95, 190,
+ 44, 48, 51, 54, 59, 62, 64, 66, 68, 70,
+ 72, 74, 0, 0, 98, 0, 0, 552, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 518, 527, 531,
+ 44, 77, 90, 0, 507, 0, 155, 129, 510, 529,
+ 509, 508, 0, 511, 512, 533, 513, 540, 514, 515,
+ 548, 516, 0, 113, 0, 121, 0, 502, 128, 0,
+ 0, 104, 0, 101, 37, 38, 0, 21, 22, 0,
+ 0, 27, 26, 0, 199, 30, 32, 39, 0, 196,
+ 187, 92, 144, 142, 0, 0, 500, 0, 498, 493,
+ 495, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 75, 191, 192, 0, 563, 562, 0, 554, 0, 566,
+ 564, 0, 0, 0, 547, 550, 0, 517, 0, 80,
+ 81, 83, 82, 85, 86, 87, 88, 89, 84, 79,
+ 0, 0, 532, 528, 530, 534, 541, 549, 123, 0,
+ 505, 0, 127, 0, 105, 16, 0, 23, 20, 31,
+ 198, 491, 0, 501, 0, 496, 45, 46, 47, 50,
+ 49, 52, 53, 57, 58, 55, 56, 60, 61, 63,
+ 65, 67, 69, 71, 73, 0, 193, 0, 0, 0,
+ 565, 0, 546, 0, 577, 0, 575, 519, 78, 91,
+ 122, 503, 0, 103, 18, 497, 499, 0, 0, 557,
+ 556, 559, 525, 542, 538, 0, 0, 0, 0, 0,
+ 0, 0, 504, 506, 0, 0, 558, 0, 0, 537,
+ 0, 0, 535, 0, 0, 0, 0, 574, 576, 520,
+ 76, 0, 560, 0, 525, 524, 526, 544, 0, 522,
+ 551, 521, 578, 0, 561, 555, 536, 545, 0, 539,
+ 553, 543
+};
+
+ /* YYPGOTO[NTERM-NUM]. */
+static const yytype_int16 yypgoto[] =
+{
+ -659, -659, -659, -659, -659, -659, -659, -659, -659, -659,
+ -659, -659, -364, -659, -389, -385, -457, -384, -310, -307,
+ -305, -308, -301, -298, -659, -386, -659, -390, -659, -415,
+ -418, 1, -659, -659, -659, 2, -659, -659, -659, -110,
+ -105, -107, -659, -659, -628, -659, -659, -659, -659, -188,
+ -659, -336, -343, -659, 6, -659, 0, -334, -659, -659,
+ -659, -659, -67, -659, -659, -659, -431, -437, -277, -350,
+ -501, -659, -375, -488, -658, -414, -659, -659, -428, -426,
+ -659, -659, -87, -568, -368, -659, -231, -659, -388, -659,
+ -230, -659, -659, -659, -659, -228, -659, -659, -659, -659,
+ -659, -659, -659, -659, -70, -659, -659, -659, -659, -394
+};
+
+ /* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int16 yydefgoto[] =
+{
+ -1, 432, 433, 434, 616, 435, 436, 437, 438, 439,
+ 440, 441, 490, 443, 461, 462, 463, 464, 465, 466,
+ 467, 468, 469, 470, 471, 491, 645, 492, 600, 493,
+ 542, 494, 335, 520, 411, 495, 337, 338, 339, 369,
+ 370, 371, 340, 341, 342, 343, 344, 345, 390, 391,
+ 346, 347, 348, 349, 444, 387, 445, 397, 382, 383,
+ 446, 352, 353, 354, 453, 393, 456, 457, 547, 548,
+ 518, 611, 498, 499, 500, 501, 588, 681, 710, 689,
+ 690, 691, 711, 502, 503, 504, 505, 692, 677, 506,
+ 507, 693, 718, 508, 509, 510, 653, 576, 648, 671,
+ 687, 688, 511, 355, 356, 357, 366, 512, 655, 656
+};
+
+ /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule whose
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+static const yytype_int16 yytable[] =
+{
+ 351, 334, 336, 372, 379, 477, 350, 478, 479, 472,
+ 388, 482, 526, 608, 604, 610, 517, 442, 612, 550,
+ 657, 360, 544, 558, 559, 675, 363, 538, 395, 379,
+ 569, 460, 372, 706, 365, 448, 396, 709, 405, 539,
+ 395, 449, 407, 675, 358, 709, 451, 406, 447, 395,
+ 535, 359, 452, 527, 528, 473, 514, 454, 560, 561,
+ 361, 524, 525, 474, 541, 570, 581, 367, 583, 513,
+ 515, 364, -34, 473, 529, 473, 368, 532, 530, 537,
+ 519, 679, 609, 533, 615, 680, 460, 573, 647, 613,
+ 601, 589, 590, 591, 592, 593, 594, 595, 596, 597,
+ 598, 633, 634, 635, 636, 556, 557, 550, 660, 460,
+ 599, 379, 408, 672, 617, 409, 673, 454, 410, 601,
+ 454, 713, 601, 376, 517, 374, 517, 601, 375, 517,
+ 522, 601, 624, 523, 602, 625, 386, 601, 624, 717,
+ 650, 665, 661, 619, 662, 330, 331, 332, 551, 552,
+ 553, 554, 381, 555, 562, 563, 601, 652, 601, 684,
+ 392, 683, 403, 649, 398, 629, 630, 651, 404, 604,
+ 395, 631, 632, 450, 620, 458, 550, 521, 637, 638,
+ 531, 536, 473, 540, 454, 546, 566, 626, 627, 628,
+ 460, 460, 460, 460, 460, 460, 460, 460, 460, 460,
+ 460, 460, 460, 460, 460, 460, 564, 719, 454, 565,
+ 658, 659, 623, 567, 568, 574, 571, 575, 579, 517,
+ 587, 577, 578, 582, 584, 614, 586, 585, -35, 604,
+ 667, -33, 618, -28, 654, 668, 646, 664, 674, 678,
+ 685, -523, 601, 694, 695, 697, 699, 703, 702, 704,
+ 712, 487, 707, 708, 639, 720, 674, 721, 640, 642,
+ 696, 641, 401, 400, 543, 402, 362, 643, 622, 389,
+ 701, 644, 517, 669, 666, 715, 705, 454, 716, 399,
+ 670, 605, 606, 686, 607, 385, 698, 714, 0, 0,
+ 0, 0, 541, 0, 700, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 460, 0, 0, 676, 517, 0,
+ 485, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 379, 0, 676, 0, 0, 0, 373,
+ 0, 0, 0, 0, 0, 350, 0, 380, 0, 0,
+ 0, 0, 0, 350, 0, 351, 334, 336, 0, 0,
+ 0, 350, 394, 0, 0, 0, 0, 0, 373, 0,
+ 0, 0, 373, 0, 350, 0, 0, 0, 350, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 455, 0, 0, 0, 0, 497, 350,
+ 0, 0, 0, 0, 496, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 455, 545, 0, 455, 0, 0, 350,
+ 350, 0, 350, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 497, 0, 0, 0, 0, 0, 496, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 455, 0, 0, 0, 0, 0, 350, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 455, 0, 0, 0, 0, 0,
+ 350, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 497, 0, 0, 0,
+ 0, 0, 496, 0, 0, 0, 0, 0, 497, 0,
+ 0, 0, 0, 0, 496, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 455, 0, 0, 0, 0, 0, 350,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 497, 0,
+ 0, 0, 0, 497, 496, 0, 0, 497, 0, 496,
+ 0, 0, 0, 496, 0, 0, 0, 0, 0, 0,
+ 0, 497, 0, 0, 0, 0, 380, 496, 0, 0,
+ 0, 0, 350, 0, 0, 0, 0, 0, 0, 0,
+ 0, 497, 0, 0, 0, 497, 0, 496, 0, 0,
+ 0, 496, 0, 497, 0, 0, 0, 497, 0, 496,
+ 0, 0, 0, 496, 0, 0, 0, 497, 0, 0,
+ 0, 384, 0, 496, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325,
+ 0, 0, 326, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 327, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 328, 329, 330, 331, 332, 333, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 475, 476, 477, 0, 478,
+ 479, 480, 481, 482, 483, 484, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
+ 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
+ 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
+ 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293,
+ 294, 295, 296, 297, 298, 299, 300, 301, 302, 303,
+ 304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323,
+ 324, 325, 485, 412, 326, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 0, 0, 425,
+ 426, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 427, 0,
+ 486, 0, 487, 488, 0, 0, 0, 0, 489, 428,
+ 429, 430, 431, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 328, 329, 330, 331, 332, 333, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 475, 476, 477,
+ 0, 478, 479, 480, 481, 482, 483, 484, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
+ 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 485, 412, 326, 413, 414, 415,
+ 416, 417, 418, 419, 420, 421, 422, 423, 424, 0,
+ 0, 425, 426, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 427, 0, 486, 0, 487, 603, 0, 0, 0, 0,
+ 489, 428, 429, 430, 431, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 328, 329, 330, 331, 332, 333,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 475,
+ 476, 477, 0, 478, 479, 480, 481, 482, 483, 484,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 485, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 486, 0, 487, 0, 0, 0,
+ 0, 0, 489, 428, 429, 430, 431, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 328, 329, 330, 331,
+ 332, 333, 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 475, 476, 477, 0, 478, 479, 480, 481, 482,
+ 483, 484, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 485, 412,
+ 326, 413, 414, 415, 416, 417, 418, 419, 420, 421,
+ 422, 423, 424, 0, 0, 425, 426, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 427, 0, 486, 0, 398, 0,
+ 0, 0, 0, 0, 489, 428, 429, 430, 431, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 328, 329,
+ 330, 331, 332, 333, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 475, 476, 477, 0, 478, 479, 480,
+ 481, 482, 483, 484, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325,
+ 485, 412, 326, 413, 414, 415, 416, 417, 418, 419,
+ 420, 421, 422, 423, 424, 0, 0, 425, 426, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 427, 0, 486, 0,
+ 0, 0, 0, 0, 0, 0, 489, 428, 429, 430,
+ 431, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 328, 329, 330, 331, 332, 333, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
+ 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
+ 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
+ 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293,
+ 294, 295, 296, 297, 298, 299, 300, 301, 302, 303,
+ 304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323,
+ 324, 325, 0, 412, 326, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 0, 0, 425,
+ 426, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 427, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 489, 428,
+ 429, 430, 431, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 328, 329, 330, 331, 332, 333, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
+ 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 0, 0, 326, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 327, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 328, 329, 330, 331, 332, 333,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 328, 329, 330, 331,
+ 332, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 0, 377, 326,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 378, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 328, 329, 330,
+ 331, 332, 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 0, 0,
+ 326, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 549,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 328, 329,
+ 330, 331, 332, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
+ 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 0,
+ 0, 326, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 621, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 328,
+ 329, 330, 331, 332, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
+ 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325,
+ 0, 0, 326, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 663, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 328, 329, 330, 331, 332, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 0, 326, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 328, 329, 330, 331, 332, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 516, 682, 0, 0, 0, 0, 0, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 69,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 459, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 516, 0, 0, 0, 0, 0, 0, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 69,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 572, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 580, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 69,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 412, 326, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423,
+ 424, 0, 0, 425, 426, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 427, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 428, 429, 430, 431, 3, 4, 5,
+ 6, 7, 0, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 69, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 534, 0, 412, 326, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 0, 0, 425, 426,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 427, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 428, 429,
+ 430, 431, 3, 4, 5, 6, 7, 0, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 0, 0, 326
+};
+
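+  /* YYCHECK[I] -- Guard for the YYTABLE entry at the same index: a
+     packed-table lookup is taken only when YYCHECK at the computed
+     index matches the lookup key (the translated lookahead token for
+     actions, the exposed stack state for gotos); otherwise the
+     state's default action or default goto applies.  -1 marks slots
+     that belong to no state.  */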
+static const yytype_int16 yycheck[] =
+{
+ 0, 0, 0, 339, 347, 24, 0, 26, 27, 395,
+ 81, 30, 427, 514, 502, 516, 406, 381, 519, 456,
+ 588, 340, 453, 358, 359, 653, 340, 382, 377, 372,
+ 363, 395, 368, 691, 385, 376, 385, 695, 375, 394,
+ 377, 382, 376, 671, 375, 703, 376, 384, 382, 377,
+ 440, 375, 382, 356, 357, 377, 384, 393, 393, 394,
+ 379, 425, 426, 385, 450, 398, 481, 376, 483, 403,
+ 404, 385, 375, 377, 377, 377, 382, 376, 381, 443,
+ 384, 378, 384, 382, 376, 382, 450, 473, 576, 520,
+ 382, 365, 366, 367, 368, 369, 370, 371, 372, 373,
+ 374, 558, 559, 560, 561, 354, 355, 544, 609, 473,
+ 384, 454, 379, 376, 529, 382, 376, 453, 385, 382,
+ 456, 376, 382, 340, 514, 382, 516, 382, 385, 519,
+ 382, 382, 382, 385, 385, 385, 340, 382, 382, 707,
+ 385, 385, 380, 533, 382, 401, 402, 403, 390, 391,
+ 392, 387, 393, 389, 360, 361, 382, 383, 382, 383,
+ 379, 662, 340, 578, 379, 554, 555, 582, 340, 657,
+ 377, 556, 557, 384, 538, 385, 613, 340, 562, 563,
+ 376, 375, 377, 340, 520, 340, 395, 551, 552, 553,
+ 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 397, 708, 544, 396,
+ 600, 601, 546, 362, 364, 385, 378, 385, 385, 609,
+ 380, 375, 375, 375, 383, 340, 377, 375, 375, 717,
+ 645, 375, 340, 376, 340, 339, 378, 378, 653, 375,
+ 375, 379, 382, 340, 376, 378, 380, 376, 385, 25,
+ 376, 379, 379, 384, 564, 385, 671, 380, 565, 567,
+ 678, 566, 372, 368, 452, 372, 333, 568, 545, 340,
+ 685, 569, 662, 648, 624, 703, 690, 613, 704, 366,
+ 648, 512, 512, 671, 512, 355, 680, 702, -1, -1,
+ -1, -1, 678, -1, 684, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 678, -1, -1, 653, 708, -1,
+ 339, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 676, -1, 671, -1, -1, -1, 339,
+ -1, -1, -1, -1, -1, 339, -1, 347, -1, -1,
+ -1, -1, -1, 347, -1, 355, 355, 355, -1, -1,
+ -1, 355, 362, -1, -1, -1, -1, -1, 368, -1,
+ -1, -1, 372, -1, 368, -1, -1, -1, 372, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 393, -1, -1, -1, -1, 398, 393,
+ -1, -1, -1, -1, 398, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 453, 454, -1, 456, -1, -1, 453,
+ 454, -1, 456, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 502, -1, -1, -1, -1, -1, 502, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 520, -1, -1, -1, -1, -1, 520, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 544, -1, -1, -1, -1, -1,
+ 544, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 576, -1, -1, -1,
+ -1, -1, 576, -1, -1, -1, -1, -1, 588, -1,
+ -1, -1, -1, -1, 588, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 613, -1, -1, -1, -1, -1, 613,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 648, -1,
+ -1, -1, -1, 653, 648, -1, -1, 657, -1, 653,
+ -1, -1, -1, 657, -1, -1, -1, -1, -1, -1,
+ -1, 671, -1, -1, -1, -1, 676, 671, -1, -1,
+ -1, -1, 676, -1, -1, -1, -1, -1, -1, -1,
+ -1, 691, -1, -1, -1, 695, -1, 691, -1, -1,
+ -1, 695, -1, 703, -1, -1, -1, 707, -1, 703,
+ -1, -1, -1, 707, -1, -1, -1, 717, -1, -1,
+ -1, 0, -1, 717, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328,
+ 329, 330, 331, 332, 333, 334, 335, 336, 337, 338,
+ -1, -1, 341, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 385, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 399, 400, 401, 402, 403, 404, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, -1, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
+ 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336,
+ 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 350, 351, 352, 353, -1, -1, 356,
+ 357, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 375, -1,
+ 377, -1, 379, 380, -1, -1, -1, -1, 385, 386,
+ 387, 388, 389, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 399, 400, 401, 402, 403, 404, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ -1, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
+ 345, 346, 347, 348, 349, 350, 351, 352, 353, -1,
+ -1, 356, 357, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 375, -1, 377, -1, 379, 380, -1, -1, -1, -1,
+ 385, 386, 387, 388, 389, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 399, 400, 401, 402, 403, 404,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, -1, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, 377, -1, 379, -1, -1, -1,
+ -1, -1, 385, 386, 387, 388, 389, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 399, 400, 401, 402,
+ 403, 404, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, -1, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
+ 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170,
+ 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270,
+ 271, 272, 273, 274, 275, 276, 277, 278, 279, 280,
+ 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 298, 299, 300,
+ 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 318, 319, 320,
+ 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
+ 331, 332, 333, 334, 335, 336, 337, 338, 339, 340,
+ 341, 342, 343, 344, 345, 346, 347, 348, 349, 350,
+ 351, 352, 353, -1, -1, 356, 357, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 375, -1, 377, -1, 379, -1,
+ -1, -1, -1, -1, 385, 386, 387, 388, 389, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 399, 400,
+ 401, 402, 403, 404, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, -1, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328,
+ 329, 330, 331, 332, 333, 334, 335, 336, 337, 338,
+ 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, -1, -1, 356, 357, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 375, -1, 377, -1,
+ -1, -1, -1, -1, -1, -1, 385, 386, 387, 388,
+ 389, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 399, 400, 401, 402, 403, 404, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
+ 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336,
+ 337, 338, -1, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 350, 351, 352, 353, -1, -1, 356,
+ 357, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 375, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 385, 386,
+ 387, 388, 389, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 399, 400, 401, 402, 403, 404, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, -1, -1, 341, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 385, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 399, 400, 401, 402, 403, 404,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 399, 400, 401, 402,
+ 403, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
+ 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 326, 327, 328, 329, 330, 331,
+ 332, 333, 334, 335, 336, 337, 338, -1, 340, 341,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 385, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 399, 400, 401,
+ 402, 403, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
+ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
+ 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170,
+ 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270,
+ 271, 272, 273, 274, 275, 276, 277, 278, 279, 280,
+ 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 298, 299, 300,
+ 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 318, 319, 320,
+ 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
+ 331, 332, 333, 334, 335, 336, 337, 338, -1, -1,
+ 341, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 380,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 399, 400,
+ 401, 402, 403, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
+ 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 299,
+ 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
+ 330, 331, 332, 333, 334, 335, 336, 337, 338, -1,
+ -1, 341, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 380, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 399,
+ 400, 401, 402, 403, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328,
+ 329, 330, 331, 332, 333, 334, 335, 336, 337, 338,
+ -1, -1, 341, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 380, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 399, 400, 401, 402, 403, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, -1, 341, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, 399, 400, 401, 402, 403, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, 379, 380, -1, -1, -1, -1, -1, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 82,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, 378, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, 379, -1, -1, -1, -1, -1, -1, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 82,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, 378, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 385, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 82,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, 340, 341, 342,
+ 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, -1, -1, 356, 357, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 375, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 386, 387, 388, 389, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 82, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 131, 132, 133, 134, 135, 136, 137,
+ 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
+ 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, -1, 340, 341, 342, 343, 344, 345, 346, 347,
+ 348, 349, 350, 351, 352, 353, -1, -1, 356, 357,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 375, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 386, 387,
+ 388, 389, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, -1, -1, 341
+};
+
+ /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint16 yystos[] =
+{
+ 0, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
+ 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282,
+ 283, 284, 285, 286, 287, 288, 289, 290, 291, 292,
+ 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312,
+ 313, 314, 315, 316, 317, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 341, 385, 399, 400,
+ 401, 402, 403, 404, 439, 440, 443, 444, 445, 446,
+ 450, 451, 452, 453, 454, 455, 458, 459, 460, 461,
+ 462, 464, 469, 470, 471, 511, 512, 513, 375, 375,
+ 340, 379, 470, 340, 385, 385, 514, 376, 382, 447,
+ 448, 449, 459, 464, 382, 385, 340, 340, 385, 460,
+ 464, 393, 466, 467, 0, 512, 340, 463, 81, 340,
+ 456, 457, 379, 473, 464, 377, 385, 465, 379, 490,
+ 448, 447, 449, 340, 340, 375, 384, 465, 379, 382,
+ 385, 442, 340, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, 356, 357, 375, 386, 387,
+ 388, 389, 409, 410, 411, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 462, 464, 468, 465, 376, 382,
+ 384, 376, 382, 472, 459, 464, 474, 475, 385, 378,
+ 420, 422, 423, 424, 425, 426, 427, 428, 429, 430,
+ 431, 432, 433, 377, 385, 22, 23, 24, 26, 27,
+ 28, 29, 30, 31, 32, 339, 377, 379, 380, 385,
+ 420, 433, 435, 437, 439, 443, 462, 464, 480, 481,
+ 482, 483, 491, 492, 493, 494, 497, 498, 501, 502,
+ 503, 510, 515, 465, 384, 465, 379, 435, 478, 384,
+ 441, 340, 382, 385, 420, 420, 437, 356, 357, 377,
+ 381, 376, 376, 382, 338, 435, 375, 420, 382, 394,
+ 340, 433, 438, 457, 474, 464, 340, 476, 477, 380,
+ 475, 390, 391, 392, 387, 389, 354, 355, 358, 359,
+ 393, 394, 360, 361, 397, 396, 395, 362, 364, 363,
+ 398, 378, 378, 433, 385, 385, 505, 375, 375, 385,
+ 385, 437, 375, 437, 383, 375, 377, 380, 484, 365,
+ 366, 367, 368, 369, 370, 371, 372, 373, 374, 384,
+ 436, 382, 385, 380, 481, 494, 498, 503, 478, 384,
+ 478, 479, 478, 474, 340, 376, 412, 437, 340, 435,
+ 420, 380, 476, 465, 382, 385, 420, 420, 420, 422,
+ 422, 423, 423, 424, 424, 424, 424, 425, 425, 426,
+ 427, 428, 429, 430, 431, 434, 378, 481, 506, 437,
+ 385, 437, 383, 504, 340, 516, 517, 491, 435, 435,
+ 478, 380, 382, 380, 378, 385, 477, 437, 339, 480,
+ 492, 507, 376, 376, 437, 452, 459, 496, 375, 378,
+ 382, 485, 380, 478, 383, 375, 496, 508, 509, 487,
+ 488, 489, 495, 499, 340, 376, 438, 378, 517, 380,
+ 435, 437, 385, 376, 25, 483, 482, 379, 384, 482,
+ 486, 490, 376, 376, 437, 486, 487, 491, 500, 478,
+ 385, 380
+};
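+
+/* Beyond its raw contents, YYSTOS is what lets the skeleton know which
+   grammar symbol each stacked state carries: yydestruct() uses it to
+   reclaim semantic values popped during error recovery, and the debug
+   traces use it to name stack entries.  */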
+
+ /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint16 yyr1[] =
+{
+ 0, 408, 409, 410, 410, 410, 410, 410, 410, 410,
+ 410, 410, 410, 410, 410, 410, 410, 411, 411, 411,
+ 411, 411, 411, 412, 413, 414, 415, 415, 416, 416,
+ 417, 417, 418, 419, 419, 419, 420, 420, 420, 420,
+ 421, 421, 421, 421, 422, 422, 422, 422, 423, 423,
+ 423, 424, 424, 424, 425, 425, 425, 425, 425, 426,
+ 426, 426, 427, 427, 428, 428, 429, 429, 430, 430,
+ 431, 431, 432, 432, 433, 434, 433, 435, 435, 436,
+ 436, 436, 436, 436, 436, 436, 436, 436, 436, 436,
+ 437, 437, 438, 439, 439, 439, 439, 439, 439, 439,
+ 439, 439, 441, 440, 442, 442, 443, 444, 444, 445,
+ 445, 446, 447, 447, 448, 448, 448, 448, 449, 450,
+ 450, 450, 450, 450, 451, 451, 451, 451, 451, 452,
+ 452, 453, 454, 454, 454, 454, 454, 454, 454, 454,
+ 455, 456, 456, 457, 457, 457, 458, 459, 459, 460,
+ 460, 460, 460, 460, 460, 460, 461, 461, 461, 461,
+ 461, 461, 461, 461, 461, 461, 461, 461, 461, 461,
+ 461, 461, 461, 461, 461, 461, 461, 461, 461, 461,
+ 461, 461, 461, 461, 461, 462, 463, 463, 464, 464,
+ 465, 465, 465, 465, 466, 466, 467, 468, 468, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 469, 469, 469,
+ 469, 469, 469, 469, 469, 469, 469, 470, 470, 470,
+ 472, 471, 473, 471, 474, 474, 475, 475, 476, 476,
+ 477, 477, 478, 478, 478, 479, 479, 480, 481, 481,
+ 482, 482, 482, 482, 482, 482, 482, 483, 484, 485,
+ 483, 486, 486, 488, 487, 489, 487, 490, 490, 491,
+ 491, 492, 492, 493, 493, 494, 495, 495, 496, 496,
+ 497, 497, 499, 498, 500, 500, 501, 501, 502, 502,
+ 504, 503, 505, 503, 506, 503, 507, 507, 508, 508,
+ 509, 509, 510, 510, 510, 510, 510, 511, 511, 512,
+ 512, 512, 514, 513, 515, 516, 516, 517, 517
+};
+
+ /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
+{
+ 0, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 3, 1, 4, 1,
+ 3, 2, 2, 1, 1, 1, 2, 2, 2, 1,
+ 2, 3, 2, 1, 1, 1, 1, 2, 2, 2,
+ 1, 1, 1, 1, 1, 3, 3, 3, 1, 3,
+ 3, 1, 3, 3, 1, 3, 3, 3, 3, 1,
+ 3, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+ 1, 3, 1, 3, 1, 0, 6, 1, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 3, 1, 2, 2, 4, 2, 3, 4, 2,
+ 3, 4, 0, 6, 2, 3, 2, 1, 1, 2,
+ 3, 3, 2, 3, 2, 1, 2, 1, 1, 1,
+ 3, 4, 6, 5, 1, 2, 3, 5, 4, 1,
+ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 4, 1, 3, 1, 3, 1, 1, 1, 2, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 4, 1, 1, 3, 2, 3,
+ 2, 3, 3, 4, 1, 0, 3, 1, 3, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 6, 0, 5, 1, 2, 3, 4, 1, 3,
+ 1, 2, 1, 3, 4, 1, 3, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 0, 0,
+ 5, 1, 1, 0, 2, 0, 2, 2, 3, 1,
+ 2, 1, 2, 1, 2, 5, 3, 1, 1, 4,
+ 1, 2, 0, 8, 0, 1, 3, 2, 1, 2,
+ 0, 6, 0, 8, 0, 7, 1, 1, 1, 0,
+ 2, 3, 2, 2, 2, 3, 2, 1, 2, 1,
+ 1, 1, 0, 3, 5, 1, 3, 1, 4
+};
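+
+/* Read together, yyr2 and yyr1 drive every reduction: rule N pops
+ yyr2[N] symbols off the stacks and pushes the state reached through
+ its left-hand-side symbol yyr1[N]. For instance, yyr2[45] == 3 above
+ matches case 45 in the actions below, a three-symbol binary '*' rule
+ whose operands are addressed as yyvsp[-2] and yyvsp[0]. */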
+
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+do \
+ if (yychar == YYEMPTY) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ YYPOPSTACK (yylen); \
+ yystate = *yyssp; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (pParseContext, YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+while (0)
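+
+/* YYBACKUP lets a semantic action replace the current lookahead: it
+ discards the right-hand side just recognized (YYPOPSTACK (yylen)),
+ installs Token/Value as the new lookahead, and jumps back to the
+ shift/reduce dispatch at yybackup. It can only be used while no
+ lookahead is pending, hence the yychar == YYEMPTY guard and the
+ "cannot back up" error on the other branch. */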
+
+/* Error token number */
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+/* This macro is provided for backward compatibility. */
+#ifndef YY_LOCATION_PRINT
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+#endif
+
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value, pParseContext); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
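+
+/* These tracing macros are compiled in only when YYDEBUG is set and
+ fire only while the runtime flag yydebug (defined below) is nonzero.
+ A host application would typically enable them like this
+ (illustrative sketch):
+
+ extern int yydebug;
+ yydebug = 1; // before calling yyparse()
+*/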
+
+
+/*----------------------------------------.
+| Print this symbol's value on YYOUTPUT. |
+`----------------------------------------*/
+
+static void
+yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext)
+{
+ FILE *yyo = yyoutput;
+ YYUSE (yyo);
+ YYUSE (pParseContext);
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
+# endif
+ YYUSE (yytype);
+}
+
+
+/*--------------------------------.
+| Print this symbol on YYOUTPUT. |
+`--------------------------------*/
+
+static void
+yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext)
+{
+ YYFPRINTF (yyoutput, "%s %s (",
+ yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
+
+ yy_symbol_value_print (yyoutput, yytype, yyvaluep, pParseContext);
+ YYFPRINTF (yyoutput, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+static void
+yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+static void
+yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, glslang::TParseContext* pParseContext)
+{
+ unsigned long int yylno = yyrline[yyrule];
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr,
+ yystos[yyssp[yyi + 1 - yynrhs]],
+ &(yyvsp[(yyi + 1) - (yynrhs)])
+ , pParseContext);
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyssp, yyvsp, Rule, pParseContext); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
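+
+/* With the defaults above, the built-in relocation path in yyparse
+ starts each stack at 200 entries and doubles it on every overflow
+ (200, 400, 800, ...), clamping at 10000; once YYMAXDEPTH is reached
+ the parse gives up through yyexhaustedlab. */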
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen strlen
+# else
+/* Return the length of YYSTR. */
+static YYSIZE_T
+yystrlen (const char *yystr)
+{
+ YYSIZE_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYSIZE_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYSIZE_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ /* Fall through. */
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (! yyres)
+ return yystrlen (yystr);
+
+ return yystpcpy (yyres, yystr) - yyres;
+}
+# endif
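+
+/* Example of the stripping above: a yytname entry spelled "\"foo\""
+ would come back from yytnamerr as the three characters foo, while
+ unquoted names are copied through unchanged; called with a null
+ YYRES it only measures, which is how yysyntax_error sizes its
+ buffer below. */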
+
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
+
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+ yytype_int16 *yyssp, int yytoken)
+{
+ YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
+ YYSIZE_T yysize = yysize0;
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = YY_NULLPTR;
+ /* Arguments of yyformat. */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Number of reported tokens (one for the "unexpected", one per
+ "expected"). */
+ int yycount = 0;
+
+ /* There are many possibilities here to consider:
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
+ {
+ int yyn = yypact[*yyssp];
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ {
+ YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
+ if (! (yysize <= yysize1
+ && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
+ }
+ }
+ }
+
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
+
+ {
+ YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
+ if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
+
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
+ }
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Avoid undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ yyp++;
+ yyformat++;
+ }
+ }
+ return 0;
+}
+#endif /* YYERROR_VERBOSE */
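+
+/* Net effect: with YYERROR_VERBOSE enabled, yysyntax_error produces
+ messages like "syntax error, unexpected X, expecting A or B",
+ listing at most four expected tokens (YYERROR_VERBOSE_ARGS_MAXIMUM
+ is 5, with one slot taken by the unexpected token itself); when more
+ candidates exist it falls back to plain "syntax error, unexpected X". */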
+
+/*-------------------------------------------------.
+| Release the memory associated with this symbol.  |
+`-------------------------------------------------*/
+
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, glslang::TParseContext* pParseContext)
+{
+ YYUSE (yyvaluep);
+ YYUSE (pParseContext);
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+int
+yyparse (glslang::TParseContext* pParseContext)
+{
+/* The lookahead symbol. */
+int yychar;
+
+
+/* The semantic value of the lookahead symbol. */
+/* Default value used for initialization, for pacifying older GCCs
+ or non-GCC compilers. */
+YY_INITIAL_VALUE (static YYSTYPE yyval_default;)
+YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);
+
+ /* Number of syntax errors so far. */
+ int yynerrs;
+
+ int yystate;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ 'yyss': related to states.
+ 'yyvs': related to semantic values.
+
+ Refer to the stacks through separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yytype_int16 yyssa[YYINITDEPTH];
+ yytype_int16 *yyss;
+ yytype_int16 *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ YYSIZE_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken = 0;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
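+
+/* YYPOPSTACK drops N entries from the state and value stacks in lock
+ step; there is no location stack to unwind because this parser is
+ generated without location tracking (note that only yyss and yyvs
+ are declared above). */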
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yyssp = yyss = yyssa;
+ yyvsp = yyvs = yyvsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+ goto yysetstate;
+
+/*------------------------------------------------------------.
+| yynewstate -- Push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+ yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+ yysetstate:
+ *yyssp = yystate;
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYSIZE_T yysize = yyssp - yyss + 1;
+
+#ifdef yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
+ }
+#else /* no yyoverflow */
+# ifndef YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+# else
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+#endif /* no yyoverflow */
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+
+ YYDPRINTF ((stderr, "Stack size increased to %lu\n",
+ (unsigned long int) yystacksize));
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yypact_value_is_default (yyn))
+ goto yydefault;
+
+ /* Not known => get a lookahead token if we don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = yylex (&yylval, parseContext);
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
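+ /* yytable encodes the action: a positive entry is the state to
+ shift to, a negative entry means reduce by rule -yyn, and the
+ dedicated error value is recognized by yytable_value_is_error. */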
+ if (yyn <= 0)
+ {
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+
+ yystate = yyn;
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- Do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ '$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
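+ /* Concretely, for a three-symbol rule (yylen == 3) this grabs
+ yyvsp[-2], i.e. $1; that is also why the actions below address
+ $1..$3 as yyvsp[-2], yyvsp[-1], and yyvsp[0]. */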
+
+
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 2:
+#line 302 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleVariable((yyvsp[0].lex).loc, (yyvsp[0].lex).symbol, (yyvsp[0].lex).string);
+ }
+#line 4159 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 3:
+#line 308 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 4:
+#line 311 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true);
+ }
+#line 4176 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 5:
+#line 315 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true);
+ }
+#line 4185 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 6:
+#line 319 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true);
+ }
+#line 4193 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 7:
+#line 322 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true);
+ }
+#line 4202 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 8:
+#line 326 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i64, (yyvsp[0].lex).loc, true);
+ }
+#line 4211 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 9:
+#line 330 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u64, (yyvsp[0].lex).loc, true);
+ }
+#line 4220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 10:
+#line 334 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((short)(yyvsp[0].lex).i, (yyvsp[0].lex).loc, true);
+ }
+#line 4229 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 11:
+#line 338 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit unsigned integer literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((unsigned short)(yyvsp[0].lex).u, (yyvsp[0].lex).loc, true);
+ }
+#line 4238 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 12:
+#line 342 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat, (yyvsp[0].lex).loc, true);
+ }
+#line 4246 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 13:
+#line 345 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtDouble, (yyvsp[0].lex).loc, true);
+ }
+#line 4255 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 14:
+#line 349 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float literal");
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat16, (yyvsp[0].lex).loc, true);
+ }
+#line 4264 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 15:
+#line 353 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).b, (yyvsp[0].lex).loc, true);
+ }
+#line 4272 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 16:
+#line 356 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode);
+ if ((yyval.interm.intermTypedNode)->getAsConstantUnion())
+ (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression();
+ }
+#line 4282 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 17:
+#line 364 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4290 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 18:
+#line 367 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBracketDereference((yyvsp[-2].lex).loc, (yyvsp[-3].interm.intermTypedNode), (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 4298 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 19:
+#line 370 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4306 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 20:
+#line 373 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleDotDereference((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode), *(yyvsp[0].lex).string);
+ }
+#line 4314 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 21:
+#line 376 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode));
+ parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "++", (yyvsp[-1].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "++", EOpPostIncrement, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 4324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 22:
+#line 381 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode));
+ parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "--", (yyvsp[-1].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "--", EOpPostDecrement, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 4334 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 23:
+#line 389 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.integerCheck((yyvsp[0].interm.intermTypedNode), "[]");
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4343 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 24:
+#line 396 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleFunctionCall((yyvsp[0].interm).loc, (yyvsp[0].interm).function, (yyvsp[0].interm).intermNode);
+ delete (yyvsp[0].interm).function;
+ }
+#line 4352 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 25:
+#line 403 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ }
+#line 4360 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 26:
+#line 409 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ }
+#line 4369 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 27:
+#line 413 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ }
+#line 4378 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 28:
+#line 420 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ }
+#line 4386 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 29:
+#line 423 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ }
+#line 4394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 30:
+#line 429 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType());
+ (yyvsp[-1].interm).function->addParameter(param);
+ (yyval.interm).function = (yyvsp[-1].interm).function;
+ (yyval.interm).intermNode = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4406 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 31:
+#line 436 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TParameter param = { 0, new TType };
+ param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType());
+ (yyvsp[-2].interm).function->addParameter(param);
+ (yyval.interm).function = (yyvsp[-2].interm).function;
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc);
+ }
+#line 4418 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 32:
+#line 446 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-1].interm);
+ }
+#line 4426 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 33:
+#line 454 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // Constructor
+ (yyval.interm).intermNode = 0;
+ (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type));
+ }
+#line 4436 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 34:
+#line 459 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ //
+ // Should be a method or subroutine call, but we haven't recognized the arguments yet.
+ //
+ (yyval.interm).function = 0;
+ (yyval.interm).intermNode = 0;
+
+ TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode();
+ if (method) {
+ (yyval.interm).function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength);
+ (yyval.interm).intermNode = method->getObject();
+ } else {
+ TIntermSymbol* symbol = (yyvsp[0].interm.intermTypedNode)->getAsSymbolNode();
+ if (symbol) {
+ parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName());
+ TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid));
+ (yyval.interm).function = function;
+ } else
+ parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "function call, method, or subroutine call expected", "", "");
+ }
+
+ if ((yyval.interm).function == 0) {
+ // error recovery
+ TString* empty = NewPoolTString("");
+ (yyval.interm).function = new TFunction(empty, TType(EbtVoid), EOpNull);
+ }
+ }
+#line 4468 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
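+
+/* Note: in GLSL the only method call form is the built-in array
+ .length(), which is why the branch above rebuilds it as a TFunction
+ typed EbtInt with EOpArrayLength on the method's object. */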
+
+ case 35:
+#line 486 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // Constructor
+ (yyval.interm).intermNode = 0;
+ (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type));
+ }
+#line 4478 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 36:
+#line 494 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.variableCheck((yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ if (TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode())
+ parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), "");
+ }
+#line 4489 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 37:
+#line 500 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "++", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "++", EOpPreIncrement, (yyvsp[0].interm.intermTypedNode));
+ }
+#line 4498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 38:
+#line 504 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "--", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "--", EOpPreDecrement, (yyvsp[0].interm.intermTypedNode));
+ }
+#line 4507 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 39:
+#line 508 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm).op != EOpNull) {
+ char errorOp[2] = {0, 0};
+ switch((yyvsp[-1].interm).op) {
+ case EOpNegative: errorOp[0] = '-'; break;
+ case EOpLogicalNot: errorOp[0] = '!'; break;
+ case EOpBitwiseNot: errorOp[0] = '~'; break;
+ default: break; // some compilers want this
+ }
+ (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].interm).loc, errorOp, (yyvsp[-1].interm).op, (yyvsp[0].interm.intermTypedNode));
+ } else {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ if ((yyval.interm.intermTypedNode)->getAsConstantUnion())
+ (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression();
+ }
+ }
+#line 4528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 40:
+#line 528 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNull; }
+#line 4534 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 41:
+#line 529 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNegative; }
+#line 4540 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 42:
+#line 530 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLogicalNot; }
+#line 4546 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 43:
+#line 531 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpBitwiseNot;
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise not"); }
+#line 4553 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 44:
+#line 537 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4559 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 45:
+#line 538 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "*", EOpMul, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4569 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 46:
+#line 543 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "/", EOpDiv, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4579 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 47:
+#line 548 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "%");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "%", EOpMod, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4590 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 48:
+#line 557 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4596 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 49:
+#line 558 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "+", EOpAdd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4606 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 50:
+#line 563 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "-", EOpSub, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4616 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 51:
+#line 571 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4622 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 52:
+#line 572 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift left");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<<", EOpLeftShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4633 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 53:
+#line 578 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift right");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">>", EOpRightShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4644 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 54:
+#line 587 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4650 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 55:
+#line 588 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<", EOpLessThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4660 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 56:
+#line 593 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">", EOpGreaterThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 57:
+#line 598 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<=", EOpLessThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4680 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 58:
+#line 603 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">=", EOpGreaterThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4690 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 59:
+#line 611 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 60:
+#line 612 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison");
+ parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "==");
+ parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "==");
+ parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "==");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "==", EOpEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4710 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 61:
+#line 621 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison");
+ parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!=");
+ parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!=");
+ parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!=");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "!=", EOpNotEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4724 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 62:
+#line 633 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4730 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 63:
+#line 634 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise and");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&", EOpAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4741 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 64:
+#line 643 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4747 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 65:
+#line 644 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise exclusive or");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^", EOpExclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4758 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 66:
+#line 653 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4764 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 67:
+#line 654 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise inclusive or");
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "|", EOpInclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 4775 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 68:
+#line 663 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4781 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 69:
+#line 664 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&&", EOpLogicalAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4791 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 70:
+#line 672 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4797 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 71:
+#line 673 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^^", EOpLogicalXor, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4807 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 72:
+#line 681 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4813 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 73:
+#line 682 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "||", EOpLogicalOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ if ((yyval.interm.intermTypedNode) == 0)
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc);
+ }
+#line 4823 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 74:
+#line 690 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4829 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 75:
+#line 691 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 4837 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 76:
+#line 694 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ --parseContext.controlFlowNestingLevel;
+ parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-5].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-4].lex).loc, "?", (yyvsp[-5].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addSelection((yyvsp[-5].interm.intermTypedNode), (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-4].lex).loc);
+ if ((yyval.interm.intermTypedNode) == 0) {
+ parseContext.binaryOpError((yyvsp[-4].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString());
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+ }
+#line 4854 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 77:
+#line 709 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); }
+#line 4860 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 78:
+#line 710 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayObjectCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array assignment");
+ parseContext.opaqueCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=");
+ parseContext.storage16BitAssignmentCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=");
+ parseContext.specializationCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=");
+ parseContext.lValueErrorCheck((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode));
+ parseContext.rValueErrorCheck((yyvsp[-1].interm).loc, "assign", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addAssign((yyvsp[-1].interm).op, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].interm).loc);
+ if ((yyval.interm.intermTypedNode) == 0) {
+ parseContext.assignError((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString());
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+ }
+#line 4878 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 79:
+#line 726 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpAssign;
+ }
+#line 4887 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 80:
+#line 730 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpMulAssign;
+ }
+#line 4896 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 81:
+#line 734 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpDivAssign;
+ }
+#line 4905 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 82:
+#line 738 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "%=");
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpModAssign;
+ }
+#line 4915 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 83:
+#line 743 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpAddAssign;
+ }
+#line 4924 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 84:
+#line 747 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).op = EOpSubAssign;
+ }
+#line 4933 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 85:
+#line 751 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift left assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLeftShiftAssign;
+ }
+#line 4942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 86:
+#line 755 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift right assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpRightShiftAssign;
+ }
+#line 4951 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 87:
+#line 759 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-and assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpAndAssign;
+ }
+#line 4960 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 88:
+#line 763 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-xor assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpExclusiveOrAssign;
+ }
+#line 4969 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 89:
+#line 767 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-or assign");
+ (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpInclusiveOrAssign;
+ }
+#line 4978 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 90:
+#line 774 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 4986 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 91:
+#line 777 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.samplerConstructorLocationCheck((yyvsp[-1].lex).loc, ",", (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.addComma((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc);
+ if ((yyval.interm.intermTypedNode) == 0) {
+ parseContext.binaryOpError((yyvsp[-1].lex).loc, ",", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString());
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+ }
+#line 4999 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 92:
+#line 788 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.constantValueCheck((yyvsp[0].interm.intermTypedNode), "");
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 5008 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 93:
+#line 795 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleFunctionDeclarator((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).function, true /* prototype */);
+ (yyval.interm.intermNode) = 0;
+ // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature
+ }
+#line 5018 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 94:
+#line 800 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm).intermNode && (yyvsp[-1].interm).intermNode->getAsAggregate())
+ (yyvsp[-1].interm).intermNode->getAsAggregate()->setOperator(EOpSequence);
+ (yyval.interm.intermNode) = (yyvsp[-1].interm).intermNode;
+ }
+#line 5028 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 95:
+#line 805 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[-3].lex).loc, ENoProfile, 130, 0, "precision statement");
+
+ // lazy setting of the previous scope's defaults; has an effect only the first time it is called in a particular scope
+ parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]);
+ parseContext.setDefaultPrecision((yyvsp[-3].lex).loc, (yyvsp[-1].interm.type), (yyvsp[-2].interm.type).qualifier.precision);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5041 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 96:
+#line 813 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.declareBlock((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).typeList);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5050 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 97:
+#line 817 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.declareBlock((yyvsp[-2].interm).loc, *(yyvsp[-2].interm).typeList, (yyvsp[-1].lex).string);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5059 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 98:
+#line 821 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.declareBlock((yyvsp[-3].interm).loc, *(yyvsp[-3].interm).typeList, (yyvsp[-2].lex).string, (yyvsp[-1].interm).arraySizes);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5068 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
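+      // Cases 99-101 (glslang.y 825-835): standalone qualifier declarations — update the
+      // global qualifier defaults, or attach a qualifier to one or more existing symbols.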
+ case 99:
+#line 825 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier);
+ parseContext.updateStandaloneQualifierDefaults((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type));
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5078 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 100:
+#line 830 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkNoShaderLayouts((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).shaderQualifiers);
+ parseContext.addQualifierToExisting((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, *(yyvsp[-1].lex).string);
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5088 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 101:
+#line 835 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkNoShaderLayouts((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).shaderQualifiers);
+ (yyvsp[-1].interm.identifierList)->push_back((yyvsp[-2].lex).string);
+ parseContext.addQualifierToExisting((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).qualifier, *(yyvsp[-1].interm.identifierList));
+ (yyval.interm.intermNode) = 0;
+ }
+#line 5099 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
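+      // Cases 102-103 (glslang.y 844): mid-rule and end-of-rule actions for a block body. The
+      // mid-rule action checks nesting; the end action undoes the nesting-level increment and
+      // captures the block's name, qualifier, and member type list.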
+ case 102:
+#line 844 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { parseContext.nestedBlockCheck((yyvsp[-2].interm.type).loc); }
+#line 5105 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 103:
+#line 844 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ --parseContext.structNestingLevel;
+ parseContext.blockName = (yyvsp[-4].lex).string;
+ parseContext.globalQualifierFixCheck((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).qualifier);
+ parseContext.checkNoShaderLayouts((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).shaderQualifiers);
+ parseContext.currentBlockQualifier = (yyvsp[-5].interm.type).qualifier;
+ (yyval.interm).loc = (yyvsp[-5].interm.type).loc;
+ (yyval.interm).typeList = (yyvsp[-1].interm.typeList);
+ }
+#line 5119 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 104:
+#line 855 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.identifierList) = new TIdentifierList;
+ (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string);
+ }
+#line 5128 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 105:
+#line 859 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.identifierList) = (yyvsp[-2].interm.identifierList);
+ (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string);
+ }
+#line 5137 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
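+      // Case 106 (glslang.y 866): a function declarator closed by ')'; the location of the
+      // closing parenthesis is recorded with the prototype.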
+ case 106:
+#line 866 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).function = (yyvsp[-1].interm.function);
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ }
+#line 5146 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 107:
+#line 873 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.function) = (yyvsp[0].interm.function);
+ }
+#line 5154 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 108:
+#line 876 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.function) = (yyvsp[0].interm.function);
+ }
+#line 5162 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
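+      // Cases 109-110 (glslang.y 883-891): accumulate parameters onto a function header. A
+      // void parameter is discarded; it is legal only as the lone "(void)" parameter, and any
+      // void after the first parameter is an error.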
+ case 109:
+#line 883 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // Add the parameter
+ (yyval.interm.function) = (yyvsp[-1].interm.function);
+ if ((yyvsp[0].interm).param.type->getBasicType() != EbtVoid)
+ (yyvsp[-1].interm.function)->addParameter((yyvsp[0].interm).param);
+ else
+ delete (yyvsp[0].interm).param.type;
+ }
+#line 5175 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 110:
+#line 891 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ //
+        // Only the first parameter of a one-parameter function can be void
+        // The check that named parameters are not void is done in parameter_declarator
+ //
+ if ((yyvsp[0].interm).param.type->getBasicType() == EbtVoid) {
+ //
+            // A parameter other than the first is void
+ //
+ parseContext.error((yyvsp[-1].lex).loc, "cannot be an argument type except for '(void)'", "void", "");
+ delete (yyvsp[0].interm).param.type;
+ } else {
+ // Add the parameter
+ (yyval.interm.function) = (yyvsp[-2].interm.function);
+ (yyvsp[-2].interm.function)->addParameter((yyvsp[0].interm).param);
+ }
+ }
+#line 5197 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
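+      // Case 111 (glslang.y 911): the function header. Rejects storage qualifiers on the
+      // return type, requires sized return arrays, optionally renames the shader entry point,
+      // and builds the TFunction.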
+ case 111:
+#line 911 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).qualifier.storage != EvqGlobal && (yyvsp[-2].interm.type).qualifier.storage != EvqTemporary) {
+ parseContext.error((yyvsp[-1].lex).loc, "no qualifiers allowed for function return",
+ GetStorageQualifierString((yyvsp[-2].interm.type).qualifier.storage), "");
+ }
+ if ((yyvsp[-2].interm.type).arraySizes)
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+
+ // Add the function as a prototype after parsing it (we do not support recursion)
+ TFunction *function;
+ TType type((yyvsp[-2].interm.type));
+
+ // Potentially rename shader entry point function. No-op most of the time.
+ parseContext.renameShaderFunction((yyvsp[-1].lex).string);
+
+ // Make the function
+ function = new TFunction((yyvsp[-1].lex).string, type);
+ (yyval.interm.function) = function;
+ }
+#line 5221 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
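+      // Cases 112-113 (glslang.y 934, 949): named parameter declarators without and with
+      // array sizes; both check profile support for arrayed types and reserved identifiers.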
+ case 112:
+#line 934 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-1].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-1].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck((yyvsp[-1].interm.type).loc, *(yyvsp[-1].interm.type).arraySizes);
+ }
+ if ((yyvsp[-1].interm.type).basicType == EbtVoid) {
+ parseContext.error((yyvsp[0].lex).loc, "illegal use of type 'void'", (yyvsp[0].lex).string->c_str(), "");
+ }
+ parseContext.reservedErrorCheck((yyvsp[0].lex).loc, *(yyvsp[0].lex).string);
+
+ TParameter param = {(yyvsp[0].lex).string, new TType((yyvsp[-1].interm.type))};
+ (yyval.interm).loc = (yyvsp[0].lex).loc;
+ (yyval.interm).param = param;
+ }
+#line 5241 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 113:
+#line 949 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+ }
+ TType* type = new TType((yyvsp[-2].interm.type));
+ type->transferArraySizes((yyvsp[0].interm).arraySizes);
+ type->copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes);
+
+ parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, type->getArraySizes());
+ parseContext.arraySizeRequiredCheck((yyvsp[0].interm).loc, *(yyvsp[0].interm).arraySizes);
+ parseContext.reservedErrorCheck((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string);
+
+ TParameter param = { (yyvsp[-1].lex).string, type };
+
+ (yyval.interm).loc = (yyvsp[-1].lex).loc;
+ (yyval.interm).param = param;
+ }
+#line 5265 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
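+      // Cases 114-118 (glslang.y 974-1015): parameter declarations. The qualified forms fold
+      // a leading precision qualifier into the parameter type and validate its storage; the
+      // unqualified forms default storage to EvqTemporary. Case 118 wraps a bare (unnamed)
+      // type in a TParameter.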
+ case 114:
+#line 974 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone)
+ (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision;
+ parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers);
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type);
+ parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type);
+ }
+#line 5281 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 115:
+#line 985 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type);
+ parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type);
+ parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+ }
+#line 5293 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 116:
+#line 995 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone)
+ (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision;
+ parseContext.precisionQualifierCheck((yyvsp[-1].interm.type).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+
+ parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers);
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type);
+ parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type);
+ }
+#line 5308 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 117:
+#line 1005 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+
+ parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type);
+ parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type);
+ parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier());
+ }
+#line 5320 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 118:
+#line 1015 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TParameter param = { 0, new TType((yyvsp[0].interm.type)) };
+ (yyval.interm).param = param;
+ if ((yyvsp[0].interm.type).arraySizes)
+ parseContext.arraySizeRequiredCheck((yyvsp[0].interm.type).loc, *(yyvsp[0].interm.type).arraySizes);
+ }
+#line 5331 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
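+      // Cases 119-123 (glslang.y 1024-1040): the init-declarator list. Each additional
+      // declarator goes through declareVariable(), optionally with array sizes and/or an
+      // initializer; initializer nodes are collected into a growing aggregate.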
+ case 119:
+#line 1024 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[0].interm);
+ }
+#line 5339 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 120:
+#line 1027 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-2].interm);
+ parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-2].interm).type);
+ }
+#line 5348 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 121:
+#line 1031 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-3].interm);
+ parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-3].interm).type, (yyvsp[0].interm).arraySizes);
+ }
+#line 5357 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 122:
+#line 1035 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-5].interm).type;
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-5].interm).type, (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-5].interm).intermNode, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5367 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 123:
+#line 1040 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-4].interm).type;
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-4].interm).type, 0, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-4].interm).intermNode, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5377 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
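+      // Cases 124-128 (glslang.y 1048-1068): single declarations — a bare type, a named
+      // variable, an arrayed variable, and the initialized variants, which start a fresh
+      // aggregate holding the initializer node.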
+ case 124:
+#line 1048 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[0].interm.type);
+ (yyval.interm).intermNode = 0;
+ parseContext.declareTypeDefaults((yyval.interm).loc, (yyval.interm).type);
+ }
+#line 5387 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 125:
+#line 1053 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-1].interm.type);
+ (yyval.interm).intermNode = 0;
+ parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-1].interm.type));
+ }
+#line 5397 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 126:
+#line 1058 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-2].interm.type);
+ (yyval.interm).intermNode = 0;
+ parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-2].interm.type), (yyvsp[0].interm).arraySizes);
+ }
+#line 5407 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 127:
+#line 1063 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-4].interm.type);
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-4].interm.type), (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5417 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 128:
+#line 1068 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).type = (yyvsp[-3].interm.type);
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode));
+ (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc);
+ }
+#line 5427 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
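+      // Cases 129-130 (glslang.y 1077, 1088): fully specified types. Case 130 merges a
+      // leading qualifier into the type, re-checks layout and array legality, and defaults
+      // non-interpolated vertex outputs and fragment inputs to smooth.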
+ case 129:
+#line 1077 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+
+ parseContext.globalQualifierTypeCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyval.interm.type));
+ if ((yyvsp[0].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ parseContext.precisionQualifierCheck((yyval.interm.type).loc, (yyval.interm.type).basicType, (yyval.interm.type).qualifier);
+ }
+#line 5443 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 130:
+#line 1088 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier);
+ parseContext.globalQualifierTypeCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, (yyvsp[0].interm.type));
+
+ if ((yyvsp[0].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ }
+
+ if ((yyvsp[0].interm.type).arraySizes && parseContext.arrayQualifierError((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).qualifier))
+ (yyvsp[0].interm.type).arraySizes = nullptr;
+
+ parseContext.checkNoShaderLayouts((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers);
+ (yyvsp[0].interm.type).shaderQualifiers.merge((yyvsp[-1].interm.type).shaderQualifiers);
+ parseContext.mergeQualifiers((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyvsp[-1].interm.type).qualifier, true);
+ parseContext.precisionQualifierCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).basicType, (yyvsp[0].interm.type).qualifier);
+
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+
+ if (! (yyval.interm.type).qualifier.isInterpolation() &&
+ ((parseContext.language == EShLangVertex && (yyval.interm.type).qualifier.storage == EvqVaryingOut) ||
+ (parseContext.language == EShLangFragment && (yyval.interm.type).qualifier.storage == EvqVaryingIn)))
+ (yyval.interm.type).qualifier.smooth = true;
+ }
+#line 5472 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
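+      // Cases 131-139 (glslang.y 1115-1189): the invariant qualifier and the interpolation
+      // qualifiers (smooth, flat, noperspective, AMD explicit interpolation, and the NV
+      // per-vertex/per-primitive/per-view/task qualifiers, present only when the matching
+      // *_EXTENSIONS macro is defined).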
+ case 131:
+#line 1115 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "invariant");
+        parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 120, 0, "invariant");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.invariant = true;
+ }
+#line 5483 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 132:
+#line 1124 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "smooth");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "smooth");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "smooth");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.smooth = true;
+ }
+#line 5495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 133:
+#line 1131 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "flat");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "flat");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "flat");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.flat = true;
+ }
+#line 5507 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 134:
+#line 1138 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "noperspective");
+#ifdef NV_EXTENSIONS
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective");
+#else
+ parseContext.requireProfile((yyvsp[0].lex).loc, ~EEsProfile, "noperspective");
+#endif
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "noperspective");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.nopersp = true;
+ }
+#line 5523 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 135:
+#line 1149 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "__explicitInterpAMD");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.explicitInterp = true;
+#endif
+ }
+#line 5537 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 136:
+#line 1158 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "pervertexNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.pervertexNV = true;
+#endif
+ }
+#line 5552 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 137:
+#line 1168 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck((yyvsp[0].lex).loc, "perprimitiveNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV");
+        // The fragment shader stage doesn't check for the extension, so we explicitly add the extension check below.
+ if (parseContext.language == EShLangFragment)
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.perPrimitiveNV = true;
+#endif
+ }
+#line 5569 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 138:
+#line 1180 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck((yyvsp[0].lex).loc, "perviewNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangMeshNV, "perviewNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.perViewNV = true;
+#endif
+ }
+#line 5583 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 139:
+#line 1189 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ // No need for profile version or extension check. Shader stage already checks both.
+ parseContext.globalCheck((yyvsp[0].lex).loc, "taskNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.perTaskNV = true;
+#endif
+ }
+#line 5597 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
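+      // Cases 140-145 (glslang.y 1201-1225): layout qualifiers — the parenthesized id list,
+      // merging of successive lists, and single ids with or without a value. "shared" gets
+      // special treatment because it is both an identifier and a keyword.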
+ case 140:
+#line 1201 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-1].interm.type);
+ }
+#line 5605 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 141:
+#line 1207 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5613 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 142:
+#line 1210 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-2].interm.type);
+ (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers);
+ parseContext.mergeObjectLayoutQualifiers((yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false);
+ }
+#line 5623 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 143:
+#line 1217 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), *(yyvsp[0].lex).string);
+ }
+#line 5632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 144:
+#line 1221 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[-2].lex).loc);
+ parseContext.setLayoutQualifier((yyvsp[-2].lex).loc, (yyval.interm.type), *(yyvsp[-2].lex).string, (yyvsp[0].interm.intermTypedNode));
+ }
+#line 5641 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 145:
+#line 1225 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { // because "shared" is both an identifier and a keyword
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ TString strShared("shared");
+ parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), strShared);
+ }
+#line 5651 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
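+      // Cases 146-148 (glslang.y 1233-1245): the "precise" qualifier (recorded as
+      // noContraction) and the qualifier chain that merges successive qualifiers into one.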
+ case 146:
+#line 1233 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+        parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.noContraction = true;
+ }
+#line 5662 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 147:
+#line 1242 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 148:
+#line 1245 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-1].interm.type);
+ if ((yyval.interm.type).basicType == EbtVoid)
+ (yyval.interm.type).basicType = (yyvsp[0].interm.type).basicType;
+
+ (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers);
+ parseContext.mergeQualifiers((yyval.interm.type).loc, (yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false);
+ }
+#line 5683 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
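+      // Cases 149-155 (glslang.y 1256-1278): single type qualifiers; mostly pass-throughs
+      // (some deliberately letting a block declaration's storage qualifier through), with an
+      // extra validity check for precision qualifiers.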
+ case 149:
+#line 1256 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5691 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 150:
+#line 1259 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5699 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 151:
+#line 1262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkPrecisionQualifier((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier.precision);
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5708 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 152:
+#line 1266 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // allow inheritance of storage qualifier from block declaration
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5717 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 153:
+#line 1270 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // allow inheritance of storage qualifier from block declaration
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5726 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 154:
+#line 1274 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // allow inheritance of storage qualifier from block declaration
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5735 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 155:
+#line 1278 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ }
+#line 5743 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
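+      // Cases 156-185 (glslang.y 1284-1480): storage qualifiers — const, the deprecated
+      // attribute/varying, inout/in/out, centroid, patch, sample, uniform, buffer, shared,
+      // the NV ray-tracing storage classes, the GL_KHR_memory_scope_semantics coherence
+      // qualifiers, the memory qualifiers (volatile, restrict, readonly, writeonly), the
+      // unimplemented "subroutine", and nonuniform.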
+ case 156:
+#line 1284 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+        (yyval.interm.type).qualifier.storage = EvqConst;  // will later turn into EvqConstReadOnly if the initializer is not constant
+ }
+#line 5752 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 157:
+#line 1288 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangVertex, "attribute");
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "attribute");
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "attribute");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "attribute");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "attribute");
+
+ parseContext.globalCheck((yyvsp[0].lex).loc, "attribute");
+
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqVaryingIn;
+ }
+#line 5769 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 158:
+#line 1300 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "varying");
+ parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "varying");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "varying");
+ parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "varying");
+
+ parseContext.globalCheck((yyvsp[0].lex).loc, "varying");
+
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ if (parseContext.language == EShLangVertex)
+ (yyval.interm.type).qualifier.storage = EvqVaryingOut;
+ else
+ (yyval.interm.type).qualifier.storage = EvqVaryingIn;
+ }
+#line 5788 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 159:
+#line 1314 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "inout");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqInOut;
+ }
+#line 5798 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 160:
+#line 1319 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "in");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later
+ (yyval.interm.type).qualifier.storage = EvqIn;
+ }
+#line 5809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 161:
+#line 1325 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "out");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later
+ (yyval.interm.type).qualifier.storage = EvqOut;
+ }
+#line 5820 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 162:
+#line 1331 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 120, 0, "centroid");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "centroid");
+ parseContext.globalCheck((yyvsp[0].lex).loc, "centroid");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.centroid = true;
+ }
+#line 5832 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 163:
+#line 1338 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "patch");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.patch = true;
+ }
+#line 5843 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 164:
+#line 1344 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "sample");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.sample = true;
+ }
+#line 5853 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 165:
+#line 1349 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "uniform");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqUniform;
+ }
+#line 5863 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 166:
+#line 1354 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "buffer");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqBuffer;
+ }
+#line 5873 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 167:
+#line 1359 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectNVMask | EShLangClosestHitNVMask
+ | EShLangAnyHitNVMask), "hitAttributeNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqHitAttrNV;
+#endif
+ }
+#line 5888 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 168:
+#line 1369 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenNVMask | EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqPayloadNV;
+#endif
+ }
+#line 5903 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 169:
+#line 1379 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangClosestHitNVMask |
+ EShLangAnyHitNVMask | EShLangMissNVMask), "rayPayloadInNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqPayloadInNV;
+#endif
+ }
+#line 5918 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 170:
+#line 1389 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenNVMask |
+ EShLangClosestHitNVMask | EShLangMissNVMask | EShLangCallableNVMask), "callableDataNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqCallableDataNV;
+#endif
+ }
+#line 5933 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 171:
+#line 1399 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInNV");
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableNVMask), "callableDataInNV");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqCallableDataInNV;
+#endif
+ }
+#line 5947 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 172:
+#line 1408 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.globalCheck((yyvsp[0].lex).loc, "shared");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared");
+ parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 310, 0, "shared");
+#ifdef NV_EXTENSIONS
+ parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared");
+#else
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangCompute, "shared");
+#endif
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.storage = EvqShared;
+ }
+#line 5964 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 173:
+#line 1420 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.coherent = true;
+ }
+#line 5973 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 174:
+#line 1424 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent");
+ (yyval.interm.type).qualifier.devicecoherent = true;
+ }
+#line 5983 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 175:
+#line 1429 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent");
+ (yyval.interm.type).qualifier.queuefamilycoherent = true;
+ }
+#line 5993 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 176:
+#line 1434 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent");
+ (yyval.interm.type).qualifier.workgroupcoherent = true;
+ }
+#line 6003 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 177:
+#line 1439 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent");
+ (yyval.interm.type).qualifier.subgroupcoherent = true;
+ }
+#line 6013 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 178:
+#line 1444 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate");
+ (yyval.interm.type).qualifier.nonprivate = true;
+ }
+#line 6023 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 179:
+#line 1449 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.volatil = true;
+ }
+#line 6032 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 180:
+#line 1453 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.restrict = true;
+ }
+#line 6041 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 181:
+#line 1457 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.readonly = true;
+ }
+#line 6050 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 182:
+#line 1461 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.writeonly = true;
+ }
+#line 6059 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 183:
+#line 1465 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.spvRemoved((yyvsp[0].lex).loc, "subroutine");
+ parseContext.globalCheck((yyvsp[0].lex).loc, "subroutine");
+ parseContext.unimplemented((yyvsp[0].lex).loc, "subroutine");
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ }
+#line 6070 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 184:
+#line 1471 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.spvRemoved((yyvsp[-3].lex).loc, "subroutine");
+ parseContext.globalCheck((yyvsp[-3].lex).loc, "subroutine");
+ parseContext.unimplemented((yyvsp[-3].lex).loc, "subroutine");
+ (yyval.interm.type).init((yyvsp[-3].lex).loc);
+ }
+#line 6081 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 185:
+#line 1480 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc);
+ (yyval.interm.type).qualifier.nonUniform = true;
+ }
+#line 6090 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 186:
+#line 1487 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // TODO
+ }
+#line 6098 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 187:
+#line 1490 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // TODO: 4.0 semantics: subroutines
+ // 1) make sure each identifier is a type declared earlier with SUBROUTINE
+ // 2) save all of the identifiers for future comparison with the declared function
+ }
+#line 6108 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
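+      // Cases 188-189 (glslang.y 1498-1503): a type specifier, optionally followed by an
+      // array specifier; both apply the scope's default precision and any type parameters.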
+ case 188:
+#line 1498 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[-1].interm.type);
+ (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type));
+ (yyval.interm.type).typeParameters = (yyvsp[0].interm.typeParameters);
+ }
+#line 6118 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 189:
+#line 1503 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayOfArrayVersionCheck((yyvsp[0].interm).loc, (yyvsp[0].interm).arraySizes);
+ (yyval.interm.type) = (yyvsp[-2].interm.type);
+ (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type));
+ (yyval.interm.type).typeParameters = (yyvsp[-1].interm.typeParameters);
+ (yyval.interm.type).arraySizes = (yyvsp[0].interm).arraySizes;
+ }
+#line 6130 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
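+      // Cases 190-193 (glslang.y 1513-1530): array specifiers — the "[]" and "[size]" forms
+      // plus the repeated variants that append inner dimensions; sized forms validate the
+      // constant expression with arraySizeCheck().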
+ case 190:
+#line 1513 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[-1].lex).loc;
+ (yyval.interm).arraySizes = new TArraySizes;
+ (yyval.interm).arraySizes->addInnerSize();
+ }
+#line 6140 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 191:
+#line 1518 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm).loc = (yyvsp[-2].lex).loc;
+ (yyval.interm).arraySizes = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size");
+ (yyval.interm).arraySizes->addInnerSize(size);
+ }
+#line 6153 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 192:
+#line 1526 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-2].interm);
+ (yyval.interm).arraySizes->addInnerSize();
+ }
+#line 6162 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 193:
+#line 1530 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm) = (yyvsp[-3].interm);
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size");
+ (yyval.interm).arraySizes->addInnerSize(size);
+ }
+#line 6174 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
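+      // Cases 194-198 (glslang.y 1540-1562): optional type parameter lists, reusing
+      // TArraySizes to carry the values; each value is validated with arraySizeCheck().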
+ case 194:
+#line 1540 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = (yyvsp[0].interm.typeParameters);
+ }
+#line 6182 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 195:
+#line 1543 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = 0;
+ }
+#line 6190 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 196:
+#line 1549 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = (yyvsp[-1].interm.typeParameters);
+ }
+#line 6198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 197:
+#line 1555 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = new TArraySizes;
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter");
+ (yyval.interm.typeParameters)->addInnerSize(size);
+ }
+#line 6210 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 198:
+#line 1562 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeParameters) = (yyvsp[-2].interm.typeParameters);
+
+ TArraySize size;
+ parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter");
+ (yyval.interm.typeParameters)->addInnerSize(size);
+ }
+#line 6222 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
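+      // Cases 199 onward (glslang.y 1572 ff.): non-array type specifiers, scalar types first.
+      // Each action initializes the public type for the current scope and sets the basic
+      // type; the sized types (float16_t, int8_t, ...) run a capability check first.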
+ case 199:
+#line 1572 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtVoid;
+ }
+#line 6231 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 200:
+#line 1576 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ }
+#line 6240 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 201:
+#line 1580 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ }
+#line 6250 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 202:
+#line 1585 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "float16_t", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ }
+#line 6260 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 203:
+#line 1590 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ }
+#line 6270 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 204:
+#line 1595 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ }
+#line 6280 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 205:
+#line 1600 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ }
+#line 6289 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 206:
+#line 1604 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ }
+#line 6299 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 207:
+#line 1609 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ }
+#line 6309 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 208:
+#line 1614 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ }
+#line 6319 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 209:
+#line 1619 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ }
+#line 6329 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 210:
+#line 1624 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ }
+#line 6339 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 211:
+#line 1629 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ }
+#line 6349 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 212:
+#line 1634 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ }
+#line 6359 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 213:
+#line 1639 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ }
+#line 6369 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 214:
+#line 1644 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ }
+#line 6379 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 215:
+#line 1649 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ }
+#line 6388 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
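+      // From case 216 (glslang.y 1653 ff.): vector types — the scalar pattern plus
+      // setVector(2|3|4); the double, half, and explicit-sized variants run their capability
+      // checks first.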
+ case 216:
+#line 1653 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6398 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 217:
+#line 1658 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6408 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 218:
+#line 1663 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6418 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 219:
+#line 1668 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6429 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 220:
+#line 1674 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6440 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 221:
+#line 1680 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6451 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 222:
+#line 1686 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6462 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 223:
+#line 1692 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6473 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 224:
+#line 1698 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6484 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 225:
+#line 1704 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 226:
+#line 1710 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6506 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 227:
+#line 1716 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6517 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 228:
+#line 1722 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 229:
+#line 1728 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6539 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 230:
+#line 1734 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6550 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 231:
+#line 1740 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6560 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 232:
+#line 1745 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6570 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 233:
+#line 1750 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtBool;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6580 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 234:
+#line 1755 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6590 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 235:
+#line 1760 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6600 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 236:
+#line 1765 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6610 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 237:
+#line 1770 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6621 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 238:
+#line 1776 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 239:
+#line 1782 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt8;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6643 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 240:
+#line 1788 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6654 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 241:
+#line 1794 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6665 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 242:
+#line 1800 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt16;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 243:
+#line 1806 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6687 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 244:
+#line 1812 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6698 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 245:
+#line 1818 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6709 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 246:
+#line 1824 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6720 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 247:
+#line 1830 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6731 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 248:
+#line 1836 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtInt64;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6742 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 249:
+#line 1842 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6753 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 250:
+#line 1848 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6764 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 251:
+#line 1854 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6775 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 252:
+#line 1860 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6786 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 253:
+#line 1866 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6797 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 254:
+#line 1872 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint8;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6808 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 255:
+#line 1878 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6819 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 256:
+#line 1884 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6830 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 257:
+#line 1890 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint16;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6841 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 258:
+#line 1896 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6852 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 259:
+#line 1902 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6863 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 260:
+#line 1908 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6874 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 261:
+#line 1914 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ (yyval.interm.type).setVector(2);
+ }
+#line 6885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 262:
+#line 1920 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ (yyval.interm.type).setVector(3);
+ }
+#line 6896 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 263:
+#line 1926 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtUint64;
+ (yyval.interm.type).setVector(4);
+ }
+#line 6907 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 264:
+#line 1932 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 6917 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 265:
+#line 1937 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 6927 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 266:
+#line 1942 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 6937 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 267:
+#line 1947 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 6947 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 268:
+#line 1952 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 6957 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 269:
+#line 1957 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 6967 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 270:
+#line 1962 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 6977 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 271:
+#line 1967 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 6987 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 272:
+#line 1972 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 6997 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 273:
+#line 1977 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7007 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 274:
+#line 1982 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7017 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 275:
+#line 1987 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7027 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 276:
+#line 1992 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7038 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 277:
+#line 1998 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7049 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 278:
+#line 2004 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7060 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 279:
+#line 2010 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7071 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 280:
+#line 2016 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7082 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 281:
+#line 2022 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7093 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 282:
+#line 2028 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7104 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 283:
+#line 2034 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7115 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 284:
+#line 2040 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7126 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 285:
+#line 2046 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7137 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 286:
+#line 2052 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7148 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 287:
+#line 2058 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7159 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 288:
+#line 2064 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7170 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 289:
+#line 2070 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7181 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 290:
+#line 2076 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7192 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 291:
+#line 2082 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7203 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 292:
+#line 2088 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7214 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 293:
+#line 2094 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7225 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 294:
+#line 2100 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7236 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 295:
+#line 2106 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7247 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 296:
+#line 2112 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7258 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 297:
+#line 2118 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7269 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 298:
+#line 2124 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7280 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 299:
+#line 2130 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat16;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7291 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 300:
+#line 2136 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7302 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 301:
+#line 2142 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7313 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 302:
+#line 2148 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 303:
+#line 2154 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7335 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 304:
+#line 2160 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7346 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 305:
+#line 2166 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7357 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 306:
+#line 2172 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7368 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 307:
+#line 2178 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7379 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 308:
+#line 2184 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7390 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 309:
+#line 2190 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7401 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 310:
+#line 2196 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7412 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 311:
+#line 2202 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7423 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 312:
+#line 2208 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7434 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 313:
+#line 2214 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7445 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 314:
+#line 2220 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7456 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 315:
+#line 2226 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 2);
+ }
+#line 7467 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 316:
+#line 2232 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 3);
+ }
+#line 7478 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 317:
+#line 2238 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(2, 4);
+ }
+#line 7489 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 318:
+#line 2244 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 2);
+ }
+#line 7500 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 319:
+#line 2250 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 3);
+ }
+#line 7511 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 320:
+#line 2256 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(3, 4);
+ }
+#line 7522 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 321:
+#line 2262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 2);
+ }
+#line 7533 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 322:
+#line 2268 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 3);
+ }
+#line 7544 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 323:
+#line 2274 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtDouble;
+ (yyval.interm.type).setMatrix(4, 4);
+ }
+#line 7555 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 324:
+#line 2280 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef NV_EXTENSIONS
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtAccStructNV;
+#endif
+ }
+#line 7566 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 325:
+#line 2286 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.vulkanRemoved((yyvsp[0].lex).loc, "atomic counter types");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtAtomicUint;
+ }
+#line 7576 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 326:
+#line 2291 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D);
+ }
+#line 7586 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 327:
+#line 2296 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D);
+ }
+#line 7596 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 328:
+#line 2301 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd3D);
+ }
+#line 7606 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 329:
+#line 2306 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube);
+ }
+#line 7616 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 330:
+#line 2311 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D, false, true);
+ }
+#line 7626 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 331:
+#line 2316 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, true);
+ }
+#line 7636 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 332:
+#line 2321 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube, false, true);
+ }
+#line 7646 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 333:
+#line 2326 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true);
+ }
+#line 7656 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 334:
+#line 2331 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true);
+ }
+#line 7666 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 335:
+#line 2336 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true, true);
+ }
+#line 7676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 336:
+#line 2341 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, true);
+ }
+#line 7686 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 337:
+#line 2346 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true);
+ }
+#line 7696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 338:
+#line 2351 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true, true);
+ }
+#line 7706 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 339:
+#line 2356 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D);
+#endif
+ }
+#line 7719 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 340:
+#line 2364 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D);
+#endif
+ }
+#line 7732 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 341:
+#line 2372 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd3D);
+#endif
+ }
+#line 7745 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 342:
+#line 2380 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube);
+#endif
+ }
+#line 7758 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 343:
+#line 2388 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, false, true);
+#endif
+ }
+#line 7771 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 344:
+#line 2396 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, true);
+#endif
+ }
+#line 7784 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 345:
+#line 2404 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, false, true);
+#endif
+ }
+#line 7797 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 346:
+#line 2412 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true);
+#endif
+ }
+#line 7810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 347:
+#line 2420 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true);
+#endif
+ }
+#line 7823 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 348:
+#line 2428 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true, true);
+#endif
+ }
+#line 7836 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 349:
+#line 2436 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, true);
+#endif
+ }
+#line 7849 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 350:
+#line 2444 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true);
+#endif
+ }
+#line 7862 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 351:
+#line 2452 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true, true);
+#endif
+ }
+#line 7875 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 352:
+#line 2460 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd1D);
+ }
+#line 7885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 353:
+#line 2465 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D);
+ }
+#line 7895 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 354:
+#line 2470 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd3D);
+ }
+#line 7905 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 355:
+#line 2475 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdCube);
+ }
+#line 7915 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 356:
+#line 2480 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd1D, true);
+ }
+#line 7925 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 357:
+#line 2485 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D, true);
+ }
+#line 7935 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 358:
+#line 2490 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdCube, true);
+ }
+#line 7945 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 359:
+#line 2495 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd1D);
+ }
+#line 7955 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 360:
+#line 2500 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D);
+ }
+#line 7965 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 361:
+#line 2505 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd3D);
+ }
+#line 7975 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 362:
+#line 2510 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdCube);
+ }
+#line 7985 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 363:
+#line 2515 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd1D, true);
+ }
+#line 7995 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 364:
+#line 2520 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D, true);
+ }
+#line 8005 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 365:
+#line 2525 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdCube, true);
+ }
+#line 8015 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 366:
+#line 2530 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdRect);
+ }
+#line 8025 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 367:
+#line 2535 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdRect, false, true);
+ }
+#line 8035 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 368:
+#line 2540 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdRect);
+#endif
+ }
+#line 8048 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 369:
+#line 2548 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdRect, false, true);
+#endif
+ }
+#line 8061 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 370:
+#line 2556 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdRect);
+ }
+#line 8071 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 371:
+#line 2561 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdRect);
+ }
+#line 8081 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 372:
+#line 2566 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, EsdBuffer);
+ }
+#line 8091 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 373:
+#line 2571 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, EsdBuffer);
+#endif
+ }
+#line 8104 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 374:
+#line 2579 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, EsdBuffer);
+ }
+#line 8114 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 375:
+#line 2584 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, EsdBuffer);
+ }
+#line 8124 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 376:
+#line 2589 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, false, true);
+ }
+#line 8134 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 377:
+#line 2594 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+#line 8147 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 378:
+#line 2602 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D, false, false, true);
+ }
+#line 8157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 379:
+#line 2607 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D, false, false, true);
+ }
+#line 8167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 380:
+#line 2612 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, false, true);
+ }
+#line 8177 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 381:
+#line 2617 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+#line 8190 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 382:
+#line 2625 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtInt, Esd2D, true, false, true);
+ }
+#line 8200 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 383:
+#line 2630 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtUint, Esd2D, true, false, true);
+ }
+#line 8210 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 384:
+#line 2635 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setPureSampler(false);
+ }
+#line 8220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 385:
+#line 2640 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setPureSampler(true);
+ }
+#line 8230 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 386:
+#line 2645 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D);
+ }
+#line 8240 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 387:
+#line 2650 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D);
+#endif
+ }
+#line 8253 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 388:
+#line 2658 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D);
+ }
+#line 8263 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 389:
+#line 2663 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D);
+#endif
+ }
+#line 8276 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 390:
+#line 2671 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd3D);
+ }
+#line 8286 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 391:
+#line 2676 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd3D);
+#endif
+ }
+#line 8299 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 392:
+#line 2684 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube);
+ }
+#line 8309 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 393:
+#line 2689 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube);
+#endif
+ }
+#line 8322 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 394:
+#line 2697 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D, true);
+ }
+#line 8332 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 395:
+#line 2702 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D, true);
+#endif
+ }
+#line 8345 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 396:
+#line 2710 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true);
+ }
+#line 8355 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 397:
+#line 2715 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true);
+#endif
+ }
+#line 8368 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 398:
+#line 2723 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube, true);
+ }
+#line 8378 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 399:
+#line 2728 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube, true);
+#endif
+ }
+#line 8391 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 400:
+#line 2736 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D);
+ }
+#line 8401 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 401:
+#line 2741 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D);
+ }
+#line 8411 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 402:
+#line 2746 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd3D);
+ }
+#line 8421 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 403:
+#line 2751 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube);
+ }
+#line 8431 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 404:
+#line 2756 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D, true);
+ }
+#line 8441 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 405:
+#line 2761 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true);
+ }
+#line 8451 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 406:
+#line 2766 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube, true);
+ }
+#line 8461 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 407:
+#line 2771 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D);
+ }
+#line 8471 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 408:
+#line 2776 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D);
+ }
+#line 8481 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 409:
+#line 2781 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd3D);
+ }
+#line 8491 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 410:
+#line 2786 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube);
+ }
+#line 8501 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 411:
+#line 2791 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D, true);
+ }
+#line 8511 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 412:
+#line 2796 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true);
+ }
+#line 8521 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 413:
+#line 2801 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube, true);
+ }
+#line 8531 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 414:
+#line 2806 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdRect);
+ }
+#line 8541 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 415:
+#line 2811 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdRect);
+#endif
+ }
+#line 8554 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 416:
+#line 2819 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdRect);
+ }
+#line 8564 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 417:
+#line 2824 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdRect);
+ }
+#line 8574 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 418:
+#line 2829 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, EsdBuffer);
+ }
+#line 8584 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 419:
+#line 2834 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdBuffer);
+#endif
+ }
+#line 8597 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 420:
+#line 2842 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, EsdBuffer);
+ }
+#line 8607 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 421:
+#line 2847 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, EsdBuffer);
+ }
+#line 8617 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 422:
+#line 2852 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, false, false, true);
+ }
+#line 8627 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 423:
+#line 2857 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+#line 8640 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 424:
+#line 2865 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, false, false, true);
+ }
+#line 8650 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 425:
+#line 2870 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, false, false, true);
+ }
+#line 8660 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 426:
+#line 2875 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true, false, true);
+ }
+#line 8670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 427:
+#line 2880 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+#line 8683 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 428:
+#line 2888 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true, false, true);
+ }
+#line 8693 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 429:
+#line 2893 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true, false, true);
+ }
+#line 8703 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 430:
+#line 2898 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D);
+ }
+#line 8713 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 431:
+#line 2903 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D);
+#endif
+ }
+#line 8726 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 432:
+#line 2911 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd1D);
+ }
+#line 8736 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 433:
+#line 2916 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd1D);
+ }
+#line 8746 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 434:
+#line 2921 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D);
+ }
+#line 8756 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 435:
+#line 2926 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D);
+#endif
+ }
+#line 8769 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 436:
+#line 2934 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D);
+ }
+#line 8779 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 437:
+#line 2939 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D);
+ }
+#line 8789 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 438:
+#line 2944 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd3D);
+ }
+#line 8799 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 439:
+#line 2949 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd3D);
+#endif
+ }
+#line 8812 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 440:
+#line 2957 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd3D);
+ }
+#line 8822 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 441:
+#line 2962 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd3D);
+ }
+#line 8832 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 442:
+#line 2967 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdRect);
+ }
+#line 8842 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 443:
+#line 2972 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdRect);
+#endif
+ }
+#line 8855 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 444:
+#line 2980 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdRect);
+ }
+#line 8865 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 445:
+#line 2985 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdRect);
+ }
+#line 8875 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 446:
+#line 2990 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube);
+ }
+#line 8885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 447:
+#line 2995 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube);
+#endif
+ }
+#line 8898 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 448:
+#line 3003 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdCube);
+ }
+#line 8908 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 449:
+#line 3008 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdCube);
+ }
+#line 8918 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 450:
+#line 3013 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdBuffer);
+ }
+#line 8928 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 451:
+#line 3018 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdBuffer);
+#endif
+ }
+#line 8941 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 452:
+#line 3026 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdBuffer);
+ }
+#line 8951 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 453:
+#line 3031 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdBuffer);
+ }
+#line 8961 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 454:
+#line 3036 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D, true);
+ }
+#line 8971 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 455:
+#line 3041 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D, true);
+#endif
+ }
+#line 8984 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 456:
+#line 3049 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd1D, true);
+ }
+#line 8994 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 457:
+#line 3054 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd1D, true);
+ }
+#line 9004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 458:
+#line 3059 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true);
+ }
+#line 9014 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 459:
+#line 3064 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true);
+#endif
+ }
+#line 9027 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 460:
+#line 3072 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true);
+ }
+#line 9037 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 461:
+#line 3077 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true);
+ }
+#line 9047 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 462:
+#line 3082 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube, true);
+ }
+#line 9057 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 463:
+#line 3087 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube, true);
+#endif
+ }
+#line 9070 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 464:
+#line 3095 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, EsdCube, true);
+ }
+#line 9080 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 465:
+#line 3100 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, EsdCube, true);
+ }
+#line 9090 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 466:
+#line 3105 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, false, false, true);
+ }
+#line 9100 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 467:
+#line 3110 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, false, false, true);
+#endif
+ }
+#line 9113 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 468:
+#line 3118 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, false, false, true);
+ }
+#line 9123 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 469:
+#line 3123 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, false, false, true);
+ }
+#line 9133 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 470:
+#line 3128 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true, false, true);
+ }
+#line 9143 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 471:
+#line 3133 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true, false, true);
+#endif
+ }
+#line 9156 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 472:
+#line 3141 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true, false, true);
+ }
+#line 9166 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 473:
+#line 3146 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true, false, true);
+ }
+#line 9176 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 474:
+#line 3151 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { // GL_OES_EGL_image_external
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D);
+ (yyval.interm.type).sampler.external = true;
+ }
+#line 9187 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 475:
+#line 3157 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { // GL_EXT_YUV_target
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.set(EbtFloat, Esd2D);
+ (yyval.interm.type).sampler.yuv = true;
+ }
+#line 9198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 476:
+#line 3163 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat);
+ }
+#line 9209 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 477:
+#line 3169 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat, true);
+ }
+#line 9220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 478:
+#line 3175 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat16);
+#endif
+ }
+#line 9234 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 479:
+#line 3184 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+#ifdef AMD_EXTENSIONS
+ parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel());
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtFloat16, true);
+#endif
+ }
+#line 9248 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 480:
+#line 3193 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtInt);
+ }
+#line 9259 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 481:
+#line 3199 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtInt, true);
+ }
+#line 9270 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 482:
+#line 3205 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtUint);
+ }
+#line 9281 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 483:
+#line 3211 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtSampler;
+ (yyval.interm.type).sampler.setSubpass(EbtUint, true);
+ }
+#line 9292 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 484:
+#line 3217 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.fcoopmatCheck((yyvsp[0].lex).loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel());
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtFloat;
+ (yyval.interm.type).coopmat = true;
+ }
+#line 9303 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 485:
+#line 3223 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.type) = (yyvsp[0].interm.type);
+ (yyval.interm.type).qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary;
+ parseContext.structTypeCheck((yyval.interm.type).loc, (yyval.interm.type));
+ }
+#line 9313 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 486:
+#line 3228 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ //
+ // This is for user defined type names. The lexical phase looked up the
+ // type.
+ //
+ if (const TVariable* variable = ((yyvsp[0].lex).symbol)->getAsVariable()) {
+ const TType& structure = variable->getType();
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ (yyval.interm.type).basicType = EbtStruct;
+ (yyval.interm.type).userDef = &structure;
+ } else
+ parseContext.error((yyvsp[0].lex).loc, "expected type name", (yyvsp[0].lex).string->c_str(), "");
+ }
+#line 9331 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 487:
+#line 3244 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "highp precision qualifier");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqHigh);
+ }
+#line 9341 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 488:
+#line 3249 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "mediump precision qualifier");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqMedium);
+ }
+#line 9351 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 489:
+#line 3254 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "lowp precision qualifier");
+ (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel());
+ parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqLow);
+ }
+#line 9361 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 490:
+#line 3262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { parseContext.nestedStructCheck((yyvsp[-2].lex).loc); }
+#line 9367 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 491:
+#line 3262 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TType* structure = new TType((yyvsp[-1].interm.typeList), *(yyvsp[-4].lex).string);
+ parseContext.structArrayCheck((yyvsp[-4].lex).loc, *structure);
+ TVariable* userTypeDef = new TVariable((yyvsp[-4].lex).string, *structure, true);
+ if (! parseContext.symbolTable.insert(*userTypeDef))
+ parseContext.error((yyvsp[-4].lex).loc, "redefinition", (yyvsp[-4].lex).string->c_str(), "struct");
+ (yyval.interm.type).init((yyvsp[-5].lex).loc);
+ (yyval.interm.type).basicType = EbtStruct;
+ (yyval.interm.type).userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+#line 9383 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 492:
+#line 3273 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { parseContext.nestedStructCheck((yyvsp[-1].lex).loc); }
+#line 9389 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 493:
+#line 3273 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ TType* structure = new TType((yyvsp[-1].interm.typeList), TString(""));
+ (yyval.interm.type).init((yyvsp[-4].lex).loc);
+ (yyval.interm.type).basicType = EbtStruct;
+ (yyval.interm.type).userDef = structure;
+ --parseContext.structNestingLevel;
+ }
+#line 9401 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 494:
+#line 3283 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList) = (yyvsp[0].interm.typeList);
+ }
+#line 9409 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 495:
+#line 3286 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList) = (yyvsp[-1].interm.typeList);
+ for (unsigned int i = 0; i < (yyvsp[0].interm.typeList)->size(); ++i) {
+ for (unsigned int j = 0; j < (yyval.interm.typeList)->size(); ++j) {
+ if ((*(yyval.interm.typeList))[j].type->getFieldName() == (*(yyvsp[0].interm.typeList))[i].type->getFieldName())
+ parseContext.error((*(yyvsp[0].interm.typeList))[i].loc, "duplicate member name:", "", (*(yyvsp[0].interm.typeList))[i].type->getFieldName().c_str());
+ }
+ (yyval.interm.typeList)->push_back((*(yyvsp[0].interm.typeList))[i]);
+ }
+ }
+#line 9424 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 496:
+#line 3299 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+ }
+
+ (yyval.interm.typeList) = (yyvsp[-1].interm.typeList);
+
+ parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType);
+ parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier);
+
+ for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) {
+ TType type((yyvsp[-2].interm.type));
+ type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName());
+ type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes());
+ type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes());
+ (*(yyval.interm.typeList))[i].type->shallowCopy(type);
+ }
+ }
+#line 9451 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 497:
+#line 3321 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.type).arraySizes) {
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type");
+ parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type");
+ if (parseContext.profile == EEsProfile)
+ parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes);
+ }
+
+ (yyval.interm.typeList) = (yyvsp[-1].interm.typeList);
+
+ parseContext.memberQualifierCheck((yyvsp[-3].interm.type));
+ parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType);
+ parseContext.mergeQualifiers((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, (yyvsp[-3].interm.type).qualifier, true);
+ parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier);
+
+ for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) {
+ TType type((yyvsp[-2].interm.type));
+ type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName());
+ type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes());
+ type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes);
+ parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes());
+ (*(yyval.interm.typeList))[i].type->shallowCopy(type);
+ }
+ }
+#line 9480 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 498:
+#line 3348 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList) = new TTypeList;
+ (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine));
+ }
+#line 9489 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 499:
+#line 3352 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine));
+ }
+#line 9497 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 500:
+#line 3358 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.typeLine).type = new TType(EbtVoid);
+ (yyval.interm.typeLine).loc = (yyvsp[0].lex).loc;
+ (yyval.interm.typeLine).type->setFieldName(*(yyvsp[0].lex).string);
+ }
+#line 9507 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 501:
+#line 3363 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, (yyvsp[0].interm).arraySizes);
+
+ (yyval.interm.typeLine).type = new TType(EbtVoid);
+ (yyval.interm.typeLine).loc = (yyvsp[-1].lex).loc;
+ (yyval.interm.typeLine).type->setFieldName(*(yyvsp[-1].lex).string);
+ (yyval.interm.typeLine).type->transferArraySizes((yyvsp[0].interm).arraySizes);
+ }
+#line 9520 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 502:
+#line 3374 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 9528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 503:
+#line 3377 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile((yyvsp[-2].lex).loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires((yyvsp[-2].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode);
+ }
+#line 9539 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 504:
+#line 3383 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ const char* initFeature = "{ } style initializers";
+ parseContext.requireProfile((yyvsp[-3].lex).loc, ~EEsProfile, initFeature);
+ parseContext.profileRequires((yyvsp[-3].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature);
+ (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode);
+ }
+#line 9550 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 505:
+#line 3392 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate(0, (yyvsp[0].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)->getLoc());
+ }
+#line 9558 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 506:
+#line 3395 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode));
+ }
+#line 9566 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 507:
+#line 3401 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9572 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 508:
+#line 3405 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9578 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 509:
+#line 3406 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9584 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 510:
+#line 3412 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9590 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 511:
+#line 3413 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9596 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 512:
+#line 3414 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9602 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 513:
+#line 3415 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9608 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 514:
+#line 3416 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9614 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 515:
+#line 3417 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9620 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 516:
+#line 3418 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9626 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 517:
+#line 3422 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = 0; }
+#line 9632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 518:
+#line 3423 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ }
+#line 9641 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 519:
+#line 3427 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ }
+#line 9650 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 520:
+#line 3431 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-2].interm.intermNode) && (yyvsp[-2].interm.intermNode)->getAsAggregate())
+ (yyvsp[-2].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence);
+ (yyval.interm.intermNode) = (yyvsp[-2].interm.intermNode);
+ }
+#line 9660 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 521:
+#line 3439 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9666 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 522:
+#line 3440 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); }
+#line 9672 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 523:
+#line 3444 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9680 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 524:
+#line 3447 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ --parseContext.controlFlowNestingLevel;
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9689 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 525:
+#line 3451 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.push();
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9699 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 526:
+#line 3456 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9710 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 527:
+#line 3465 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ }
+#line 9718 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 528:
+#line 3468 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[-1].interm.intermNode) && (yyvsp[-1].interm.intermNode)->getAsAggregate())
+ (yyvsp[-1].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence);
+ (yyval.interm.intermNode) = (yyvsp[-1].interm.intermNode);
+ }
+#line 9728 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 529:
+#line 3476 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[0].interm.intermNode));
+ if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase ||
+ (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence(0, (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case
+ }
+ }
+#line 9741 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 530:
+#line 3484 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase ||
+ (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) {
+ parseContext.wrapupSwitchSubsequence((yyvsp[-1].interm.intermNode) ? (yyvsp[-1].interm.intermNode)->getAsAggregate() : 0, (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case
+ } else
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode));
+ }
+#line 9754 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 531:
+#line 3495 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = 0; }
+#line 9760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 532:
+#line 3496 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ { (yyval.interm.intermNode) = static_cast<TIntermNode*>((yyvsp[-1].interm.intermTypedNode)); }
+#line 9766 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 533:
+#line 3500 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9774 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 534:
+#line 3503 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleSelectionAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9783 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 535:
+#line 3509 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-2].interm.intermTypedNode));
+ (yyval.interm.intermNode) = parseContext.intermediate.addSelection((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.nodePair), (yyvsp[-4].lex).loc);
+ }
+#line 9792 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 536:
+#line 3516 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermNode);
+ (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermNode);
+ }
+#line 9801 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 537:
+#line 3520 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[0].interm.intermNode);
+ (yyval.interm.nodePair).node2 = 0;
+ }
+#line 9810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 538:
+#line 3528 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ parseContext.boolCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode));
+ }
+#line 9819 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 539:
+#line 3532 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.boolCheck((yyvsp[-2].lex).loc, (yyvsp[-3].interm.type));
+
+ TType type((yyvsp[-3].interm.type));
+ TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode));
+ if (initNode)
+ (yyval.interm.intermTypedNode) = initNode->getAsTyped();
+ else
+ (yyval.interm.intermTypedNode) = 0;
+ }
+#line 9834 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 540:
+#line 3545 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9842 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 541:
+#line 3548 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleSwitchAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9851 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 542:
+#line 3554 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ // start new switch sequence on the switch stack
+ ++parseContext.controlFlowNestingLevel;
+ ++parseContext.statementNestingLevel;
+ parseContext.switchSequenceStack.push_back(new TIntermSequence);
+ parseContext.switchLevel.push_back(parseContext.statementNestingLevel);
+ parseContext.symbolTable.push();
+ }
+#line 9864 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 543:
+#line 3562 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.addSwitch((yyvsp[-7].lex).loc, (yyvsp[-5].interm.intermTypedNode), (yyvsp[-1].interm.intermNode) ? (yyvsp[-1].interm.intermNode)->getAsAggregate() : 0);
+ delete parseContext.switchSequenceStack.back();
+ parseContext.switchSequenceStack.pop_back();
+ parseContext.switchLevel.pop_back();
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 9878 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 544:
+#line 3574 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ }
+#line 9886 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 545:
+#line 3577 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9894 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 546:
+#line 3583 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error((yyvsp[-2].lex).loc, "cannot appear outside switch statement", "case", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error((yyvsp[-2].lex).loc, "cannot be nested inside control flow", "case", "");
+ else {
+ parseContext.constantValueCheck((yyvsp[-1].interm.intermTypedNode), "case");
+ parseContext.integerCheck((yyvsp[-1].interm.intermTypedNode), "case");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpCase, (yyvsp[-1].interm.intermTypedNode), (yyvsp[-2].lex).loc);
+ }
+ }
+#line 9911 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 547:
+#line 3595 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = 0;
+ if (parseContext.switchLevel.size() == 0)
+ parseContext.error((yyvsp[-1].lex).loc, "cannot appear outside switch statement", "default", "");
+ else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel)
+ parseContext.error((yyvsp[-1].lex).loc, "cannot be nested inside control flow", "default", "");
+ else
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDefault, (yyvsp[-1].lex).loc);
+ }
+#line 9925 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 548:
+#line 3607 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9933 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 549:
+#line 3610 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.handleLoopAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode));
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 9942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 550:
+#line 3616 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error((yyvsp[-1].lex).loc, "while loops not available", "limitation", "");
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9955 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 551:
+#line 3624 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, true, (yyvsp[-5].lex).loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 9967 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 552:
+#line 3631 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 9977 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 553:
+#line 3636 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (! parseContext.limits.whileLoops)
+ parseContext.error((yyvsp[-7].lex).loc, "do-while loops not available", "limitation", "");
+
+ parseContext.boolCheck((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode));
+
+ (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[-5].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, false, (yyvsp[-4].lex).loc);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 9993 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 554:
+#line 3647 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.push();
+ ++parseContext.loopNestingLevel;
+ ++parseContext.statementNestingLevel;
+ ++parseContext.controlFlowNestingLevel;
+ }
+#line 10004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 555:
+#line 3653 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[-3].interm.intermNode), (yyvsp[-5].lex).loc);
+ TIntermLoop* forLoop = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node1), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node2), true, (yyvsp[-6].lex).loc);
+ if (! parseContext.limits.nonInductiveForLoops)
+ parseContext.inductiveLoopCheck((yyvsp[-6].lex).loc, (yyvsp[-3].interm.intermNode), forLoop);
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyval.interm.intermNode), forLoop, (yyvsp[-6].lex).loc);
+ (yyval.interm.intermNode)->getAsAggregate()->setOperator(EOpSequence);
+ --parseContext.loopNestingLevel;
+ --parseContext.statementNestingLevel;
+ --parseContext.controlFlowNestingLevel;
+ }
+#line 10021 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 556:
+#line 3668 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10029 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 557:
+#line 3671 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10037 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 558:
+#line 3677 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 10045 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 559:
+#line 3680 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermTypedNode) = 0;
+ }
+#line 10053 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 560:
+#line 3686 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[-1].interm.intermTypedNode);
+ (yyval.interm.nodePair).node2 = 0;
+ }
+#line 10062 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 561:
+#line 3690 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermTypedNode);
+ (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermTypedNode);
+ }
+#line 10071 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 562:
+#line 3697 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (parseContext.loopNestingLevel <= 0)
+ parseContext.error((yyvsp[-1].lex).loc, "continue statement only allowed in loops", "", "");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpContinue, (yyvsp[-1].lex).loc);
+ }
+#line 10081 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 563:
+#line 3702 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0)
+ parseContext.error((yyvsp[-1].lex).loc, "break statement only allowed in switch and loops", "", "");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpBreak, (yyvsp[-1].lex).loc);
+ }
+#line 10091 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 564:
+#line 3707 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpReturn, (yyvsp[-1].lex).loc);
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid)
+ parseContext.error((yyvsp[-1].lex).loc, "non-void function must return a value", "return", "");
+ if (parseContext.inMain)
+ parseContext.postEntryPointReturn = true;
+ }
+#line 10103 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 565:
+#line 3714 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = parseContext.handleReturnValue((yyvsp[-2].lex).loc, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 10111 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 566:
+#line 3717 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "discard");
+ (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpKill, (yyvsp[-1].lex).loc);
+ }
+#line 10120 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 567:
+#line 3726 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ parseContext.intermediate.setTreeRoot((yyval.interm.intermNode));
+ }
+#line 10129 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 568:
+#line 3730 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ if ((yyvsp[0].interm.intermNode) != nullptr) {
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode));
+ parseContext.intermediate.setTreeRoot((yyval.interm.intermNode));
+ }
+ }
+#line 10140 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 569:
+#line 3739 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10148 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 570:
+#line 3742 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode);
+ }
+#line 10156 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 571:
+#line 3745 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ parseContext.requireProfile((yyvsp[0].lex).loc, ~EEsProfile, "extraneous semicolon");
+ parseContext.profileRequires((yyvsp[0].lex).loc, ~EEsProfile, 460, nullptr, "extraneous semicolon");
+ (yyval.interm.intermNode) = nullptr;
+ }
+#line 10166 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 572:
+#line 3753 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyvsp[0].interm).function = parseContext.handleFunctionDeclarator((yyvsp[0].interm).loc, *(yyvsp[0].interm).function, false /* not prototype */);
+ (yyvsp[0].interm).intermNode = parseContext.handleFunctionDefinition((yyvsp[0].interm).loc, *(yyvsp[0].interm).function);
+ }
+#line 10175 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 573:
+#line 3757 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+        // May be best done as a post-process phase on the intermediate code
+ if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! parseContext.functionReturnsValue)
+ parseContext.error((yyvsp[-2].interm).loc, "function does not return a value:", "", (yyvsp[-2].interm).function->getName().c_str());
+ parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]);
+ (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermNode));
+ parseContext.intermediate.setAggregateOperator((yyval.interm.intermNode), EOpFunction, (yyvsp[-2].interm).function->getType(), (yyvsp[-2].interm).loc);
+ (yyval.interm.intermNode)->getAsAggregate()->setName((yyvsp[-2].interm).function->getMangledName().c_str());
+
+        // Store the pragma information for debug, optimize, and other vendor-specific
+        // information. This information can be queried from the parse tree.
+ (yyval.interm.intermNode)->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize);
+ (yyval.interm.intermNode)->getAsAggregate()->setDebug(parseContext.contextPragma.debug);
+ (yyval.interm.intermNode)->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable);
+ }
+#line 10195 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 574:
+#line 3775 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = (yyvsp[-2].interm.attributes);
+ parseContext.requireExtensions((yyvsp[-4].lex).loc, 1, &E_GL_EXT_control_flow_attributes, "attribute");
+ }
+#line 10204 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 575:
+#line 3781 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = (yyvsp[0].interm.attributes);
+ }
+#line 10212 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 576:
+#line 3784 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = parseContext.mergeAttributes((yyvsp[-2].interm.attributes), (yyvsp[0].interm.attributes));
+ }
+#line 10220 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 577:
+#line 3789 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[0].lex).string);
+ }
+#line 10228 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+ case 578:
+#line 3792 "MachineIndependent/glslang.y" /* yacc.c:1646 */
+ {
+ (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[-3].lex).string, (yyvsp[-1].interm.intermTypedNode));
+ }
+#line 10236 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ break;
+
+
+#line 10240 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */
+ default: break;
+ }
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+
+ /* Now 'shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+
+ yyn = yyr1[yyn];
+
+ yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
+ if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
+ yystate = yytable[yystate];
+ else
+ yystate = yydefgoto[yyn - YYNTOKENS];
+
+ goto yynewstate;
+
+
+/*--------------------------------------.
+| yyerrlab -- here on detecting error. |
+`--------------------------------------*/
+yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (pParseContext, YY_("syntax error"));
+#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
+ {
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (pParseContext, yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
+ }
+# undef YYSYNTAX_ERROR
+#endif
+ }
+
+
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval, pParseContext);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+
+ /* Pacify compilers like GCC when the user code never invokes
+ YYERROR and the label yyerrorlab therefore never appears in user
+ code. */
+ if (/*CONSTCOND*/ 0)
+ goto yyerrorlab;
+
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (!yypact_value_is_default (yyn))
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp, pParseContext);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+#if !defined yyoverflow || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (pParseContext, YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+yyreturn:
+ if (yychar != YYEMPTY)
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval, pParseContext);
+ }
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp, pParseContext);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ return yyresult;
+}
+#line 3796 "MachineIndependent/glslang.y" /* yacc.c:1906 */
+
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h b/src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h
new file mode 100644
index 0000000..a467db6
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/glslang_tab.cpp.h
@@ -0,0 +1,509 @@
+/* A Bison parser, made by GNU Bison 3.0.4. */
+
+/* Bison interface for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int yydebug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ ATTRIBUTE = 258,
+ VARYING = 259,
+ FLOAT16_T = 260,
+ FLOAT = 261,
+ FLOAT32_T = 262,
+ DOUBLE = 263,
+ FLOAT64_T = 264,
+ CONST = 265,
+ BOOL = 266,
+ INT = 267,
+ UINT = 268,
+ INT64_T = 269,
+ UINT64_T = 270,
+ INT32_T = 271,
+ UINT32_T = 272,
+ INT16_T = 273,
+ UINT16_T = 274,
+ INT8_T = 275,
+ UINT8_T = 276,
+ BREAK = 277,
+ CONTINUE = 278,
+ DO = 279,
+ ELSE = 280,
+ FOR = 281,
+ IF = 282,
+ DISCARD = 283,
+ RETURN = 284,
+ SWITCH = 285,
+ CASE = 286,
+ DEFAULT = 287,
+ SUBROUTINE = 288,
+ BVEC2 = 289,
+ BVEC3 = 290,
+ BVEC4 = 291,
+ IVEC2 = 292,
+ IVEC3 = 293,
+ IVEC4 = 294,
+ UVEC2 = 295,
+ UVEC3 = 296,
+ UVEC4 = 297,
+ I64VEC2 = 298,
+ I64VEC3 = 299,
+ I64VEC4 = 300,
+ U64VEC2 = 301,
+ U64VEC3 = 302,
+ U64VEC4 = 303,
+ I32VEC2 = 304,
+ I32VEC3 = 305,
+ I32VEC4 = 306,
+ U32VEC2 = 307,
+ U32VEC3 = 308,
+ U32VEC4 = 309,
+ I16VEC2 = 310,
+ I16VEC3 = 311,
+ I16VEC4 = 312,
+ U16VEC2 = 313,
+ U16VEC3 = 314,
+ U16VEC4 = 315,
+ I8VEC2 = 316,
+ I8VEC3 = 317,
+ I8VEC4 = 318,
+ U8VEC2 = 319,
+ U8VEC3 = 320,
+ U8VEC4 = 321,
+ VEC2 = 322,
+ VEC3 = 323,
+ VEC4 = 324,
+ MAT2 = 325,
+ MAT3 = 326,
+ MAT4 = 327,
+ CENTROID = 328,
+ IN = 329,
+ OUT = 330,
+ INOUT = 331,
+ UNIFORM = 332,
+ PATCH = 333,
+ SAMPLE = 334,
+ BUFFER = 335,
+ SHARED = 336,
+ NONUNIFORM = 337,
+ PAYLOADNV = 338,
+ PAYLOADINNV = 339,
+ HITATTRNV = 340,
+ CALLDATANV = 341,
+ CALLDATAINNV = 342,
+ COHERENT = 343,
+ VOLATILE = 344,
+ RESTRICT = 345,
+ READONLY = 346,
+ WRITEONLY = 347,
+ DEVICECOHERENT = 348,
+ QUEUEFAMILYCOHERENT = 349,
+ WORKGROUPCOHERENT = 350,
+ SUBGROUPCOHERENT = 351,
+ NONPRIVATE = 352,
+ DVEC2 = 353,
+ DVEC3 = 354,
+ DVEC4 = 355,
+ DMAT2 = 356,
+ DMAT3 = 357,
+ DMAT4 = 358,
+ F16VEC2 = 359,
+ F16VEC3 = 360,
+ F16VEC4 = 361,
+ F16MAT2 = 362,
+ F16MAT3 = 363,
+ F16MAT4 = 364,
+ F32VEC2 = 365,
+ F32VEC3 = 366,
+ F32VEC4 = 367,
+ F32MAT2 = 368,
+ F32MAT3 = 369,
+ F32MAT4 = 370,
+ F64VEC2 = 371,
+ F64VEC3 = 372,
+ F64VEC4 = 373,
+ F64MAT2 = 374,
+ F64MAT3 = 375,
+ F64MAT4 = 376,
+ NOPERSPECTIVE = 377,
+ FLAT = 378,
+ SMOOTH = 379,
+ LAYOUT = 380,
+ EXPLICITINTERPAMD = 381,
+ PERVERTEXNV = 382,
+ PERPRIMITIVENV = 383,
+ PERVIEWNV = 384,
+ PERTASKNV = 385,
+ MAT2X2 = 386,
+ MAT2X3 = 387,
+ MAT2X4 = 388,
+ MAT3X2 = 389,
+ MAT3X3 = 390,
+ MAT3X4 = 391,
+ MAT4X2 = 392,
+ MAT4X3 = 393,
+ MAT4X4 = 394,
+ DMAT2X2 = 395,
+ DMAT2X3 = 396,
+ DMAT2X4 = 397,
+ DMAT3X2 = 398,
+ DMAT3X3 = 399,
+ DMAT3X4 = 400,
+ DMAT4X2 = 401,
+ DMAT4X3 = 402,
+ DMAT4X4 = 403,
+ F16MAT2X2 = 404,
+ F16MAT2X3 = 405,
+ F16MAT2X4 = 406,
+ F16MAT3X2 = 407,
+ F16MAT3X3 = 408,
+ F16MAT3X4 = 409,
+ F16MAT4X2 = 410,
+ F16MAT4X3 = 411,
+ F16MAT4X4 = 412,
+ F32MAT2X2 = 413,
+ F32MAT2X3 = 414,
+ F32MAT2X4 = 415,
+ F32MAT3X2 = 416,
+ F32MAT3X3 = 417,
+ F32MAT3X4 = 418,
+ F32MAT4X2 = 419,
+ F32MAT4X3 = 420,
+ F32MAT4X4 = 421,
+ F64MAT2X2 = 422,
+ F64MAT2X3 = 423,
+ F64MAT2X4 = 424,
+ F64MAT3X2 = 425,
+ F64MAT3X3 = 426,
+ F64MAT3X4 = 427,
+ F64MAT4X2 = 428,
+ F64MAT4X3 = 429,
+ F64MAT4X4 = 430,
+ ATOMIC_UINT = 431,
+ ACCSTRUCTNV = 432,
+ FCOOPMATNV = 433,
+ SAMPLER1D = 434,
+ SAMPLER2D = 435,
+ SAMPLER3D = 436,
+ SAMPLERCUBE = 437,
+ SAMPLER1DSHADOW = 438,
+ SAMPLER2DSHADOW = 439,
+ SAMPLERCUBESHADOW = 440,
+ SAMPLER1DARRAY = 441,
+ SAMPLER2DARRAY = 442,
+ SAMPLER1DARRAYSHADOW = 443,
+ SAMPLER2DARRAYSHADOW = 444,
+ ISAMPLER1D = 445,
+ ISAMPLER2D = 446,
+ ISAMPLER3D = 447,
+ ISAMPLERCUBE = 448,
+ ISAMPLER1DARRAY = 449,
+ ISAMPLER2DARRAY = 450,
+ USAMPLER1D = 451,
+ USAMPLER2D = 452,
+ USAMPLER3D = 453,
+ USAMPLERCUBE = 454,
+ USAMPLER1DARRAY = 455,
+ USAMPLER2DARRAY = 456,
+ SAMPLER2DRECT = 457,
+ SAMPLER2DRECTSHADOW = 458,
+ ISAMPLER2DRECT = 459,
+ USAMPLER2DRECT = 460,
+ SAMPLERBUFFER = 461,
+ ISAMPLERBUFFER = 462,
+ USAMPLERBUFFER = 463,
+ SAMPLERCUBEARRAY = 464,
+ SAMPLERCUBEARRAYSHADOW = 465,
+ ISAMPLERCUBEARRAY = 466,
+ USAMPLERCUBEARRAY = 467,
+ SAMPLER2DMS = 468,
+ ISAMPLER2DMS = 469,
+ USAMPLER2DMS = 470,
+ SAMPLER2DMSARRAY = 471,
+ ISAMPLER2DMSARRAY = 472,
+ USAMPLER2DMSARRAY = 473,
+ SAMPLEREXTERNALOES = 474,
+ SAMPLEREXTERNAL2DY2YEXT = 475,
+ F16SAMPLER1D = 476,
+ F16SAMPLER2D = 477,
+ F16SAMPLER3D = 478,
+ F16SAMPLER2DRECT = 479,
+ F16SAMPLERCUBE = 480,
+ F16SAMPLER1DARRAY = 481,
+ F16SAMPLER2DARRAY = 482,
+ F16SAMPLERCUBEARRAY = 483,
+ F16SAMPLERBUFFER = 484,
+ F16SAMPLER2DMS = 485,
+ F16SAMPLER2DMSARRAY = 486,
+ F16SAMPLER1DSHADOW = 487,
+ F16SAMPLER2DSHADOW = 488,
+ F16SAMPLER1DARRAYSHADOW = 489,
+ F16SAMPLER2DARRAYSHADOW = 490,
+ F16SAMPLER2DRECTSHADOW = 491,
+ F16SAMPLERCUBESHADOW = 492,
+ F16SAMPLERCUBEARRAYSHADOW = 493,
+ SAMPLER = 494,
+ SAMPLERSHADOW = 495,
+ TEXTURE1D = 496,
+ TEXTURE2D = 497,
+ TEXTURE3D = 498,
+ TEXTURECUBE = 499,
+ TEXTURE1DARRAY = 500,
+ TEXTURE2DARRAY = 501,
+ ITEXTURE1D = 502,
+ ITEXTURE2D = 503,
+ ITEXTURE3D = 504,
+ ITEXTURECUBE = 505,
+ ITEXTURE1DARRAY = 506,
+ ITEXTURE2DARRAY = 507,
+ UTEXTURE1D = 508,
+ UTEXTURE2D = 509,
+ UTEXTURE3D = 510,
+ UTEXTURECUBE = 511,
+ UTEXTURE1DARRAY = 512,
+ UTEXTURE2DARRAY = 513,
+ TEXTURE2DRECT = 514,
+ ITEXTURE2DRECT = 515,
+ UTEXTURE2DRECT = 516,
+ TEXTUREBUFFER = 517,
+ ITEXTUREBUFFER = 518,
+ UTEXTUREBUFFER = 519,
+ TEXTURECUBEARRAY = 520,
+ ITEXTURECUBEARRAY = 521,
+ UTEXTURECUBEARRAY = 522,
+ TEXTURE2DMS = 523,
+ ITEXTURE2DMS = 524,
+ UTEXTURE2DMS = 525,
+ TEXTURE2DMSARRAY = 526,
+ ITEXTURE2DMSARRAY = 527,
+ UTEXTURE2DMSARRAY = 528,
+ F16TEXTURE1D = 529,
+ F16TEXTURE2D = 530,
+ F16TEXTURE3D = 531,
+ F16TEXTURE2DRECT = 532,
+ F16TEXTURECUBE = 533,
+ F16TEXTURE1DARRAY = 534,
+ F16TEXTURE2DARRAY = 535,
+ F16TEXTURECUBEARRAY = 536,
+ F16TEXTUREBUFFER = 537,
+ F16TEXTURE2DMS = 538,
+ F16TEXTURE2DMSARRAY = 539,
+ SUBPASSINPUT = 540,
+ SUBPASSINPUTMS = 541,
+ ISUBPASSINPUT = 542,
+ ISUBPASSINPUTMS = 543,
+ USUBPASSINPUT = 544,
+ USUBPASSINPUTMS = 545,
+ F16SUBPASSINPUT = 546,
+ F16SUBPASSINPUTMS = 547,
+ IMAGE1D = 548,
+ IIMAGE1D = 549,
+ UIMAGE1D = 550,
+ IMAGE2D = 551,
+ IIMAGE2D = 552,
+ UIMAGE2D = 553,
+ IMAGE3D = 554,
+ IIMAGE3D = 555,
+ UIMAGE3D = 556,
+ IMAGE2DRECT = 557,
+ IIMAGE2DRECT = 558,
+ UIMAGE2DRECT = 559,
+ IMAGECUBE = 560,
+ IIMAGECUBE = 561,
+ UIMAGECUBE = 562,
+ IMAGEBUFFER = 563,
+ IIMAGEBUFFER = 564,
+ UIMAGEBUFFER = 565,
+ IMAGE1DARRAY = 566,
+ IIMAGE1DARRAY = 567,
+ UIMAGE1DARRAY = 568,
+ IMAGE2DARRAY = 569,
+ IIMAGE2DARRAY = 570,
+ UIMAGE2DARRAY = 571,
+ IMAGECUBEARRAY = 572,
+ IIMAGECUBEARRAY = 573,
+ UIMAGECUBEARRAY = 574,
+ IMAGE2DMS = 575,
+ IIMAGE2DMS = 576,
+ UIMAGE2DMS = 577,
+ IMAGE2DMSARRAY = 578,
+ IIMAGE2DMSARRAY = 579,
+ UIMAGE2DMSARRAY = 580,
+ F16IMAGE1D = 581,
+ F16IMAGE2D = 582,
+ F16IMAGE3D = 583,
+ F16IMAGE2DRECT = 584,
+ F16IMAGECUBE = 585,
+ F16IMAGE1DARRAY = 586,
+ F16IMAGE2DARRAY = 587,
+ F16IMAGECUBEARRAY = 588,
+ F16IMAGEBUFFER = 589,
+ F16IMAGE2DMS = 590,
+ F16IMAGE2DMSARRAY = 591,
+ STRUCT = 592,
+ VOID = 593,
+ WHILE = 594,
+ IDENTIFIER = 595,
+ TYPE_NAME = 596,
+ FLOATCONSTANT = 597,
+ DOUBLECONSTANT = 598,
+ INT16CONSTANT = 599,
+ UINT16CONSTANT = 600,
+ INT32CONSTANT = 601,
+ UINT32CONSTANT = 602,
+ INTCONSTANT = 603,
+ UINTCONSTANT = 604,
+ INT64CONSTANT = 605,
+ UINT64CONSTANT = 606,
+ BOOLCONSTANT = 607,
+ FLOAT16CONSTANT = 608,
+ LEFT_OP = 609,
+ RIGHT_OP = 610,
+ INC_OP = 611,
+ DEC_OP = 612,
+ LE_OP = 613,
+ GE_OP = 614,
+ EQ_OP = 615,
+ NE_OP = 616,
+ AND_OP = 617,
+ OR_OP = 618,
+ XOR_OP = 619,
+ MUL_ASSIGN = 620,
+ DIV_ASSIGN = 621,
+ ADD_ASSIGN = 622,
+ MOD_ASSIGN = 623,
+ LEFT_ASSIGN = 624,
+ RIGHT_ASSIGN = 625,
+ AND_ASSIGN = 626,
+ XOR_ASSIGN = 627,
+ OR_ASSIGN = 628,
+ SUB_ASSIGN = 629,
+ LEFT_PAREN = 630,
+ RIGHT_PAREN = 631,
+ LEFT_BRACKET = 632,
+ RIGHT_BRACKET = 633,
+ LEFT_BRACE = 634,
+ RIGHT_BRACE = 635,
+ DOT = 636,
+ COMMA = 637,
+ COLON = 638,
+ EQUAL = 639,
+ SEMICOLON = 640,
+ BANG = 641,
+ DASH = 642,
+ TILDE = 643,
+ PLUS = 644,
+ STAR = 645,
+ SLASH = 646,
+ PERCENT = 647,
+ LEFT_ANGLE = 648,
+ RIGHT_ANGLE = 649,
+ VERTICAL_BAR = 650,
+ CARET = 651,
+ AMPERSAND = 652,
+ QUESTION = 653,
+ INVARIANT = 654,
+ PRECISE = 655,
+ HIGH_PRECISION = 656,
+ MEDIUM_PRECISION = 657,
+ LOW_PRECISION = 658,
+ PRECISION = 659,
+ PACKED = 660,
+ RESOURCE = 661,
+ SUPERP = 662
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+
+union YYSTYPE
+{
+#line 71 "MachineIndependent/glslang.y" /* yacc.c:1909 */
+
+ struct {
+ glslang::TSourceLoc loc;
+ union {
+ glslang::TString *string;
+ int i;
+ unsigned int u;
+ long long i64;
+ unsigned long long u64;
+ bool b;
+ double d;
+ };
+ glslang::TSymbol* symbol;
+ } lex;
+ struct {
+ glslang::TSourceLoc loc;
+ glslang::TOperator op;
+ union {
+ TIntermNode* intermNode;
+ glslang::TIntermNodePair nodePair;
+ glslang::TIntermTyped* intermTypedNode;
+ glslang::TAttributes* attributes;
+ };
+ union {
+ glslang::TPublicType type;
+ glslang::TFunction* function;
+ glslang::TParameter param;
+ glslang::TTypeLoc typeLine;
+ glslang::TTypeList* typeList;
+ glslang::TArraySizes* arraySizes;
+ glslang::TIdentifierList* identifierList;
+ };
+ glslang::TArraySizes* typeParameters;
+ } interm;
+
+#line 498 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
+};
+
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+
+
+int yyparse (glslang::TParseContext* pParseContext);
+
+#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */
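+
+/* Usage sketch (illustrative, not part of the generated header): the
+   glslang front end drives this parser roughly as
+
+       int result = yyparse(&parseContext);
+
+   where parseContext is the glslang::TParseContext for the current
+   compile. Matching the skeleton above, yyparse() returns 0 when the
+   input is accepted, 1 on YYABORT, and 2 on memory exhaustion. */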
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/intermOut.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/intermOut.cpp
new file mode 100644
index 0000000..a2c3627
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/intermOut.cpp
@@ -0,0 +1,1518 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "localintermediate.h"
+#include "../Include/InfoSink.h"
+
+#ifdef _MSC_VER
+#include <cfloat>
+#else
+#include <cmath>
+#endif
+#include <cstdint>
+
+namespace {
+
+bool IsInfinity(double x) {
+#ifdef _MSC_VER
+ switch (_fpclass(x)) {
+ case _FPCLASS_NINF:
+ case _FPCLASS_PINF:
+ return true;
+ default:
+ return false;
+ }
+#else
+ return std::isinf(x);
+#endif
+}
+
+bool IsNan(double x) {
+#ifdef _MSC_VER
+ switch (_fpclass(x)) {
+ case _FPCLASS_SNAN:
+ case _FPCLASS_QNAN:
+ return true;
+ default:
+ return false;
+ }
+#else
+ return std::isnan(x);
+#endif
+}
+
+} // end anonymous namespace
+
+namespace glslang {
+
+//
+// Two purposes:
+// 1. Show an example of how to iterate the tree. Functions can
+//    also directly call Traverse() on children themselves to
+//    have finer-grained control over the process than shown here.
+//    See the last function for how to get started.
+// 2. Print out a text-based description of the tree.
+//
+
+//
+// Use this class to carry along data from node to node in
+// the traversal
+//
+class TOutputTraverser : public TIntermTraverser {
+public:
+ TOutputTraverser(TInfoSink& i) : infoSink(i), extraOutput(NoExtraOutput) { }
+
+ enum EExtraOutput {
+ NoExtraOutput,
+ BinaryDoubleOutput
+ };
+ void setDoubleOutput(EExtraOutput extra) { extraOutput = extra; }
+
+ virtual bool visitBinary(TVisit, TIntermBinary* node);
+ virtual bool visitUnary(TVisit, TIntermUnary* node);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+ virtual bool visitSelection(TVisit, TIntermSelection* node);
+ virtual void visitConstantUnion(TIntermConstantUnion* node);
+ virtual void visitSymbol(TIntermSymbol* node);
+ virtual bool visitLoop(TVisit, TIntermLoop* node);
+ virtual bool visitBranch(TVisit, TIntermBranch* node);
+ virtual bool visitSwitch(TVisit, TIntermSwitch* node);
+
+ TInfoSink& infoSink;
+protected:
+ TOutputTraverser(TOutputTraverser&);
+ TOutputTraverser& operator=(TOutputTraverser&);
+
+ EExtraOutput extraOutput;
+};
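+
+// Minimal usage sketch (illustrative, not part of the original source):
+// the printer below is typically driven by constructing the traverser
+// and walking the tree from its root, e.g.
+//
+//     TInfoSink sink;
+//     TOutputTraverser printer(sink);
+//     root->traverse(&printer);        // root is a TIntermNode*
+//
+// such as the root installed via setTreeRoot() in the grammar actions.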
+
+//
+// Helper functions for printing, not part of traversing.
+//
+
+static void OutputTreeText(TInfoSink& infoSink, const TIntermNode* node, const int depth)
+{
+    infoSink.debug << node->getLoc().string << ":";
+    if (node->getLoc().line)
+        infoSink.debug << node->getLoc().line;
+    else
+        infoSink.debug << "? ";
+
+    for (int i = 0; i < depth; ++i)
+ infoSink.debug << " ";
+}
+
+//
+// The rest of the file consists of the traversal functions. The last one
+// is the one that starts the traversal.
+//
+// Return true from interior nodes to have the external traversal
+// continue on to children. If you process children yourself,
+// return false.
+//
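+// For instance, a visitor that processes children itself might be
+// sketched (hypothetical MyTraverser, not part of this file) as:
+//
+//     bool MyTraverser::visitBinary(TVisit, TIntermBinary* node)
+//     {
+//         node->getLeft()->traverse(this);   // recurse manually
+//         node->getRight()->traverse(this);
+//         return false;                      // skip default child traversal
+//     }
+//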
+
+bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getOp()) {
+ case EOpAssign: out.debug << "move second child to first child"; break;
+ case EOpAddAssign: out.debug << "add second child into first child"; break;
+ case EOpSubAssign: out.debug << "subtract second child into first child"; break;
+ case EOpMulAssign: out.debug << "multiply second child into first child"; break;
+ case EOpVectorTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break;
+ case EOpVectorTimesScalarAssign: out.debug << "vector scale second child into first child"; break;
+ case EOpMatrixTimesScalarAssign: out.debug << "matrix scale second child into first child"; break;
+ case EOpMatrixTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break;
+ case EOpDivAssign: out.debug << "divide second child into first child"; break;
+ case EOpModAssign: out.debug << "mod second child into first child"; break;
+ case EOpAndAssign: out.debug << "and second child into first child"; break;
+ case EOpInclusiveOrAssign: out.debug << "or second child into first child"; break;
+ case EOpExclusiveOrAssign: out.debug << "exclusive or second child into first child"; break;
+ case EOpLeftShiftAssign: out.debug << "left shift second child into first child"; break;
+ case EOpRightShiftAssign: out.debug << "right shift second child into first child"; break;
+
+ case EOpIndexDirect: out.debug << "direct index"; break;
+ case EOpIndexIndirect: out.debug << "indirect index"; break;
+ case EOpIndexDirectStruct:
+ {
+ bool reference = node->getLeft()->getType().getBasicType() == EbtReference;
+ const TTypeList *members = reference ? node->getLeft()->getType().getReferentType()->getStruct() : node->getLeft()->getType().getStruct();
+ out.debug << (*members)[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName();
+ out.debug << ": direct index for structure"; break;
+ }
+ case EOpVectorSwizzle: out.debug << "vector swizzle"; break;
+ case EOpMatrixSwizzle: out.debug << "matrix swizzle"; break;
+
+ case EOpAdd: out.debug << "add"; break;
+ case EOpSub: out.debug << "subtract"; break;
+ case EOpMul: out.debug << "component-wise multiply"; break;
+ case EOpDiv: out.debug << "divide"; break;
+ case EOpMod: out.debug << "mod"; break;
+ case EOpRightShift: out.debug << "right-shift"; break;
+ case EOpLeftShift: out.debug << "left-shift"; break;
+ case EOpAnd: out.debug << "bitwise and"; break;
+ case EOpInclusiveOr: out.debug << "inclusive-or"; break;
+ case EOpExclusiveOr: out.debug << "exclusive-or"; break;
+ case EOpEqual: out.debug << "Compare Equal"; break;
+ case EOpNotEqual: out.debug << "Compare Not Equal"; break;
+ case EOpLessThan: out.debug << "Compare Less Than"; break;
+ case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
+ case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break;
+ case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break;
+ case EOpVectorEqual: out.debug << "Equal"; break;
+ case EOpVectorNotEqual: out.debug << "NotEqual"; break;
+
+ case EOpVectorTimesScalar: out.debug << "vector-scale"; break;
+ case EOpVectorTimesMatrix: out.debug << "vector-times-matrix"; break;
+ case EOpMatrixTimesVector: out.debug << "matrix-times-vector"; break;
+ case EOpMatrixTimesScalar: out.debug << "matrix-scale"; break;
+ case EOpMatrixTimesMatrix: out.debug << "matrix-multiply"; break;
+
+ case EOpLogicalOr: out.debug << "logical-or"; break;
+ case EOpLogicalXor: out.debug << "logical-xor"; break;
+ case EOpLogicalAnd: out.debug << "logical-and"; break;
+
+ default: out.debug << "<unknown op>";
+ }
+
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ out.debug << "\n";
+
+ return true;
+}
+
+bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getOp()) {
+ case EOpNegative: out.debug << "Negate value"; break;
+ case EOpVectorLogicalNot:
+ case EOpLogicalNot: out.debug << "Negate conditional"; break;
+ case EOpBitwiseNot: out.debug << "Bitwise not"; break;
+
+ case EOpPostIncrement: out.debug << "Post-Increment"; break;
+ case EOpPostDecrement: out.debug << "Post-Decrement"; break;
+ case EOpPreIncrement: out.debug << "Pre-Increment"; break;
+ case EOpPreDecrement: out.debug << "Pre-Decrement"; break;
+
+ // * -> bool
+ case EOpConvInt8ToBool: out.debug << "Convert int8_t to bool"; break;
+ case EOpConvUint8ToBool: out.debug << "Convert uint8_t to bool"; break;
+ case EOpConvInt16ToBool: out.debug << "Convert int16_t to bool"; break;
+ case EOpConvUint16ToBool: out.debug << "Convert uint16_t to bool";break;
+ case EOpConvIntToBool: out.debug << "Convert int to bool"; break;
+ case EOpConvUintToBool: out.debug << "Convert uint to bool"; break;
+ case EOpConvInt64ToBool: out.debug << "Convert int64 to bool"; break;
+ case EOpConvUint64ToBool: out.debug << "Convert uint64 to bool"; break;
+ case EOpConvFloat16ToBool: out.debug << "Convert float16_t to bool"; break;
+ case EOpConvFloatToBool: out.debug << "Convert float to bool"; break;
+ case EOpConvDoubleToBool: out.debug << "Convert double to bool"; break;
+
+ // bool -> *
+ case EOpConvBoolToInt8: out.debug << "Convert bool to int8_t"; break;
+ case EOpConvBoolToUint8: out.debug << "Convert bool to uint8_t"; break;
+    case EOpConvBoolToInt16:  out.debug << "Convert bool to int16_t"; break;
+    case EOpConvBoolToUint16: out.debug << "Convert bool to uint16_t"; break;
+ case EOpConvBoolToInt: out.debug << "Convert bool to int" ; break;
+ case EOpConvBoolToUint: out.debug << "Convert bool to uint"; break;
+ case EOpConvBoolToInt64: out.debug << "Convert bool to int64"; break;
+ case EOpConvBoolToUint64: out.debug << "Convert bool to uint64";break;
+ case EOpConvBoolToFloat16: out.debug << "Convert bool to float16_t"; break;
+ case EOpConvBoolToFloat: out.debug << "Convert bool to float"; break;
+ case EOpConvBoolToDouble: out.debug << "Convert bool to double"; break;
+
+ // int8_t -> (u)int*
+ case EOpConvInt8ToInt16: out.debug << "Convert int8_t to int16_t";break;
+ case EOpConvInt8ToInt: out.debug << "Convert int8_t to int"; break;
+ case EOpConvInt8ToInt64: out.debug << "Convert int8_t to int64"; break;
+ case EOpConvInt8ToUint8: out.debug << "Convert int8_t to uint8_t";break;
+ case EOpConvInt8ToUint16: out.debug << "Convert int8_t to uint16_t";break;
+ case EOpConvInt8ToUint: out.debug << "Convert int8_t to uint"; break;
+ case EOpConvInt8ToUint64: out.debug << "Convert int8_t to uint64"; break;
+
+ // uint8_t -> (u)int*
+ case EOpConvUint8ToInt8: out.debug << "Convert uint8_t to int8_t";break;
+ case EOpConvUint8ToInt16: out.debug << "Convert uint8_t to int16_t";break;
+ case EOpConvUint8ToInt: out.debug << "Convert uint8_t to int"; break;
+ case EOpConvUint8ToInt64: out.debug << "Convert uint8_t to int64"; break;
+ case EOpConvUint8ToUint16: out.debug << "Convert uint8_t to uint16_t";break;
+ case EOpConvUint8ToUint: out.debug << "Convert uint8_t to uint"; break;
+ case EOpConvUint8ToUint64: out.debug << "Convert uint8_t to uint64"; break;
+
+ // int8_t -> float*
+ case EOpConvInt8ToFloat16: out.debug << "Convert int8_t to float16_t";break;
+ case EOpConvInt8ToFloat: out.debug << "Convert int8_t to float"; break;
+ case EOpConvInt8ToDouble: out.debug << "Convert int8_t to double"; break;
+
+ // uint8_t -> float*
+ case EOpConvUint8ToFloat16: out.debug << "Convert uint8_t to float16_t";break;
+ case EOpConvUint8ToFloat: out.debug << "Convert uint8_t to float"; break;
+ case EOpConvUint8ToDouble: out.debug << "Convert uint8_t to double"; break;
+
+ // int16_t -> (u)int*
+ case EOpConvInt16ToInt8: out.debug << "Convert int16_t to int8_t";break;
+ case EOpConvInt16ToInt: out.debug << "Convert int16_t to int"; break;
+ case EOpConvInt16ToInt64: out.debug << "Convert int16_t to int64"; break;
+ case EOpConvInt16ToUint8: out.debug << "Convert int16_t to uint8_t";break;
+ case EOpConvInt16ToUint16: out.debug << "Convert int16_t to uint16_t";break;
+ case EOpConvInt16ToUint: out.debug << "Convert int16_t to uint"; break;
+ case EOpConvInt16ToUint64: out.debug << "Convert int16_t to uint64"; break;
+
+ // int16_t -> float*
+ case EOpConvInt16ToFloat16: out.debug << "Convert int16_t to float16_t";break;
+ case EOpConvInt16ToFloat: out.debug << "Convert int16_t to float"; break;
+ case EOpConvInt16ToDouble: out.debug << "Convert int16_t to double"; break;
+
+ // uint16_t -> (u)int*
+ case EOpConvUint16ToInt8: out.debug << "Convert uint16_t to int8_t";break;
+ case EOpConvUint16ToInt16: out.debug << "Convert uint16_t to int16_t";break;
+ case EOpConvUint16ToInt: out.debug << "Convert uint16_t to int"; break;
+ case EOpConvUint16ToInt64: out.debug << "Convert uint16_t to int64"; break;
+ case EOpConvUint16ToUint8: out.debug << "Convert uint16_t to uint8_t";break;
+ case EOpConvUint16ToUint: out.debug << "Convert uint16_t to uint"; break;
+ case EOpConvUint16ToUint64: out.debug << "Convert uint16_t to uint64"; break;
+
+ // uint16_t -> float*
+ case EOpConvUint16ToFloat16: out.debug << "Convert uint16_t to float16_t";break;
+ case EOpConvUint16ToFloat: out.debug << "Convert uint16_t to float"; break;
+ case EOpConvUint16ToDouble: out.debug << "Convert uint16_t to double"; break;
+
+ // int32_t -> (u)int*
+ case EOpConvIntToInt8: out.debug << "Convert int to int8_t";break;
+ case EOpConvIntToInt16: out.debug << "Convert int to int16_t";break;
+ case EOpConvIntToInt64: out.debug << "Convert int to int64"; break;
+ case EOpConvIntToUint8: out.debug << "Convert int to uint8_t";break;
+ case EOpConvIntToUint16: out.debug << "Convert int to uint16_t";break;
+ case EOpConvIntToUint: out.debug << "Convert int to uint"; break;
+ case EOpConvIntToUint64: out.debug << "Convert int to uint64"; break;
+
+ // int32_t -> float*
+ case EOpConvIntToFloat16: out.debug << "Convert int to float16_t";break;
+ case EOpConvIntToFloat: out.debug << "Convert int to float"; break;
+ case EOpConvIntToDouble: out.debug << "Convert int to double"; break;
+
+ // uint32_t -> (u)int*
+ case EOpConvUintToInt8: out.debug << "Convert uint to int8_t";break;
+ case EOpConvUintToInt16: out.debug << "Convert uint to int16_t";break;
+ case EOpConvUintToInt: out.debug << "Convert uint to int";break;
+ case EOpConvUintToInt64: out.debug << "Convert uint to int64"; break;
+ case EOpConvUintToUint8: out.debug << "Convert uint to uint8_t";break;
+ case EOpConvUintToUint16: out.debug << "Convert uint to uint16_t";break;
+ case EOpConvUintToUint64: out.debug << "Convert uint to uint64"; break;
+
+ // uint32_t -> float*
+ case EOpConvUintToFloat16: out.debug << "Convert uint to float16_t";break;
+ case EOpConvUintToFloat: out.debug << "Convert uint to float"; break;
+ case EOpConvUintToDouble: out.debug << "Convert uint to double"; break;
+
+ // int64 -> (u)int*
+ case EOpConvInt64ToInt8: out.debug << "Convert int64 to int8_t"; break;
+ case EOpConvInt64ToInt16: out.debug << "Convert int64 to int16_t"; break;
+ case EOpConvInt64ToInt: out.debug << "Convert int64 to int"; break;
+ case EOpConvInt64ToUint8: out.debug << "Convert int64 to uint8_t";break;
+ case EOpConvInt64ToUint16: out.debug << "Convert int64 to uint16_t";break;
+ case EOpConvInt64ToUint: out.debug << "Convert int64 to uint"; break;
+ case EOpConvInt64ToUint64: out.debug << "Convert int64 to uint64"; break;
+
+ // int64 -> float*
+ case EOpConvInt64ToFloat16: out.debug << "Convert int64 to float16_t";break;
+ case EOpConvInt64ToFloat: out.debug << "Convert int64 to float"; break;
+ case EOpConvInt64ToDouble: out.debug << "Convert int64 to double"; break;
+
+ // uint64 -> (u)int*
+ case EOpConvUint64ToInt8: out.debug << "Convert uint64 to int8_t";break;
+ case EOpConvUint64ToInt16: out.debug << "Convert uint64 to int16_t";break;
+ case EOpConvUint64ToInt: out.debug << "Convert uint64 to int"; break;
+ case EOpConvUint64ToInt64: out.debug << "Convert uint64 to int64"; break;
+ case EOpConvUint64ToUint8: out.debug << "Convert uint64 to uint8_t";break;
+ case EOpConvUint64ToUint16: out.debug << "Convert uint64 to uint16_t"; break;
+ case EOpConvUint64ToUint: out.debug << "Convert uint64 to uint"; break;
+
+ // uint64 -> float*
+ case EOpConvUint64ToFloat16: out.debug << "Convert uint64 to float16_t";break;
+ case EOpConvUint64ToFloat: out.debug << "Convert uint64 to float"; break;
+ case EOpConvUint64ToDouble: out.debug << "Convert uint64 to double"; break;
+
+ // float16_t -> int*
+ case EOpConvFloat16ToInt8: out.debug << "Convert float16_t to int8_t"; break;
+ case EOpConvFloat16ToInt16: out.debug << "Convert float16_t to int16_t"; break;
+ case EOpConvFloat16ToInt: out.debug << "Convert float16_t to int"; break;
+ case EOpConvFloat16ToInt64: out.debug << "Convert float16_t to int64"; break;
+
+ // float16_t -> uint*
+ case EOpConvFloat16ToUint8: out.debug << "Convert float16_t to uint8_t"; break;
+ case EOpConvFloat16ToUint16: out.debug << "Convert float16_t to uint16_t"; break;
+ case EOpConvFloat16ToUint: out.debug << "Convert float16_t to uint"; break;
+ case EOpConvFloat16ToUint64: out.debug << "Convert float16_t to uint64"; break;
+
+ // float16_t -> float*
+ case EOpConvFloat16ToFloat: out.debug << "Convert float16_t to float"; break;
+ case EOpConvFloat16ToDouble: out.debug << "Convert float16_t to double"; break;
+
+ // float32 -> float*
+ case EOpConvFloatToFloat16: out.debug << "Convert float to float16_t"; break;
+ case EOpConvFloatToDouble: out.debug << "Convert float to double"; break;
+
+ // float32_t -> int*
+ case EOpConvFloatToInt8: out.debug << "Convert float to int8_t"; break;
+ case EOpConvFloatToInt16: out.debug << "Convert float to int16_t"; break;
+ case EOpConvFloatToInt: out.debug << "Convert float to int"; break;
+ case EOpConvFloatToInt64: out.debug << "Convert float to int64"; break;
+
+ // float32_t -> uint*
+ case EOpConvFloatToUint8: out.debug << "Convert float to uint8_t"; break;
+ case EOpConvFloatToUint16: out.debug << "Convert float to uint16_t"; break;
+ case EOpConvFloatToUint: out.debug << "Convert float to uint"; break;
+ case EOpConvFloatToUint64: out.debug << "Convert float to uint64"; break;
+
+ // double -> float*
+ case EOpConvDoubleToFloat16: out.debug << "Convert double to float16_t"; break;
+ case EOpConvDoubleToFloat: out.debug << "Convert double to float"; break;
+
+ // double -> int*
+ case EOpConvDoubleToInt8: out.debug << "Convert double to int8_t"; break;
+ case EOpConvDoubleToInt16: out.debug << "Convert double to int16_t"; break;
+ case EOpConvDoubleToInt: out.debug << "Convert double to int"; break;
+ case EOpConvDoubleToInt64: out.debug << "Convert double to int64"; break;
+
+ // float32_t -> uint*
+ case EOpConvDoubleToUint8: out.debug << "Convert double to uint8_t"; break;
+ case EOpConvDoubleToUint16: out.debug << "Convert double to uint16_t"; break;
+ case EOpConvDoubleToUint: out.debug << "Convert double to uint"; break;
+ case EOpConvDoubleToUint64: out.debug << "Convert double to uint64"; break;
+
+ case EOpConvUint64ToPtr: out.debug << "Convert uint64_t to pointer"; break;
+ case EOpConvPtrToUint64: out.debug << "Convert pointer to uint64_t"; break;
+
+ case EOpRadians: out.debug << "radians"; break;
+ case EOpDegrees: out.debug << "degrees"; break;
+ case EOpSin: out.debug << "sine"; break;
+ case EOpCos: out.debug << "cosine"; break;
+ case EOpTan: out.debug << "tangent"; break;
+ case EOpAsin: out.debug << "arc sine"; break;
+ case EOpAcos: out.debug << "arc cosine"; break;
+ case EOpAtan: out.debug << "arc tangent"; break;
+ case EOpSinh: out.debug << "hyp. sine"; break;
+ case EOpCosh: out.debug << "hyp. cosine"; break;
+ case EOpTanh: out.debug << "hyp. tangent"; break;
+ case EOpAsinh: out.debug << "arc hyp. sine"; break;
+ case EOpAcosh: out.debug << "arc hyp. cosine"; break;
+ case EOpAtanh: out.debug << "arc hyp. tangent"; break;
+
+ case EOpExp: out.debug << "exp"; break;
+ case EOpLog: out.debug << "log"; break;
+ case EOpExp2: out.debug << "exp2"; break;
+ case EOpLog2: out.debug << "log2"; break;
+ case EOpSqrt: out.debug << "sqrt"; break;
+ case EOpInverseSqrt: out.debug << "inverse sqrt"; break;
+
+ case EOpAbs: out.debug << "Absolute value"; break;
+ case EOpSign: out.debug << "Sign"; break;
+ case EOpFloor: out.debug << "Floor"; break;
+ case EOpTrunc: out.debug << "trunc"; break;
+ case EOpRound: out.debug << "round"; break;
+ case EOpRoundEven: out.debug << "roundEven"; break;
+ case EOpCeil: out.debug << "Ceiling"; break;
+ case EOpFract: out.debug << "Fraction"; break;
+
+ case EOpIsNan: out.debug << "isnan"; break;
+ case EOpIsInf: out.debug << "isinf"; break;
+
+ case EOpFloatBitsToInt: out.debug << "floatBitsToInt"; break;
+ case EOpFloatBitsToUint:out.debug << "floatBitsToUint"; break;
+ case EOpIntBitsToFloat: out.debug << "intBitsToFloat"; break;
+ case EOpUintBitsToFloat:out.debug << "uintBitsToFloat"; break;
+ case EOpDoubleBitsToInt64: out.debug << "doubleBitsToInt64"; break;
+ case EOpDoubleBitsToUint64: out.debug << "doubleBitsToUint64"; break;
+ case EOpInt64BitsToDouble: out.debug << "int64BitsToDouble"; break;
+ case EOpUint64BitsToDouble: out.debug << "uint64BitsToDouble"; break;
+ case EOpFloat16BitsToInt16: out.debug << "float16BitsToInt16"; break;
+ case EOpFloat16BitsToUint16: out.debug << "float16BitsToUint16"; break;
+ case EOpInt16BitsToFloat16: out.debug << "int16BitsToFloat16"; break;
+ case EOpUint16BitsToFloat16: out.debug << "uint16BitsToFloat16"; break;
+
+ case EOpPackSnorm2x16: out.debug << "packSnorm2x16"; break;
+ case EOpUnpackSnorm2x16:out.debug << "unpackSnorm2x16"; break;
+ case EOpPackUnorm2x16: out.debug << "packUnorm2x16"; break;
+ case EOpUnpackUnorm2x16:out.debug << "unpackUnorm2x16"; break;
+ case EOpPackHalf2x16: out.debug << "packHalf2x16"; break;
+ case EOpUnpackHalf2x16: out.debug << "unpackHalf2x16"; break;
+ case EOpPack16: out.debug << "pack16"; break;
+ case EOpPack32: out.debug << "pack32"; break;
+ case EOpPack64: out.debug << "pack64"; break;
+ case EOpUnpack32: out.debug << "unpack32"; break;
+ case EOpUnpack16: out.debug << "unpack16"; break;
+ case EOpUnpack8: out.debug << "unpack8"; break;
+
+ case EOpPackSnorm4x8: out.debug << "PackSnorm4x8"; break;
+ case EOpUnpackSnorm4x8: out.debug << "UnpackSnorm4x8"; break;
+ case EOpPackUnorm4x8: out.debug << "PackUnorm4x8"; break;
+ case EOpUnpackUnorm4x8: out.debug << "UnpackUnorm4x8"; break;
+ case EOpPackDouble2x32: out.debug << "PackDouble2x32"; break;
+ case EOpUnpackDouble2x32: out.debug << "UnpackDouble2x32"; break;
+
+ case EOpPackInt2x32: out.debug << "packInt2x32"; break;
+ case EOpUnpackInt2x32: out.debug << "unpackInt2x32"; break;
+ case EOpPackUint2x32: out.debug << "packUint2x32"; break;
+ case EOpUnpackUint2x32: out.debug << "unpackUint2x32"; break;
+
+ case EOpPackInt2x16: out.debug << "packInt2x16"; break;
+ case EOpUnpackInt2x16: out.debug << "unpackInt2x16"; break;
+ case EOpPackUint2x16: out.debug << "packUint2x16"; break;
+ case EOpUnpackUint2x16: out.debug << "unpackUint2x16"; break;
+
+ case EOpPackInt4x16: out.debug << "packInt4x16"; break;
+ case EOpUnpackInt4x16: out.debug << "unpackInt4x16"; break;
+ case EOpPackUint4x16: out.debug << "packUint4x16"; break;
+ case EOpUnpackUint4x16: out.debug << "unpackUint4x16"; break;
+ case EOpPackFloat2x16: out.debug << "packFloat2x16"; break;
+ case EOpUnpackFloat2x16: out.debug << "unpackFloat2x16"; break;
+
+ case EOpLength: out.debug << "length"; break;
+ case EOpNormalize: out.debug << "normalize"; break;
+ case EOpDPdx: out.debug << "dPdx"; break;
+ case EOpDPdy: out.debug << "dPdy"; break;
+ case EOpFwidth: out.debug << "fwidth"; break;
+ case EOpDPdxFine: out.debug << "dPdxFine"; break;
+ case EOpDPdyFine: out.debug << "dPdyFine"; break;
+ case EOpFwidthFine: out.debug << "fwidthFine"; break;
+ case EOpDPdxCoarse: out.debug << "dPdxCoarse"; break;
+ case EOpDPdyCoarse: out.debug << "dPdyCoarse"; break;
+ case EOpFwidthCoarse: out.debug << "fwidthCoarse"; break;
+
+ case EOpInterpolateAtCentroid: out.debug << "interpolateAtCentroid"; break;
+
+ case EOpDeterminant: out.debug << "determinant"; break;
+ case EOpMatrixInverse: out.debug << "inverse"; break;
+ case EOpTranspose: out.debug << "transpose"; break;
+
+ case EOpAny: out.debug << "any"; break;
+ case EOpAll: out.debug << "all"; break;
+
+ case EOpArrayLength: out.debug << "array length"; break;
+
+ case EOpEmitStreamVertex: out.debug << "EmitStreamVertex"; break;
+ case EOpEndStreamPrimitive: out.debug << "EndStreamPrimitive"; break;
+
+ case EOpAtomicCounterIncrement: out.debug << "AtomicCounterIncrement";break;
+ case EOpAtomicCounterDecrement: out.debug << "AtomicCounterDecrement";break;
+ case EOpAtomicCounter: out.debug << "AtomicCounter"; break;
+
+ case EOpTextureQuerySize: out.debug << "textureSize"; break;
+ case EOpTextureQueryLod: out.debug << "textureQueryLod"; break;
+ case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break;
+ case EOpTextureQuerySamples: out.debug << "textureSamples"; break;
+ case EOpImageQuerySize: out.debug << "imageQuerySize"; break;
+ case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break;
+ case EOpImageLoad: out.debug << "imageLoad"; break;
+
+ case EOpBitFieldReverse: out.debug << "bitFieldReverse"; break;
+ case EOpBitCount: out.debug << "bitCount"; break;
+ case EOpFindLSB: out.debug << "findLSB"; break;
+ case EOpFindMSB: out.debug << "findMSB"; break;
+
+ case EOpNoise: out.debug << "noise"; break;
+
+ case EOpBallot: out.debug << "ballot"; break;
+ case EOpReadFirstInvocation: out.debug << "readFirstInvocation"; break;
+
+ case EOpAnyInvocation: out.debug << "anyInvocation"; break;
+ case EOpAllInvocations: out.debug << "allInvocations"; break;
+ case EOpAllInvocationsEqual: out.debug << "allInvocationsEqual"; break;
+
+ case EOpSubgroupElect: out.debug << "subgroupElect"; break;
+ case EOpSubgroupAll: out.debug << "subgroupAll"; break;
+ case EOpSubgroupAny: out.debug << "subgroupAny"; break;
+ case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break;
+ case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break;
+ case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break;
+ case EOpSubgroupBallot: out.debug << "subgroupBallot"; break;
+ case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break;
+ case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break;
+ case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break;
+ case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break;
+ case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break;
+ case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break;
+ case EOpSubgroupBallotFindMSB: out.debug << "subgroupBallotFindMSB"; break;
+ case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break;
+ case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break;
+ case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break;
+ case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break;
+ case EOpSubgroupAdd: out.debug << "subgroupAdd"; break;
+ case EOpSubgroupMul: out.debug << "subgroupMul"; break;
+ case EOpSubgroupMin: out.debug << "subgroupMin"; break;
+ case EOpSubgroupMax: out.debug << "subgroupMax"; break;
+ case EOpSubgroupAnd: out.debug << "subgroupAnd"; break;
+ case EOpSubgroupOr: out.debug << "subgroupOr"; break;
+ case EOpSubgroupXor: out.debug << "subgroupXor"; break;
+ case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break;
+ case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break;
+ case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break;
+ case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break;
+ case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break;
+ case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break;
+ case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break;
+ case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break;
+ case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break;
+ case EOpSubgroupExclusiveMin: out.debug << "subgroupExclusiveMin"; break;
+ case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break;
+ case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break;
+ case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break;
+ case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break;
+ case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break;
+ case EOpSubgroupClusteredMul: out.debug << "subgroupClusteredMul"; break;
+ case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break;
+ case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break;
+ case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break;
+ case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break;
+ case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break;
+ case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break;
+ case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break;
+ case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
+ case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
+
+#ifdef NV_EXTENSIONS
+ case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
+ case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
+ case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
+ case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break;
+ case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break;
+ case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break;
+ case EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break;
+ case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break;
+ case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break;
+ case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break;
+ case EOpSubgroupPartitionedInclusiveMin: out.debug << "subgroupPartitionedInclusiveMinNV"; break;
+ case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break;
+ case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break;
+ case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break;
+ case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break;
+ case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break;
+ case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break;
+ case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break;
+ case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break;
+ case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
+ case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
+ case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
+#endif
+
+ case EOpClip: out.debug << "clip"; break;
+ case EOpIsFinite: out.debug << "isfinite"; break;
+ case EOpLog10: out.debug << "log10"; break;
+ case EOpRcp: out.debug << "rcp"; break;
+ case EOpSaturate: out.debug << "saturate"; break;
+
+ case EOpSparseTexelsResident: out.debug << "sparseTexelsResident"; break;
+
+#ifdef AMD_EXTENSIONS
+ case EOpMinInvocations: out.debug << "minInvocations"; break;
+ case EOpMaxInvocations: out.debug << "maxInvocations"; break;
+ case EOpAddInvocations: out.debug << "addInvocations"; break;
+ case EOpMinInvocationsNonUniform: out.debug << "minInvocationsNonUniform"; break;
+ case EOpMaxInvocationsNonUniform: out.debug << "maxInvocationsNonUniform"; break;
+ case EOpAddInvocationsNonUniform: out.debug << "addInvocationsNonUniform"; break;
+
+ case EOpMinInvocationsInclusiveScan: out.debug << "minInvocationsInclusiveScan"; break;
+ case EOpMaxInvocationsInclusiveScan: out.debug << "maxInvocationsInclusiveScan"; break;
+ case EOpAddInvocationsInclusiveScan: out.debug << "addInvocationsInclusiveScan"; break;
+ case EOpMinInvocationsInclusiveScanNonUniform: out.debug << "minInvocationsInclusiveScanNonUniform"; break;
+ case EOpMaxInvocationsInclusiveScanNonUniform: out.debug << "maxInvocationsInclusiveScanNonUniform"; break;
+ case EOpAddInvocationsInclusiveScanNonUniform: out.debug << "addInvocationsInclusiveScanNonUniform"; break;
+
+ case EOpMinInvocationsExclusiveScan: out.debug << "minInvocationsExclusiveScan"; break;
+ case EOpMaxInvocationsExclusiveScan: out.debug << "maxInvocationsExclusiveScan"; break;
+ case EOpAddInvocationsExclusiveScan: out.debug << "addInvocationsExclusiveScan"; break;
+ case EOpMinInvocationsExclusiveScanNonUniform: out.debug << "minInvocationsExclusiveScanNonUniform"; break;
+ case EOpMaxInvocationsExclusiveScanNonUniform: out.debug << "maxInvocationsExclusiveScanNonUniform"; break;
+ case EOpAddInvocationsExclusiveScanNonUniform: out.debug << "addInvocationsExclusiveScanNonUniform"; break;
+
+ case EOpMbcnt: out.debug << "mbcnt"; break;
+
+ case EOpFragmentMaskFetch: out.debug << "fragmentMaskFetchAMD"; break;
+ case EOpFragmentFetch: out.debug << "fragmentFetchAMD"; break;
+
+ case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break;
+ case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break;
+#endif
+
+ case EOpSubpassLoad: out.debug << "subpassLoad"; break;
+ case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
+
+ case EOpConstructReference: out.debug << "Construct reference type"; break;
+
+ default: out.debug.message(EPrefixError, "Bad unary op");
+ }
+
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ out.debug << "\n";
+
+ return true;
+}
+
+bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ TInfoSink& out = infoSink;
+
+ if (node->getOp() == EOpNull) {
+ out.debug.message(EPrefixError, "node is still EOpNull!");
+ return true;
+ }
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getOp()) {
+ case EOpSequence: out.debug << "Sequence\n"; return true;
+ case EOpLinkerObjects: out.debug << "Linker Objects\n"; return true;
+ case EOpComma: out.debug << "Comma"; break;
+ case EOpFunction: out.debug << "Function Definition: " << node->getName(); break;
+ case EOpFunctionCall: out.debug << "Function Call: " << node->getName(); break;
+ case EOpParameters: out.debug << "Function Parameters: "; break;
+
+ case EOpConstructFloat: out.debug << "Construct float"; break;
+ case EOpConstructDouble:out.debug << "Construct double"; break;
+
+ case EOpConstructVec2: out.debug << "Construct vec2"; break;
+ case EOpConstructVec3: out.debug << "Construct vec3"; break;
+ case EOpConstructVec4: out.debug << "Construct vec4"; break;
+ case EOpConstructDVec2: out.debug << "Construct dvec2"; break;
+ case EOpConstructDVec3: out.debug << "Construct dvec3"; break;
+ case EOpConstructDVec4: out.debug << "Construct dvec4"; break;
+ case EOpConstructBool: out.debug << "Construct bool"; break;
+ case EOpConstructBVec2: out.debug << "Construct bvec2"; break;
+ case EOpConstructBVec3: out.debug << "Construct bvec3"; break;
+ case EOpConstructBVec4: out.debug << "Construct bvec4"; break;
+ case EOpConstructInt8: out.debug << "Construct int8_t"; break;
+ case EOpConstructI8Vec2: out.debug << "Construct i8vec2"; break;
+ case EOpConstructI8Vec3: out.debug << "Construct i8vec3"; break;
+ case EOpConstructI8Vec4: out.debug << "Construct i8vec4"; break;
+ case EOpConstructInt: out.debug << "Construct int"; break;
+ case EOpConstructIVec2: out.debug << "Construct ivec2"; break;
+ case EOpConstructIVec3: out.debug << "Construct ivec3"; break;
+ case EOpConstructIVec4: out.debug << "Construct ivec4"; break;
+ case EOpConstructUint8: out.debug << "Construct uint8_t"; break;
+ case EOpConstructU8Vec2: out.debug << "Construct u8vec2"; break;
+ case EOpConstructU8Vec3: out.debug << "Construct u8vec3"; break;
+ case EOpConstructU8Vec4: out.debug << "Construct u8vec4"; break;
+ case EOpConstructUint: out.debug << "Construct uint"; break;
+ case EOpConstructUVec2: out.debug << "Construct uvec2"; break;
+ case EOpConstructUVec3: out.debug << "Construct uvec3"; break;
+ case EOpConstructUVec4: out.debug << "Construct uvec4"; break;
+ case EOpConstructInt64: out.debug << "Construct int64"; break;
+ case EOpConstructI64Vec2: out.debug << "Construct i64vec2"; break;
+ case EOpConstructI64Vec3: out.debug << "Construct i64vec3"; break;
+ case EOpConstructI64Vec4: out.debug << "Construct i64vec4"; break;
+ case EOpConstructUint64: out.debug << "Construct uint64"; break;
+ case EOpConstructU64Vec2: out.debug << "Construct u64vec2"; break;
+ case EOpConstructU64Vec3: out.debug << "Construct u64vec3"; break;
+ case EOpConstructU64Vec4: out.debug << "Construct u64vec4"; break;
+ case EOpConstructInt16: out.debug << "Construct int16_t"; break;
+ case EOpConstructI16Vec2: out.debug << "Construct i16vec2"; break;
+ case EOpConstructI16Vec3: out.debug << "Construct i16vec3"; break;
+ case EOpConstructI16Vec4: out.debug << "Construct i16vec4"; break;
+ case EOpConstructUint16: out.debug << "Construct uint16_t"; break;
+ case EOpConstructU16Vec2: out.debug << "Construct u16vec2"; break;
+ case EOpConstructU16Vec3: out.debug << "Construct u16vec3"; break;
+ case EOpConstructU16Vec4: out.debug << "Construct u16vec4"; break;
+ case EOpConstructMat2x2: out.debug << "Construct mat2"; break;
+ case EOpConstructMat2x3: out.debug << "Construct mat2x3"; break;
+ case EOpConstructMat2x4: out.debug << "Construct mat2x4"; break;
+ case EOpConstructMat3x2: out.debug << "Construct mat3x2"; break;
+ case EOpConstructMat3x3: out.debug << "Construct mat3"; break;
+ case EOpConstructMat3x4: out.debug << "Construct mat3x4"; break;
+ case EOpConstructMat4x2: out.debug << "Construct mat4x2"; break;
+ case EOpConstructMat4x3: out.debug << "Construct mat4x3"; break;
+ case EOpConstructMat4x4: out.debug << "Construct mat4"; break;
+ case EOpConstructDMat2x2: out.debug << "Construct dmat2"; break;
+ case EOpConstructDMat2x3: out.debug << "Construct dmat2x3"; break;
+ case EOpConstructDMat2x4: out.debug << "Construct dmat2x4"; break;
+ case EOpConstructDMat3x2: out.debug << "Construct dmat3x2"; break;
+ case EOpConstructDMat3x3: out.debug << "Construct dmat3"; break;
+ case EOpConstructDMat3x4: out.debug << "Construct dmat3x4"; break;
+ case EOpConstructDMat4x2: out.debug << "Construct dmat4x2"; break;
+ case EOpConstructDMat4x3: out.debug << "Construct dmat4x3"; break;
+ case EOpConstructDMat4x4: out.debug << "Construct dmat4"; break;
+ case EOpConstructIMat2x2: out.debug << "Construct imat2"; break;
+ case EOpConstructIMat2x3: out.debug << "Construct imat2x3"; break;
+ case EOpConstructIMat2x4: out.debug << "Construct imat2x4"; break;
+ case EOpConstructIMat3x2: out.debug << "Construct imat3x2"; break;
+ case EOpConstructIMat3x3: out.debug << "Construct imat3"; break;
+ case EOpConstructIMat3x4: out.debug << "Construct imat3x4"; break;
+ case EOpConstructIMat4x2: out.debug << "Construct imat4x2"; break;
+ case EOpConstructIMat4x3: out.debug << "Construct imat4x3"; break;
+ case EOpConstructIMat4x4: out.debug << "Construct imat4"; break;
+ case EOpConstructUMat2x2: out.debug << "Construct umat2"; break;
+ case EOpConstructUMat2x3: out.debug << "Construct umat2x3"; break;
+ case EOpConstructUMat2x4: out.debug << "Construct umat2x4"; break;
+ case EOpConstructUMat3x2: out.debug << "Construct umat3x2"; break;
+ case EOpConstructUMat3x3: out.debug << "Construct umat3"; break;
+ case EOpConstructUMat3x4: out.debug << "Construct umat3x4"; break;
+ case EOpConstructUMat4x2: out.debug << "Construct umat4x2"; break;
+ case EOpConstructUMat4x3: out.debug << "Construct umat4x3"; break;
+ case EOpConstructUMat4x4: out.debug << "Construct umat4"; break;
+ case EOpConstructBMat2x2: out.debug << "Construct bmat2"; break;
+ case EOpConstructBMat2x3: out.debug << "Construct bmat2x3"; break;
+ case EOpConstructBMat2x4: out.debug << "Construct bmat2x4"; break;
+ case EOpConstructBMat3x2: out.debug << "Construct bmat3x2"; break;
+ case EOpConstructBMat3x3: out.debug << "Construct bmat3"; break;
+ case EOpConstructBMat3x4: out.debug << "Construct bmat3x4"; break;
+ case EOpConstructBMat4x2: out.debug << "Construct bmat4x2"; break;
+ case EOpConstructBMat4x3: out.debug << "Construct bmat4x3"; break;
+ case EOpConstructBMat4x4: out.debug << "Construct bmat4"; break;
+ case EOpConstructFloat16: out.debug << "Construct float16_t"; break;
+ case EOpConstructF16Vec2: out.debug << "Construct f16vec2"; break;
+ case EOpConstructF16Vec3: out.debug << "Construct f16vec3"; break;
+ case EOpConstructF16Vec4: out.debug << "Construct f16vec4"; break;
+ case EOpConstructF16Mat2x2: out.debug << "Construct f16mat2"; break;
+ case EOpConstructF16Mat2x3: out.debug << "Construct f16mat2x3"; break;
+ case EOpConstructF16Mat2x4: out.debug << "Construct f16mat2x4"; break;
+ case EOpConstructF16Mat3x2: out.debug << "Construct f16mat3x2"; break;
+ case EOpConstructF16Mat3x3: out.debug << "Construct f16mat3"; break;
+ case EOpConstructF16Mat3x4: out.debug << "Construct f16mat3x4"; break;
+ case EOpConstructF16Mat4x2: out.debug << "Construct f16mat4x2"; break;
+ case EOpConstructF16Mat4x3: out.debug << "Construct f16mat4x3"; break;
+ case EOpConstructF16Mat4x4: out.debug << "Construct f16mat4"; break;
+ case EOpConstructStruct: out.debug << "Construct structure"; break;
+ case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break;
+ case EOpConstructReference: out.debug << "Construct reference"; break;
+ case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break;
+
+ case EOpLessThan: out.debug << "Compare Less Than"; break;
+ case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
+ case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break;
+ case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break;
+ case EOpVectorEqual: out.debug << "Equal"; break;
+ case EOpVectorNotEqual: out.debug << "NotEqual"; break;
+
+ case EOpMod: out.debug << "mod"; break;
+ case EOpModf: out.debug << "modf"; break;
+ case EOpPow: out.debug << "pow"; break;
+
+ case EOpAtan: out.debug << "arc tangent"; break;
+
+ case EOpMin: out.debug << "min"; break;
+ case EOpMax: out.debug << "max"; break;
+ case EOpClamp: out.debug << "clamp"; break;
+ case EOpMix: out.debug << "mix"; break;
+ case EOpStep: out.debug << "step"; break;
+ case EOpSmoothStep: out.debug << "smoothstep"; break;
+
+ case EOpDistance: out.debug << "distance"; break;
+ case EOpDot: out.debug << "dot-product"; break;
+ case EOpCross: out.debug << "cross-product"; break;
+ case EOpFaceForward: out.debug << "face-forward"; break;
+ case EOpReflect: out.debug << "reflect"; break;
+ case EOpRefract: out.debug << "refract"; break;
+ case EOpMul: out.debug << "component-wise multiply"; break;
+ case EOpOuterProduct: out.debug << "outer product"; break;
+
+ case EOpEmitVertex: out.debug << "EmitVertex"; break;
+ case EOpEndPrimitive: out.debug << "EndPrimitive"; break;
+
+ case EOpBarrier: out.debug << "Barrier"; break;
+ case EOpMemoryBarrier: out.debug << "MemoryBarrier"; break;
+ case EOpMemoryBarrierAtomicCounter: out.debug << "MemoryBarrierAtomicCounter"; break;
+ case EOpMemoryBarrierBuffer: out.debug << "MemoryBarrierBuffer"; break;
+ case EOpMemoryBarrierImage: out.debug << "MemoryBarrierImage"; break;
+ case EOpMemoryBarrierShared: out.debug << "MemoryBarrierShared"; break;
+ case EOpGroupMemoryBarrier: out.debug << "GroupMemoryBarrier"; break;
+
+ case EOpReadInvocation: out.debug << "readInvocation"; break;
+
+#ifdef AMD_EXTENSIONS
+ case EOpSwizzleInvocations: out.debug << "swizzleInvocations"; break;
+ case EOpSwizzleInvocationsMasked: out.debug << "swizzleInvocationsMasked"; break;
+ case EOpWriteInvocation: out.debug << "writeInvocation"; break;
+
+ case EOpMin3: out.debug << "min3"; break;
+ case EOpMax3: out.debug << "max3"; break;
+ case EOpMid3: out.debug << "mid3"; break;
+
+ case EOpTime: out.debug << "time"; break;
+#endif
+
+ case EOpAtomicAdd: out.debug << "AtomicAdd"; break;
+ case EOpAtomicMin: out.debug << "AtomicMin"; break;
+ case EOpAtomicMax: out.debug << "AtomicMax"; break;
+ case EOpAtomicAnd: out.debug << "AtomicAnd"; break;
+ case EOpAtomicOr: out.debug << "AtomicOr"; break;
+ case EOpAtomicXor: out.debug << "AtomicXor"; break;
+ case EOpAtomicExchange: out.debug << "AtomicExchange"; break;
+ case EOpAtomicCompSwap: out.debug << "AtomicCompSwap"; break;
+ case EOpAtomicLoad: out.debug << "AtomicLoad"; break;
+ case EOpAtomicStore: out.debug << "AtomicStore"; break;
+
+ case EOpAtomicCounterAdd: out.debug << "AtomicCounterAdd"; break;
+ case EOpAtomicCounterSubtract: out.debug << "AtomicCounterSubtract"; break;
+ case EOpAtomicCounterMin: out.debug << "AtomicCounterMin"; break;
+ case EOpAtomicCounterMax: out.debug << "AtomicCounterMax"; break;
+ case EOpAtomicCounterAnd: out.debug << "AtomicCounterAnd"; break;
+ case EOpAtomicCounterOr: out.debug << "AtomicCounterOr"; break;
+ case EOpAtomicCounterXor: out.debug << "AtomicCounterXor"; break;
+ case EOpAtomicCounterExchange: out.debug << "AtomicCounterExchange"; break;
+ case EOpAtomicCounterCompSwap: out.debug << "AtomicCounterCompSwap"; break;
+
+ case EOpImageQuerySize: out.debug << "imageQuerySize"; break;
+ case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break;
+ case EOpImageLoad: out.debug << "imageLoad"; break;
+ case EOpImageStore: out.debug << "imageStore"; break;
+ case EOpImageAtomicAdd: out.debug << "imageAtomicAdd"; break;
+ case EOpImageAtomicMin: out.debug << "imageAtomicMin"; break;
+ case EOpImageAtomicMax: out.debug << "imageAtomicMax"; break;
+ case EOpImageAtomicAnd: out.debug << "imageAtomicAnd"; break;
+ case EOpImageAtomicOr: out.debug << "imageAtomicOr"; break;
+ case EOpImageAtomicXor: out.debug << "imageAtomicXor"; break;
+ case EOpImageAtomicExchange: out.debug << "imageAtomicExchange"; break;
+ case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break;
+ case EOpImageAtomicLoad: out.debug << "imageAtomicLoad"; break;
+ case EOpImageAtomicStore: out.debug << "imageAtomicStore"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpImageLoadLod: out.debug << "imageLoadLod"; break;
+ case EOpImageStoreLod: out.debug << "imageStoreLod"; break;
+#endif
+
+ case EOpTextureQuerySize: out.debug << "textureSize"; break;
+ case EOpTextureQueryLod: out.debug << "textureQueryLod"; break;
+ case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break;
+ case EOpTextureQuerySamples: out.debug << "textureSamples"; break;
+ case EOpTexture: out.debug << "texture"; break;
+ case EOpTextureProj: out.debug << "textureProj"; break;
+ case EOpTextureLod: out.debug << "textureLod"; break;
+ case EOpTextureOffset: out.debug << "textureOffset"; break;
+ case EOpTextureFetch: out.debug << "textureFetch"; break;
+ case EOpTextureFetchOffset: out.debug << "textureFetchOffset"; break;
+ case EOpTextureProjOffset: out.debug << "textureProjOffset"; break;
+ case EOpTextureLodOffset: out.debug << "textureLodOffset"; break;
+ case EOpTextureProjLod: out.debug << "textureProjLod"; break;
+ case EOpTextureProjLodOffset: out.debug << "textureProjLodOffset"; break;
+ case EOpTextureGrad: out.debug << "textureGrad"; break;
+ case EOpTextureGradOffset: out.debug << "textureGradOffset"; break;
+ case EOpTextureProjGrad: out.debug << "textureProjGrad"; break;
+ case EOpTextureProjGradOffset: out.debug << "textureProjGradOffset"; break;
+ case EOpTextureGather: out.debug << "textureGather"; break;
+ case EOpTextureGatherOffset: out.debug << "textureGatherOffset"; break;
+ case EOpTextureGatherOffsets: out.debug << "textureGatherOffsets"; break;
+ case EOpTextureClamp: out.debug << "textureClamp"; break;
+ case EOpTextureOffsetClamp: out.debug << "textureOffsetClamp"; break;
+ case EOpTextureGradClamp: out.debug << "textureGradClamp"; break;
+ case EOpTextureGradOffsetClamp: out.debug << "textureGradOffsetClamp"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpTextureGatherLod: out.debug << "textureGatherLod"; break;
+ case EOpTextureGatherLodOffset: out.debug << "textureGatherLodOffset"; break;
+ case EOpTextureGatherLodOffsets: out.debug << "textureGatherLodOffsets"; break;
+#endif
+
+ case EOpSparseTexture: out.debug << "sparseTexture"; break;
+ case EOpSparseTextureOffset: out.debug << "sparseTextureOffset"; break;
+ case EOpSparseTextureLod: out.debug << "sparseTextureLod"; break;
+ case EOpSparseTextureLodOffset: out.debug << "sparseTextureLodOffset"; break;
+ case EOpSparseTextureFetch: out.debug << "sparseTexelFetch"; break;
+ case EOpSparseTextureFetchOffset: out.debug << "sparseTexelFetchOffset"; break;
+ case EOpSparseTextureGrad: out.debug << "sparseTextureGrad"; break;
+ case EOpSparseTextureGradOffset: out.debug << "sparseTextureGradOffset"; break;
+ case EOpSparseTextureGather: out.debug << "sparseTextureGather"; break;
+ case EOpSparseTextureGatherOffset: out.debug << "sparseTextureGatherOffset"; break;
+ case EOpSparseTextureGatherOffsets: out.debug << "sparseTextureGatherOffsets"; break;
+ case EOpSparseImageLoad: out.debug << "sparseImageLoad"; break;
+ case EOpSparseTextureClamp: out.debug << "sparseTextureClamp"; break;
+ case EOpSparseTextureOffsetClamp: out.debug << "sparseTextureOffsetClamp"; break;
+ case EOpSparseTextureGradClamp: out.debug << "sparseTextureGradClamp"; break;
+ case EOpSparseTextureGradOffsetClamp: out.debug << "sparseTextureGradOffsetClamp"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpSparseTextureGatherLod: out.debug << "sparseTextureGatherLod"; break;
+ case EOpSparseTextureGatherLodOffset: out.debug << "sparseTextureGatherLodOffset"; break;
+ case EOpSparseTextureGatherLodOffsets: out.debug << "sparseTextureGatherLodOffsets"; break;
+ case EOpSparseImageLoadLod: out.debug << "sparseImageLoadLod"; break;
+#endif
+#ifdef NV_EXTENSIONS
+ case EOpImageSampleFootprintNV: out.debug << "imageSampleFootprintNV"; break;
+ case EOpImageSampleFootprintClampNV: out.debug << "imageSampleFootprintClampNV"; break;
+ case EOpImageSampleFootprintLodNV: out.debug << "imageSampleFootprintLodNV"; break;
+ case EOpImageSampleFootprintGradNV: out.debug << "imageSampleFootprintGradNV"; break;
+ case EOpImageSampleFootprintGradClampNV: out.debug << "imageSampleFootprintGradClampNV"; break;
+#endif
+ case EOpAddCarry: out.debug << "addCarry"; break;
+ case EOpSubBorrow: out.debug << "subBorrow"; break;
+ case EOpUMulExtended: out.debug << "uMulExtended"; break;
+ case EOpIMulExtended: out.debug << "iMulExtended"; break;
+ case EOpBitfieldExtract: out.debug << "bitfieldExtract"; break;
+ case EOpBitfieldInsert: out.debug << "bitfieldInsert"; break;
+
+ case EOpFma: out.debug << "fma"; break;
+ case EOpFrexp: out.debug << "frexp"; break;
+ case EOpLdexp: out.debug << "ldexp"; break;
+
+ case EOpInterpolateAtSample: out.debug << "interpolateAtSample"; break;
+ case EOpInterpolateAtOffset: out.debug << "interpolateAtOffset"; break;
+#ifdef AMD_EXTENSIONS
+ case EOpInterpolateAtVertex: out.debug << "interpolateAtVertex"; break;
+#endif
+
+ case EOpSinCos: out.debug << "sincos"; break;
+ case EOpGenMul: out.debug << "mul"; break;
+
+ case EOpAllMemoryBarrierWithGroupSync: out.debug << "AllMemoryBarrierWithGroupSync"; break;
+ case EOpDeviceMemoryBarrier: out.debug << "DeviceMemoryBarrier"; break;
+ case EOpDeviceMemoryBarrierWithGroupSync: out.debug << "DeviceMemoryBarrierWithGroupSync"; break;
+ case EOpWorkgroupMemoryBarrier: out.debug << "WorkgroupMemoryBarrier"; break;
+ case EOpWorkgroupMemoryBarrierWithGroupSync: out.debug << "WorkgroupMemoryBarrierWithGroupSync"; break;
+
+ case EOpSubgroupBarrier: out.debug << "subgroupBarrier"; break;
+ case EOpSubgroupMemoryBarrier: out.debug << "subgroupMemoryBarrier"; break;
+ case EOpSubgroupMemoryBarrierBuffer: out.debug << "subgroupMemoryBarrierBuffer"; break;
+ case EOpSubgroupMemoryBarrierImage: out.debug << "subgroupMemoryBarrierImage"; break;
+ case EOpSubgroupMemoryBarrierShared: out.debug << "subgroupMemoryBarrierShared"; break;
+ case EOpSubgroupElect: out.debug << "subgroupElect"; break;
+ case EOpSubgroupAll: out.debug << "subgroupAll"; break;
+ case EOpSubgroupAny: out.debug << "subgroupAny"; break;
+ case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break;
+ case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break;
+ case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break;
+ case EOpSubgroupBallot: out.debug << "subgroupBallot"; break;
+ case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break;
+ case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break;
+ case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break;
+ case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break;
+ case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break;
+ case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break;
+ case EOpSubgroupBallotFindMSB: out.debug << "subgroupBallotFindMSB"; break;
+ case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break;
+ case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break;
+ case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break;
+ case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break;
+ case EOpSubgroupAdd: out.debug << "subgroupAdd"; break;
+ case EOpSubgroupMul: out.debug << "subgroupMul"; break;
+ case EOpSubgroupMin: out.debug << "subgroupMin"; break;
+ case EOpSubgroupMax: out.debug << "subgroupMax"; break;
+ case EOpSubgroupAnd: out.debug << "subgroupAnd"; break;
+ case EOpSubgroupOr: out.debug << "subgroupOr"; break;
+ case EOpSubgroupXor: out.debug << "subgroupXor"; break;
+ case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break;
+ case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break;
+ case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break;
+ case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break;
+ case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break;
+ case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break;
+ case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break;
+ case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break;
+ case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break;
+ case EOpSubgroupExclusiveMin: out.debug << "subgroupExclusiveMin"; break;
+ case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break;
+ case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break;
+ case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break;
+ case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break;
+ case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break;
+ case EOpSubgroupClusteredMul: out.debug << "subgroupClusteredMul"; break;
+ case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break;
+ case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break;
+ case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break;
+ case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break;
+ case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break;
+ case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break;
+ case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break;
+ case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
+ case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
+
+ case EOpSubpassLoad: out.debug << "subpassLoad"; break;
+ case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
+
+#ifdef NV_EXTENSIONS
+ case EOpTraceNV: out.debug << "traceNV"; break;
+ case EOpReportIntersectionNV: out.debug << "reportIntersectionNV"; break;
+ case EOpIgnoreIntersectionNV: out.debug << "ignoreIntersectionNV"; break;
+ case EOpTerminateRayNV: out.debug << "terminateRayNV"; break;
+ case EOpExecuteCallableNV: out.debug << "executeCallableNV"; break;
+ case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break;
+#endif
+
+ case EOpCooperativeMatrixLoad: out.debug << "Load cooperative matrix"; break;
+ case EOpCooperativeMatrixStore: out.debug << "Store cooperative matrix"; break;
+ case EOpCooperativeMatrixMulAdd: out.debug << "MulAdd cooperative matrices"; break;
+
+ default: out.debug.message(EPrefixError, "Bad aggregation op");
+ }
+
+ if (node->getOp() != EOpSequence && node->getOp() != EOpParameters)
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ out.debug << "\n";
+
+ return true;
+}
+
+bool TOutputTraverser::visitSelection(TVisit /* visit */, TIntermSelection* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ out.debug << "Test condition and select";
+ out.debug << " (" << node->getCompleteString() << ")";
+
+ if (node->getShortCircuit() == false)
+ out.debug << ": no shortcircuit";
+ if (node->getFlatten())
+ out.debug << ": Flatten";
+ if (node->getDontFlatten())
+ out.debug << ": DontFlatten";
+ out.debug << "\n";
+
+ ++depth;
+
+ OutputTreeText(out, node, depth);
+ out.debug << "Condition\n";
+ node->getCondition()->traverse(this);
+
+ OutputTreeText(out, node, depth);
+ if (node->getTrueBlock()) {
+ out.debug << "true case\n";
+ node->getTrueBlock()->traverse(this);
+ } else
+ out.debug << "true case is null\n";
+
+ if (node->getFalseBlock()) {
+ OutputTreeText(out, node, depth);
+ out.debug << "false case\n";
+ node->getFalseBlock()->traverse(this);
+ }
+
+ --depth;
+
+ return false;
+}
+
+// Print infinities and NaNs, and numbers in a portable way.
+// Goals:
+// - portable (across IEEE 754 platforms)
+// - shows all possible IEEE values
+// - shows simple numbers in a simple way, e.g., no leading/trailing 0s
+// - shows all digits, no premature rounding
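+// For instance, under this scheme 1e-7 prints as "1.0000000000000e-07",
+// -1.0/0.0 prints as "-1.#INF", and a NaN prints as "1.#IND".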
+static void OutputDouble(TInfoSink& out, double value, TOutputTraverser::EExtraOutput extra)
+{
+ if (IsInfinity(value)) {
+ if (value < 0)
+ out.debug << "-1.#INF";
+ else
+ out.debug << "+1.#INF";
+ } else if (IsNan(value))
+ out.debug << "1.#IND";
+ else {
+ const int maxSize = 340;
+ char buf[maxSize];
+ const char* format = "%f";
+ if (fabs(value) > 0.0 && (fabs(value) < 1e-5 || fabs(value) > 1e12))
+ format = "%-.13e";
+ int len = snprintf(buf, maxSize, format, value);
+ assert(len < maxSize);
+
+ // remove a leading zero in the hundreds place of the exponent; it is not portable
+ // pattern: XX...XXXe+0XX or XX...XXXe-0XX
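+ // e.g., runtimes that print three-digit exponents yield "1.5e+012",
+ // which is rewritten here to the portable two-digit form "1.5e+12"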
+ if (len > 5) {
+ if (buf[len-5] == 'e' && (buf[len-4] == '+' || buf[len-4] == '-') && buf[len-3] == '0') {
+ buf[len-3] = buf[len-2];
+ buf[len-2] = buf[len-1];
+ buf[len-1] = '\0';
+ }
+ }
+
+ out.debug << buf;
+
+ switch (extra) {
+ case TOutputTraverser::BinaryDoubleOutput:
+ {
+ uint64_t b;
+ static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)");
+ memcpy(&b, &value, sizeof(b));
+
+ out.debug << " : ";
+ // emit all 64 bits, most significant first: test the top bit, then shift left
+ for (size_t i = 0; i < 8 * sizeof(value); ++i) {
+ out.debug << ((b & 0x8000000000000000) != 0 ? "1" : "0");
+ b <<= 1;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const TConstUnionArray& constUnion,
+ TOutputTraverser::EExtraOutput extra, int depth)
+{
+ int size = node->getType().computeNumComponents();
+
+ for (int i = 0; i < size; i++) {
+ OutputTreeText(out, node, depth);
+ switch (constUnion[i].getType()) {
+ case EbtBool:
+ if (constUnion[i].getBConst())
+ out.debug << "true";
+ else
+ out.debug << "false";
+
+ out.debug << " (" << "const bool" << ")";
+
+ out.debug << "\n";
+ break;
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ OutputDouble(out, constUnion[i].getDConst(), extra);
+ out.debug << "\n";
+ break;
+ case EbtInt8:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI8Const(), "const int8_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint8:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU8Const(), "const uint8_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtInt16:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI16Const(), "const int16_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint16:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU16Const(), "const uint16_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtInt:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%d (%s)", constUnion[i].getIConst(), "const int");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%u (%s)", constUnion[i].getUConst(), "const uint");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtInt64:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%lld (%s)", constUnion[i].getI64Const(), "const int64_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ case EbtUint64:
+ {
+ const int maxSize = 300;
+ char buf[maxSize];
+ snprintf(buf, maxSize, "%llu (%s)", constUnion[i].getU64Const(), "const uint64_t");
+
+ out.debug << buf << "\n";
+ }
+ break;
+ default:
+ out.info.message(EPrefixInternalError, "Unknown constant", node->getLoc());
+ break;
+ }
+ }
+}
+
+void TOutputTraverser::visitConstantUnion(TIntermConstantUnion* node)
+{
+ OutputTreeText(infoSink, node, depth);
+ infoSink.debug << "Constant:\n";
+
+ OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1);
+}
+
+void TOutputTraverser::visitSymbol(TIntermSymbol* node)
+{
+ OutputTreeText(infoSink, node, depth);
+
+ infoSink.debug << "'" << node->getName() << "' (" << node->getCompleteString() << ")\n";
+
+ if (! node->getConstArray().empty())
+ OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1);
+ else if (node->getConstSubtree()) {
+ incrementDepth(node);
+ node->getConstSubtree()->traverse(this);
+ decrementDepth();
+ }
+}
+
+bool TOutputTraverser::visitLoop(TVisit /* visit */, TIntermLoop* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ out.debug << "Loop with condition ";
+ if (! node->testFirst())
+ out.debug << "not ";
+ out.debug << "tested first";
+
+ if (node->getUnroll())
+ out.debug << ": Unroll";
+ if (node->getDontUnroll())
+ out.debug << ": DontUnroll";
+ if (node->getLoopDependency()) {
+ out.debug << ": Dependency ";
+ out.debug << node->getLoopDependency();
+ }
+ out.debug << "\n";
+
+ ++depth;
+
+ OutputTreeText(infoSink, node, depth);
+ if (node->getTest()) {
+ out.debug << "Loop Condition\n";
+ node->getTest()->traverse(this);
+ } else
+ out.debug << "No loop condition\n";
+
+ OutputTreeText(infoSink, node, depth);
+ if (node->getBody()) {
+ out.debug << "Loop Body\n";
+ node->getBody()->traverse(this);
+ } else
+ out.debug << "No loop body\n";
+
+ if (node->getTerminal()) {
+ OutputTreeText(infoSink, node, depth);
+ out.debug << "Loop Terminal Expression\n";
+ node->getTerminal()->traverse(this);
+ }
+
+ --depth;
+
+ return false;
+}
+
+bool TOutputTraverser::visitBranch(TVisit /* visit*/, TIntermBranch* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+
+ switch (node->getFlowOp()) {
+ case EOpKill: out.debug << "Branch: Kill"; break;
+ case EOpBreak: out.debug << "Branch: Break"; break;
+ case EOpContinue: out.debug << "Branch: Continue"; break;
+ case EOpReturn: out.debug << "Branch: Return"; break;
+ case EOpCase: out.debug << "case: "; break;
+ case EOpDefault: out.debug << "default: "; break;
+ default: out.debug << "Branch: Unknown Branch"; break;
+ }
+
+ if (node->getExpression()) {
+ out.debug << " with expression\n";
+ ++depth;
+ node->getExpression()->traverse(this);
+ --depth;
+ } else
+ out.debug << "\n";
+
+ return false;
+}
+
+bool TOutputTraverser::visitSwitch(TVisit /* visit */, TIntermSwitch* node)
+{
+ TInfoSink& out = infoSink;
+
+ OutputTreeText(out, node, depth);
+ out.debug << "switch";
+
+ if (node->getFlatten())
+ out.debug << ": Flatten";
+ if (node->getDontFlatten())
+ out.debug << ": DontFlatten";
+ out.debug << "\n";
+
+ OutputTreeText(out, node, depth);
+ out.debug << "condition\n";
+ ++depth;
+ node->getCondition()->traverse(this);
+
+ --depth;
+ OutputTreeText(out, node, depth);
+ out.debug << "body\n";
+ ++depth;
+ node->getBody()->traverse(this);
+
+ --depth;
+
+ return false;
+}
+
+//
+// This function is the one to call externally to start the traversal.
+// Individual visit functions can be initialized to 0 to skip processing of
+// that type of node. Its children will still be processed.
+//
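+// A minimal usage sketch (the names 'intermediate' and 'sink' are
+// illustrative, standing for a populated TIntermediate and a TInfoSink):
+//
+//     intermediate.output(sink, true);   // print header info and the full tree
+//     intermediate.output(sink, false);  // print header info only
+//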
+void TIntermediate::output(TInfoSink& infoSink, bool tree)
+{
+ infoSink.debug << "Shader version: " << version << "\n";
+ if (requestedExtensions.size() > 0) {
+ for (auto extIt = requestedExtensions.begin(); extIt != requestedExtensions.end(); ++extIt)
+ infoSink.debug << "Requested " << *extIt << "\n";
+ }
+
+ if (xfbMode)
+ infoSink.debug << "in xfb mode\n";
+
+ switch (language) {
+ case EShLangVertex:
+ break;
+
+ case EShLangTessControl:
+ infoSink.debug << "vertices = " << vertices << "\n";
+
+ if (inputPrimitive != ElgNone)
+ infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n";
+ if (vertexSpacing != EvsNone)
+ infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n";
+ if (vertexOrder != EvoNone)
+ infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n";
+ break;
+
+ case EShLangTessEvaluation:
+ infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n";
+ infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n";
+ infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n";
+ if (pointMode)
+ infoSink.debug << "using point mode\n";
+ break;
+
+ case EShLangGeometry:
+ infoSink.debug << "invocations = " << invocations << "\n";
+ infoSink.debug << "max_vertices = " << vertices << "\n";
+ infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n";
+ infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n";
+ break;
+
+ case EShLangFragment:
+ if (pixelCenterInteger)
+ infoSink.debug << "gl_FragCoord pixel center is integer\n";
+ if (originUpperLeft)
+ infoSink.debug << "gl_FragCoord origin is upper left\n";
+ if (earlyFragmentTests)
+ infoSink.debug << "using early_fragment_tests\n";
+ if (postDepthCoverage)
+ infoSink.debug << "using post_depth_coverage\n";
+ if (depthLayout != EldNone)
+ infoSink.debug << "using " << TQualifier::getLayoutDepthString(depthLayout) << "\n";
+ if (blendEquations != 0) {
+ infoSink.debug << "using";
+ // blendEquations is a mask, decode it
+ for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) {
+ if (blendEquations & (1 << be))
+ infoSink.debug << " " << TQualifier::getBlendEquationString(be);
+ }
+ infoSink.debug << "\n";
+ }
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangMeshNV:
+ infoSink.debug << "max_vertices = " << vertices << "\n";
+ infoSink.debug << "max_primitives = " << primitives << "\n";
+ infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n";
+ // Fall through
+
+ case EShLangTaskNV:
+ // Fall through
+#endif
+ case EShLangCompute:
+ infoSink.debug << "local_size = (" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << ")\n";
+ {
+ if (localSizeSpecId[0] != TQualifier::layoutNotSet ||
+ localSizeSpecId[1] != TQualifier::layoutNotSet ||
+ localSizeSpecId[2] != TQualifier::layoutNotSet) {
+ infoSink.debug << "local_size ids = (" <<
+ localSizeSpecId[0] << ", " <<
+ localSizeSpecId[1] << ", " <<
+ localSizeSpecId[2] << ")\n";
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (treeRoot == 0 || ! tree)
+ return;
+
+ TOutputTraverser it(infoSink);
+ if (getBinaryDoubleOutput())
+ it.setDoubleOutput(TOutputTraverser::BinaryDoubleOutput);
+ treeRoot->traverse(&it);
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/iomapper.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/iomapper.cpp
new file mode 100644
index 0000000..46c7558
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/iomapper.cpp
@@ -0,0 +1,818 @@
+//
+// Copyright (C) 2016-2017 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "../Include/InfoSink.h"
+#include "iomapper.h"
+#include "LiveTraverser.h"
+#include "localintermediate.h"
+
+#include "gl_types.h"
+
+#include <unordered_set>
+#include <unordered_map>
+
+//
+// Map IO bindings.
+//
+// High-level algorithm for one stage:
+//
+// 1. Traverse all code (live+dead) to find the explicitly provided bindings.
+//
+// 2. Traverse (just) the live code to determine which non-provided bindings
+// require auto-numbering. We do not auto-number dead ones.
+//
+// 3. Traverse all the code to apply the bindings:
+// a. explicitly given bindings are offset according to their type
+// b. implicit live bindings are auto-numbered into the holes, using
+// any open binding slot.
+//      c. implicit dead bindings are left unbound.
+//
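+//
+// Editor's note: a minimal, self-contained sketch of the three-pass scheme
+// above, on hypothetical simplified types (Var and mapBindings are
+// illustrative only, not part of glslang). Compiled out via #if 0.
+//
+#if 0
+#include <set>
+#include <string>
+#include <vector>
+
+struct Var {
+    std::string name;
+    int binding = -1;   // -1 means "not explicitly provided"
+    bool live = false;
+};
+
+void mapBindings(std::vector<Var>& vars)
+{
+    // passes 1 and 2 analogue: explicit bindings and live flags are assumed
+    // to have been filled in already by the traversals; record what is taken
+    std::set<int> used;
+    for (const Var& v : vars)
+        if (v.binding != -1)
+            used.insert(v.binding);
+
+    // pass 3 analogue: auto-number live unbound vars into the open holes;
+    // dead unbound vars are deliberately left at -1
+    int next = 0;
+    for (Var& v : vars) {
+        if (v.binding != -1 || !v.live)
+            continue;
+        while (used.count(next) != 0)
+            ++next;
+        v.binding = next;
+        used.insert(next);
+    }
+}
+#endif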
+
+
+namespace glslang {
+
+struct TVarEntryInfo
+{
+ int id;
+ TIntermSymbol* symbol;
+ bool live;
+ int newBinding;
+ int newSet;
+ int newLocation;
+ int newComponent;
+ int newIndex;
+
+ struct TOrderById
+ {
+ inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r)
+ {
+ return l.id < r.id;
+ }
+ };
+
+ struct TOrderByPriority
+ {
+        // ordering:
+        // 1) has both binding and set
+        // 2) has binding but no set
+        // 3) has set but no binding
+        // 4) has neither binding nor set
+ inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r)
+ {
+ const TQualifier& lq = l.symbol->getQualifier();
+ const TQualifier& rq = r.symbol->getQualifier();
+
+            // simple rules:
+            //  having a binding scores 2 points
+            //  having a set scores 1 point
+            //  the entry with the most points sorts first; ties fall back to id.
+ int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
+ int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
+
+ if (lPoints == rPoints)
+ return l.id < r.id;
+ return lPoints > rPoints;
+ }
+ };
+};
+
+
+
+typedef std::vector<TVarEntryInfo> TVarLiveMap;
+
+class TVarGatherTraverser : public TLiveTraverser
+{
+public:
+ TVarGatherTraverser(const TIntermediate& i, bool traverseDeadCode, TVarLiveMap& inList, TVarLiveMap& outList, TVarLiveMap& uniformList)
+ : TLiveTraverser(i, traverseDeadCode, true, true, false)
+ , inputList(inList)
+ , outputList(outList)
+ , uniformList(uniformList)
+ {
+ }
+
+
+ virtual void visitSymbol(TIntermSymbol* base)
+ {
+ TVarLiveMap* target = nullptr;
+ if (base->getQualifier().storage == EvqVaryingIn)
+ target = &inputList;
+ else if (base->getQualifier().storage == EvqVaryingOut)
+ target = &outputList;
+ else if (base->getQualifier().isUniformOrBuffer() && !base->getQualifier().layoutPushConstant)
+ target = &uniformList;
+
+ if (target) {
+ TVarEntryInfo ent = { base->getId(), base, !traverseAll };
+ TVarLiveMap::iterator at = std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById());
+ if (at != target->end() && at->id == ent.id)
+ at->live = at->live || !traverseAll; // update live state
+ else
+ target->insert(at, ent);
+ }
+ }
+
+private:
+ TVarLiveMap& inputList;
+ TVarLiveMap& outputList;
+ TVarLiveMap& uniformList;
+};
+
+class TVarSetTraverser : public TLiveTraverser
+{
+public:
+ TVarSetTraverser(const TIntermediate& i, const TVarLiveMap& inList, const TVarLiveMap& outList, const TVarLiveMap& uniformList)
+ : TLiveTraverser(i, true, true, true, false)
+ , inputList(inList)
+ , outputList(outList)
+ , uniformList(uniformList)
+ {
+ }
+
+
+ virtual void visitSymbol(TIntermSymbol* base)
+ {
+ const TVarLiveMap* source;
+ if (base->getQualifier().storage == EvqVaryingIn)
+ source = &inputList;
+ else if (base->getQualifier().storage == EvqVaryingOut)
+ source = &outputList;
+ else if (base->getQualifier().isUniformOrBuffer())
+ source = &uniformList;
+ else
+ return;
+
+ TVarEntryInfo ent = { base->getId() };
+ TVarLiveMap::const_iterator at = std::lower_bound(source->begin(), source->end(), ent, TVarEntryInfo::TOrderById());
+ if (at == source->end())
+ return;
+
+ if (at->id != ent.id)
+ return;
+
+ if (at->newBinding != -1)
+ base->getWritableType().getQualifier().layoutBinding = at->newBinding;
+ if (at->newSet != -1)
+ base->getWritableType().getQualifier().layoutSet = at->newSet;
+ if (at->newLocation != -1)
+ base->getWritableType().getQualifier().layoutLocation = at->newLocation;
+ if (at->newComponent != -1)
+ base->getWritableType().getQualifier().layoutComponent = at->newComponent;
+ if (at->newIndex != -1)
+ base->getWritableType().getQualifier().layoutIndex = at->newIndex;
+ }
+
+private:
+ const TVarLiveMap& inputList;
+ const TVarLiveMap& outputList;
+ const TVarLiveMap& uniformList;
+};
+
+struct TNotifyUniformAdaptor
+{
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ inline TNotifyUniformAdaptor(EShLanguage s, TIoMapResolver& r)
+ : stage(s)
+ , resolver(r)
+ {
+ }
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ resolver.notifyBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
+ }
+private:
+ TNotifyUniformAdaptor& operator=(TNotifyUniformAdaptor&);
+};
+
+struct TNotifyInOutAdaptor
+{
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ inline TNotifyInOutAdaptor(EShLanguage s, TIoMapResolver& r)
+ : stage(s)
+ , resolver(r)
+ {
+ }
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ resolver.notifyInOut(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
+ }
+private:
+ TNotifyInOutAdaptor& operator=(TNotifyInOutAdaptor&);
+};
+
+struct TResolverUniformAdaptor
+{
+ TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e, TIntermediate& interm)
+ : stage(s)
+ , resolver(r)
+ , infoSink(i)
+ , error(e)
+ , intermediate(interm)
+ {
+ }
+
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ ent.newLocation = -1;
+ ent.newComponent = -1;
+ ent.newBinding = -1;
+ ent.newSet = -1;
+ ent.newIndex = -1;
+ const bool isValid = resolver.validateBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(),
+ ent.live);
+ if (isValid) {
+ ent.newBinding = resolver.resolveBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(),
+ ent.live);
+ ent.newSet = resolver.resolveSet(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
+ ent.newLocation = resolver.resolveUniformLocation(stage, ent.symbol->getName().c_str(),
+ ent.symbol->getType(), ent.live);
+
+ if (ent.newBinding != -1) {
+ if (ent.newBinding >= int(TQualifier::layoutBindingEnd)) {
+ TString err = "mapped binding out of range: " + ent.symbol->getName();
+
+ infoSink.info.message(EPrefixInternalError, err.c_str());
+ error = true;
+ }
+ }
+ if (ent.newSet != -1) {
+ if (ent.newSet >= int(TQualifier::layoutSetEnd)) {
+ TString err = "mapped set out of range: " + ent.symbol->getName();
+
+ infoSink.info.message(EPrefixInternalError, err.c_str());
+ error = true;
+ }
+ }
+ } else {
+ TString errorMsg = "Invalid binding: " + ent.symbol->getName();
+ infoSink.info.message(EPrefixInternalError, errorMsg.c_str());
+ error = true;
+ }
+ }
+
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ TInfoSink& infoSink;
+ bool& error;
+ TIntermediate& intermediate;
+
+private:
+ TResolverUniformAdaptor& operator=(TResolverUniformAdaptor&);
+};
+
+struct TResolverInOutAdaptor
+{
+ TResolverInOutAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e, TIntermediate& interm)
+ : stage(s)
+ , resolver(r)
+ , infoSink(i)
+ , error(e)
+ , intermediate(interm)
+ {
+ }
+
+ inline void operator()(TVarEntryInfo& ent)
+ {
+ ent.newLocation = -1;
+ ent.newComponent = -1;
+ ent.newBinding = -1;
+ ent.newSet = -1;
+ ent.newIndex = -1;
+ const bool isValid = resolver.validateInOut(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ if (isValid) {
+ ent.newLocation = resolver.resolveInOutLocation(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ ent.newComponent = resolver.resolveInOutComponent(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ ent.newIndex = resolver.resolveInOutIndex(stage,
+ ent.symbol->getName().c_str(),
+ ent.symbol->getType(),
+ ent.live);
+ } else {
+ TString errorMsg;
+ if (ent.symbol->getType().getQualifier().semanticName != nullptr) {
+ errorMsg = "Invalid shader In/Out variable semantic: ";
+ errorMsg += ent.symbol->getType().getQualifier().semanticName;
+ } else {
+ errorMsg = "Invalid shader In/Out variable: ";
+ errorMsg += ent.symbol->getName();
+ }
+ infoSink.info.message(EPrefixInternalError, errorMsg.c_str());
+ error = true;
+ }
+ }
+
+ EShLanguage stage;
+ TIoMapResolver& resolver;
+ TInfoSink& infoSink;
+ bool& error;
+ TIntermediate& intermediate;
+
+private:
+ TResolverInOutAdaptor& operator=(TResolverInOutAdaptor&);
+};
+
+// Base class for shared TIoMapResolver services, used by several derivations.
+struct TDefaultIoResolverBase : public glslang::TIoMapResolver
+{
+ TDefaultIoResolverBase(const TIntermediate &intermediate) :
+ intermediate(intermediate),
+ nextUniformLocation(intermediate.getUniformLocationBase()),
+ nextInputLocation(0),
+ nextOutputLocation(0)
+ { }
+
+ int getBaseBinding(TResourceType res, unsigned int set) const {
+ return selectBaseBinding(intermediate.getShiftBinding(res),
+ intermediate.getShiftBindingForSet(res, set));
+ }
+
+ const std::vector<std::string>& getResourceSetBinding() const { return intermediate.getResourceSetBinding(); }
+
+ bool doAutoBindingMapping() const { return intermediate.getAutoMapBindings(); }
+ bool doAutoLocationMapping() const { return intermediate.getAutoMapLocations(); }
+
+ typedef std::vector<int> TSlotSet;
+ typedef std::unordered_map<int, TSlotSet> TSlotSetMap;
+ TSlotSetMap slots;
+
+ TSlotSet::iterator findSlot(int set, int slot)
+ {
+ return std::lower_bound(slots[set].begin(), slots[set].end(), slot);
+ }
+
+ bool checkEmpty(int set, int slot)
+ {
+ TSlotSet::iterator at = findSlot(set, slot);
+ return !(at != slots[set].end() && *at == slot);
+ }
+
+ int reserveSlot(int set, int slot, int size = 1)
+ {
+ TSlotSet::iterator at = findSlot(set, slot);
+
+ // tolerate aliasing, by not double-recording aliases
+ // (policy about appropriateness of the alias is higher up)
+ for (int i = 0; i < size; i++) {
+ if (at == slots[set].end() || *at != slot + i)
+ at = slots[set].insert(at, slot + i);
+ ++at;
+ }
+
+ return slot;
+ }
+
+ int getFreeSlot(int set, int base, int size = 1)
+ {
+ TSlotSet::iterator at = findSlot(set, base);
+ if (at == slots[set].end())
+ return reserveSlot(set, base, size);
+
+ // look for a big enough gap
+ for (; at != slots[set].end(); ++at) {
+ if (*at - base >= size)
+ break;
+ base = *at + 1;
+ }
+ return reserveSlot(set, base, size);
+ }
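+
+    // Editor's note, worked example for getFreeSlot above: with slots[set]
+    // already holding {1, 2, 5}, getFreeSlot(set, 0, 2) finds no 2-wide gap
+    // starting at 0 (slot 1 is taken) or at 2 (slot 2 is taken), finds the
+    // gap at 3..4, reserves it, and returns 3.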
+
+ virtual bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool /*is_live*/) override = 0;
+
+ virtual int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override = 0;
+
+ int resolveSet(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool /*is_live*/) override
+ {
+ if (type.getQualifier().hasSet())
+ return type.getQualifier().layoutSet;
+
+        // If a command line or API option requested a single descriptor set, use that (if not overridden by spaceN)
+ if (getResourceSetBinding().size() == 1)
+ return atoi(getResourceSetBinding()[0].c_str());
+
+ return 0;
+ }
+ int resolveUniformLocation(EShLanguage /*stage*/, const char* name, const glslang::TType& type, bool /*is_live*/) override
+ {
+        // kick out if we are not doing this
+ if (!doAutoLocationMapping())
+ return -1;
+
+        // no locations added if one is already present, or for a built-in variable, a block, or an opaque type
+ if (type.getQualifier().hasLocation() || type.isBuiltIn() ||
+ type.getBasicType() == EbtBlock ||
+ type.getBasicType() == EbtAtomicUint ||
+ (type.containsOpaque() && intermediate.getSpv().openGl == 0))
+ return -1;
+
+ // no locations on blocks of built-in variables
+ if (type.isStruct()) {
+ if (type.getStruct()->size() < 1)
+ return -1;
+ if ((*type.getStruct())[0].type->isBuiltIn())
+ return -1;
+ }
+
+ int location = intermediate.getUniformLocationOverride(name);
+ if (location != -1)
+ return location;
+
+ location = nextUniformLocation;
+
+ nextUniformLocation += TIntermediate::computeTypeUniformLocationSize(type);
+
+ return location;
+ }
+ bool validateInOut(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
+ {
+ return true;
+ }
+ int resolveInOutLocation(EShLanguage stage, const char* /*name*/, const TType& type, bool /*is_live*/) override
+ {
+        // kick out if we are not doing this
+ if (!doAutoLocationMapping())
+ return -1;
+
+ // no locations added if already present, or a built-in variable
+ if (type.getQualifier().hasLocation() || type.isBuiltIn())
+ return -1;
+
+ // no locations on blocks of built-in variables
+ if (type.isStruct()) {
+ if (type.getStruct()->size() < 1)
+ return -1;
+ if ((*type.getStruct())[0].type->isBuiltIn())
+ return -1;
+ }
+
+ // point to the right input or output location counter
+ int& nextLocation = type.getQualifier().isPipeInput() ? nextInputLocation : nextOutputLocation;
+
+ // Placeholder. This does not do proper cross-stage lining up, nor
+ // work with mixed location/no-location declarations.
+ int location = nextLocation;
+ int typeLocationSize;
+        // Don't take into account the outer-most array if the stage's
+        // interface is automatically an array.
+ if (type.getQualifier().isArrayedIo(stage)) {
+ TType elementType(type, 0);
+ typeLocationSize = TIntermediate::computeTypeLocationSize(elementType, stage);
+ } else {
+ typeLocationSize = TIntermediate::computeTypeLocationSize(type, stage);
+ }
+ nextLocation += typeLocationSize;
+
+ return location;
+ }
+ int resolveInOutComponent(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
+ {
+ return -1;
+ }
+ int resolveInOutIndex(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
+ {
+ return -1;
+ }
+
+ void notifyBinding(EShLanguage, const char* /*name*/, const TType&, bool /*is_live*/) override {}
+ void notifyInOut(EShLanguage, const char* /*name*/, const TType&, bool /*is_live*/) override {}
+ void endNotifications(EShLanguage) override {}
+ void beginNotifications(EShLanguage) override {}
+ void beginResolve(EShLanguage) override {}
+ void endResolve(EShLanguage) override {}
+
+protected:
+ TDefaultIoResolverBase(TDefaultIoResolverBase&);
+ TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&);
+
+ const TIntermediate &intermediate;
+ int nextUniformLocation;
+ int nextInputLocation;
+ int nextOutputLocation;
+
+    // Return the descriptor-set-specific base if there is one, and the generic base otherwise.
+ int selectBaseBinding(int base, int descriptorSetBase) const {
+ return descriptorSetBase != -1 ? descriptorSetBase : base;
+ }
+
+ static int getLayoutSet(const glslang::TType& type) {
+ if (type.getQualifier().hasSet())
+ return type.getQualifier().layoutSet;
+ else
+ return 0;
+ }
+
+ static bool isSamplerType(const glslang::TType& type) {
+ return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler();
+ }
+
+ static bool isTextureType(const glslang::TType& type) {
+ return (type.getBasicType() == glslang::EbtSampler &&
+ (type.getSampler().isTexture() || type.getSampler().isSubpass()));
+ }
+
+ static bool isUboType(const glslang::TType& type) {
+ return type.getQualifier().storage == EvqUniform;
+ }
+};
+
+/*
+ * Default resolver:
+ * a basic implementation of glslang::TIoMapResolver that replaces the
+ * previous offset behavior. It applies the same per-type offsets for the
+ * corresponding uniform types, and also respects the EOptionAutoMapBindings
+ * flag, auto-binding uniforms when requested.
+ */
+struct TDefaultIoResolver : public TDefaultIoResolverBase
+{
+ TDefaultIoResolver(const TIntermediate &intermediate) : TDefaultIoResolverBase(intermediate) { }
+
+ bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& /*type*/, bool /*is_live*/) override
+ {
+ return true;
+ }
+
+ int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override
+ {
+ const int set = getLayoutSet(type);
+        // On OpenGL, arrays of opaque types take a separate binding for each element
+ int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1;
+
+ if (type.getQualifier().hasBinding()) {
+ if (isImageType(type))
+ return reserveSlot(set, getBaseBinding(EResImage, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isTextureType(type))
+ return reserveSlot(set, getBaseBinding(EResTexture, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isSsboType(type))
+ return reserveSlot(set, getBaseBinding(EResSsbo, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isSamplerType(type))
+ return reserveSlot(set, getBaseBinding(EResSampler, set) + type.getQualifier().layoutBinding, numBindings);
+
+ if (isUboType(type))
+ return reserveSlot(set, getBaseBinding(EResUbo, set) + type.getQualifier().layoutBinding, numBindings);
+ } else if (is_live && doAutoBindingMapping()) {
+            // find a free slot; the caller made sure all variables with an
+            // explicit binding were passed first, so everything passed now
+            // lacks a binding and needs one assigned
+
+ if (isImageType(type))
+ return getFreeSlot(set, getBaseBinding(EResImage, set), numBindings);
+
+ if (isTextureType(type))
+ return getFreeSlot(set, getBaseBinding(EResTexture, set), numBindings);
+
+ if (isSsboType(type))
+ return getFreeSlot(set, getBaseBinding(EResSsbo, set), numBindings);
+
+ if (isSamplerType(type))
+ return getFreeSlot(set, getBaseBinding(EResSampler, set), numBindings);
+
+ if (isUboType(type))
+ return getFreeSlot(set, getBaseBinding(EResUbo, set), numBindings);
+ }
+
+ return -1;
+ }
+
+protected:
+ static bool isImageType(const glslang::TType& type) {
+ return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage();
+ }
+
+ static bool isSsboType(const glslang::TType& type) {
+ return type.getQualifier().storage == EvqBuffer;
+ }
+};
+
+/********************************************************************************
+The following IO resolver maps types in HLSL register space, as follows:
+
+t - for shader resource views (SRV)
+ TEXTURE1D
+ TEXTURE1DARRAY
+ TEXTURE2D
+ TEXTURE2DARRAY
+ TEXTURE3D
+ TEXTURECUBE
+ TEXTURECUBEARRAY
+ TEXTURE2DMS
+ TEXTURE2DMSARRAY
+ STRUCTUREDBUFFER
+ BYTEADDRESSBUFFER
+ BUFFER
+ TBUFFER
+
+s - for samplers
+ SAMPLER
+ SAMPLER1D
+ SAMPLER2D
+ SAMPLER3D
+ SAMPLERCUBE
+ SAMPLERSTATE
+ SAMPLERCOMPARISONSTATE
+
+u - for unordered access views (UAV)
+ RWBYTEADDRESSBUFFER
+ RWSTRUCTUREDBUFFER
+ APPENDSTRUCTUREDBUFFER
+ CONSUMESTRUCTUREDBUFFER
+ RWBUFFER
+ RWTEXTURE1D
+ RWTEXTURE1DARRAY
+ RWTEXTURE2D
+ RWTEXTURE2DARRAY
+ RWTEXTURE3D
+
+b - for constant buffer views (CBV)
+ CBUFFER
+ CONSTANTBUFFER
+ ********************************************************************************/
+struct TDefaultHlslIoResolver : public TDefaultIoResolverBase
+{
+ TDefaultHlslIoResolver(const TIntermediate &intermediate) : TDefaultIoResolverBase(intermediate) { }
+
+ bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& /*type*/, bool /*is_live*/) override
+ {
+ return true;
+ }
+
+ int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override
+ {
+ const int set = getLayoutSet(type);
+
+ if (type.getQualifier().hasBinding()) {
+ if (isUavType(type))
+ return reserveSlot(set, getBaseBinding(EResUav, set) + type.getQualifier().layoutBinding);
+
+ if (isSrvType(type))
+ return reserveSlot(set, getBaseBinding(EResTexture, set) + type.getQualifier().layoutBinding);
+
+ if (isSamplerType(type))
+ return reserveSlot(set, getBaseBinding(EResSampler, set) + type.getQualifier().layoutBinding);
+
+ if (isUboType(type))
+ return reserveSlot(set, getBaseBinding(EResUbo, set) + type.getQualifier().layoutBinding);
+ } else if (is_live && doAutoBindingMapping()) {
+            // find a free slot; the caller made sure all variables with an
+            // explicit binding were passed first, so everything passed now
+            // lacks a binding and needs one assigned
+
+ if (isUavType(type))
+ return getFreeSlot(set, getBaseBinding(EResUav, set));
+
+ if (isSrvType(type))
+ return getFreeSlot(set, getBaseBinding(EResTexture, set));
+
+ if (isSamplerType(type))
+ return getFreeSlot(set, getBaseBinding(EResSampler, set));
+
+ if (isUboType(type))
+ return getFreeSlot(set, getBaseBinding(EResUbo, set));
+ }
+
+ return -1;
+ }
+
+protected:
+    // Return true if this is an SRV (shader resource view) type:
+ static bool isSrvType(const glslang::TType& type) {
+ return isTextureType(type) || type.getQualifier().storage == EvqBuffer;
+ }
+
+ // Return true if this is a UAV (unordered access view) type:
+ static bool isUavType(const glslang::TType& type) {
+ if (type.getQualifier().readonly)
+ return false;
+
+ return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) ||
+ (type.getQualifier().storage == EvqBuffer);
+ }
+};
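+
+// Editor's note, hypothetical example of the mapping above: an HLSL
+// declaration like "Texture2D tex : register(t3);" is an SRV, so it lands at
+// getBaseBinding(EResTexture, set) + 3; a live "RWTexture2D<float4> img;"
+// with no register is a UAV and gets auto-numbered via
+// getFreeSlot(set, getBaseBinding(EResUav, set)) when auto-mapping is enabled.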
+
+
+// Map I/O variables to provided offsets, and make bindings for
+// unbound but live variables.
+//
+// Returns false if the input is too malformed to do this.
+bool TIoMapper::addStage(EShLanguage stage, TIntermediate &intermediate, TInfoSink &infoSink, TIoMapResolver *resolver)
+{
+ bool somethingToDo = !intermediate.getResourceSetBinding().empty() ||
+ intermediate.getAutoMapBindings() ||
+ intermediate.getAutoMapLocations();
+
+ for (int res = 0; res < EResCount; ++res) {
+ somethingToDo = somethingToDo ||
+ (intermediate.getShiftBinding(TResourceType(res)) != 0) ||
+ intermediate.hasShiftBindingForSet(TResourceType(res));
+ }
+
+ if (!somethingToDo && resolver == nullptr)
+ return true;
+
+ if (intermediate.getNumEntryPoints() != 1 || intermediate.isRecursive())
+ return false;
+
+ TIntermNode* root = intermediate.getTreeRoot();
+ if (root == nullptr)
+ return false;
+
+ // if no resolver is provided, use the default resolver with the given shifts and auto map settings
+ TDefaultIoResolver defaultResolver(intermediate);
+ TDefaultHlslIoResolver defaultHlslResolver(intermediate);
+
+ if (resolver == nullptr) {
+ // TODO: use a passed in IO mapper for this
+ if (intermediate.usingHlslIoMapping())
+ resolver = &defaultHlslResolver;
+ else
+ resolver = &defaultResolver;
+ }
+
+ TVarLiveMap inVarMap, outVarMap, uniformVarMap;
+ TVarGatherTraverser iter_binding_all(intermediate, true, inVarMap, outVarMap, uniformVarMap);
+ TVarGatherTraverser iter_binding_live(intermediate, false, inVarMap, outVarMap, uniformVarMap);
+
+ root->traverse(&iter_binding_all);
+ iter_binding_live.pushFunction(intermediate.getEntryPointMangledName().c_str());
+
+ while (!iter_binding_live.functions.empty()) {
+ TIntermNode* function = iter_binding_live.functions.back();
+ iter_binding_live.functions.pop_back();
+ function->traverse(&iter_binding_live);
+ }
+
+ // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info.
+ std::sort(uniformVarMap.begin(), uniformVarMap.end(), TVarEntryInfo::TOrderByPriority());
+
+ bool hadError = false;
+ TNotifyInOutAdaptor inOutNotify(stage, *resolver);
+ TNotifyUniformAdaptor uniformNotify(stage, *resolver);
+ TResolverUniformAdaptor uniformResolve(stage, *resolver, infoSink, hadError, intermediate);
+ TResolverInOutAdaptor inOutResolve(stage, *resolver, infoSink, hadError, intermediate);
+ resolver->beginNotifications(stage);
+ std::for_each(inVarMap.begin(), inVarMap.end(), inOutNotify);
+ std::for_each(outVarMap.begin(), outVarMap.end(), inOutNotify);
+ std::for_each(uniformVarMap.begin(), uniformVarMap.end(), uniformNotify);
+ resolver->endNotifications(stage);
+ resolver->beginResolve(stage);
+ std::for_each(inVarMap.begin(), inVarMap.end(), inOutResolve);
+ std::for_each(outVarMap.begin(), outVarMap.end(), inOutResolve);
+ std::for_each(uniformVarMap.begin(), uniformVarMap.end(), uniformResolve);
+ resolver->endResolve(stage);
+
+ if (!hadError) {
+ // sort by id again, so we can use lower bound to find entries
+ std::sort(uniformVarMap.begin(), uniformVarMap.end(), TVarEntryInfo::TOrderById());
+ TVarSetTraverser iter_iomap(intermediate, inVarMap, outVarMap, uniformVarMap);
+ root->traverse(&iter_iomap);
+ }
+
+ return !hadError;
+}
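+
+// Editor's note: a hypothetical caller-side sketch (identifiers assumed, not
+// part of this file) of feeding one stage through the mapper; 'intermediate'
+// and 'infoSink' would come from a compiled shader. A null resolver selects
+// one of the default resolvers above.
+//
+//     glslang::TIoMapper ioMapper;
+//     bool ok = ioMapper.addStage(EShLangFragment, intermediate, infoSink, nullptr);
+//     if (!ok)
+//         /* report the messages accumulated in infoSink */;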
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/iomapper.h b/src/3rdparty/glslang/glslang/MachineIndependent/iomapper.h
new file mode 100644
index 0000000..5e0d439
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/iomapper.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _IOMAPPER_INCLUDED
+#define _IOMAPPER_INCLUDED
+
+#include "../Public/ShaderLang.h"
+
+//
+// An I/O mapper and its interface, for assigning bindings and locations to shader variables.
+//
+
+class TInfoSink;
+
+namespace glslang {
+
+class TIntermediate;
+
+// I/O mapper
+class TIoMapper {
+public:
+ TIoMapper() {}
+ virtual ~TIoMapper() {}
+
+    // grow the I/O mapping stage by stage
+ bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
+};
+
+} // end namespace glslang
+
+#endif // _IOMAPPER_INCLUDED
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/limits.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/limits.cpp
new file mode 100644
index 0000000..64d191b
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/limits.cpp
@@ -0,0 +1,198 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Do subtree walks for
+// 1) inductive loop bodies to see if the inductive variable is modified
+// 2) array-index expressions to see if they are "constant-index-expression"
+//
+// These are per Appendix A of ES 2.0:
+//
+// "Within the body of the loop, the loop index is not statically assigned to nor is it used as the
+// argument to a function out or inout parameter."
+//
+// "The following are constant-index-expressions:
+// - Constant expressions
+// - Loop indices as defined in section 4
+// - Expressions composed of both of the above"
+//
+// N.B.: assuming the last rule excludes function calls
+//
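+//
+// For example (editor's note), under these rules:
+//
+//     for (int i = 0; i < 10; ++i) i += 2;            // bad: loop index is assigned
+//     for (int i = 0; i < 10; ++i) v[i] = 0.0;        // ok: constant-index-expression
+//     for (int i = 0; i < 10; ++i) v[f(i)] = 0.0;     // bad: function call in the index
+//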
+
+#include "ParseHelper.h"
+
+namespace glslang {
+
+//
+// The inductive loop-body traverser.
+//
+// Just look at things that might modify the loop index.
+//
+
+class TInductiveTraverser : public TIntermTraverser {
+public:
+ TInductiveTraverser(int id, TSymbolTable& st)
+ : loopId(id), symbolTable(st), bad(false) { }
+
+ virtual bool visitBinary(TVisit, TIntermBinary* node);
+ virtual bool visitUnary(TVisit, TIntermUnary* node);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+
+ int loopId; // unique ID of the symbol that's the loop inductive variable
+ TSymbolTable& symbolTable;
+ bool bad;
+ TSourceLoc badLoc;
+
+protected:
+ TInductiveTraverser(TInductiveTraverser&);
+ TInductiveTraverser& operator=(TInductiveTraverser&);
+};
+
+// check binary operations for those modifying the loop index
+bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
+{
+ if (node->modifiesState() && node->getLeft()->getAsSymbolNode() &&
+ node->getLeft()->getAsSymbolNode()->getId() == loopId) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+
+ return true;
+}
+
+// check unary operations for those modifying the loop index
+bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
+{
+ if (node->modifiesState() && node->getOperand()->getAsSymbolNode() &&
+ node->getOperand()->getAsSymbolNode()->getId() == loopId) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+
+ return true;
+}
+
+// check function calls for arguments modifying the loop index
+bool TInductiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ if (node->getOp() == EOpFunctionCall) {
+ // see if an out or inout argument is the loop index
+ const TIntermSequence& args = node->getSequence();
+ for (int i = 0; i < (int)args.size(); ++i) {
+ if (args[i]->getAsSymbolNode() && args[i]->getAsSymbolNode()->getId() == loopId) {
+ TSymbol* function = symbolTable.find(node->getName());
+ const TType* type = (*function->getAsFunction())[i].type;
+ if (type->getQualifier().storage == EvqOut ||
+ type->getQualifier().storage == EvqInOut) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+//
+// External function to call for loop check.
+//
+void TParseContext::inductiveLoopBodyCheck(TIntermNode* body, int loopId, TSymbolTable& symbolTable)
+{
+ TInductiveTraverser it(loopId, symbolTable);
+
+ if (body == nullptr)
+ return;
+
+ body->traverse(&it);
+
+ if (it.bad)
+ error(it.badLoc, "inductive loop index modified", "limitations", "");
+}
+
+//
+// The "constant-index-expression" tranverser.
+//
+// Just look at things that can form an index.
+//
+
+class TIndexTraverser : public TIntermTraverser {
+public:
+ TIndexTraverser(const TIdSetType& ids) : inductiveLoopIds(ids), bad(false) { }
+ virtual void visitSymbol(TIntermSymbol* symbol);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+ const TIdSetType& inductiveLoopIds;
+ bool bad;
+ TSourceLoc badLoc;
+
+protected:
+ TIndexTraverser(TIndexTraverser&);
+ TIndexTraverser& operator=(TIndexTraverser&);
+};
+
+// make sure symbols are inductive-loop indexes
+void TIndexTraverser::visitSymbol(TIntermSymbol* symbol)
+{
+ if (inductiveLoopIds.find(symbol->getId()) == inductiveLoopIds.end()) {
+ bad = true;
+ badLoc = symbol->getLoc();
+ }
+}
+
+// check for function calls, assuming they are bad; the spec doesn't really say
+bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ if (node->getOp() == EOpFunctionCall) {
+ bad = true;
+ badLoc = node->getLoc();
+ }
+
+ return true;
+}
+
+//
+// External function to call for loop check.
+//
+void TParseContext::constantIndexExpressionCheck(TIntermNode* index)
+{
+ TIndexTraverser it(inductiveLoopIds);
+
+ index->traverse(&it);
+
+ if (it.bad)
+ error(it.badLoc, "Non-constant-index-expression", "limitations", "");
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/linkValidate.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/linkValidate.cpp
new file mode 100644
index 0000000..0cf2d36
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/linkValidate.cpp
@@ -0,0 +1,1686 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Do link-time merging and validation of intermediate representations.
+//
+// Basic model is that during compilation, each compilation unit (shader) is
+// compiled into one TIntermediate instance. Then, at link time, multiple
+// units for the same stage can be merged together, which can generate errors.
+// Then, after all merging, a single instance of TIntermediate represents
+// the whole stage. A final error check can be done on the resulting stage,
+// even if no merging was done (i.e., the stage was only one compilation unit).
+//
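+//
+// Editor's note: a hypothetical sketch of that flow (identifiers assumed);
+// each 'unit' below is a separately compiled TIntermediate for the same stage:
+//
+//     stage.merge(infoSink, unitA);    // link-time merge; may emit errors
+//     stage.merge(infoSink, unitB);
+//     stage.finalCheck(infoSink, /*keepUncalled=*/false);
+//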
+
+#include "localintermediate.h"
+#include "../Include/InfoSink.h"
+
+namespace glslang {
+
+//
+// Link-time error emitter.
+//
+void TIntermediate::error(TInfoSink& infoSink, const char* message)
+{
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
+
+ ++numErrors;
+}
+
+// Link-time warning.
+void TIntermediate::warn(TInfoSink& infoSink, const char* message)
+{
+ infoSink.info.prefix(EPrefixWarning);
+ infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
+}
+
+// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
+// name must have the exact same set of members qualified with offset and their integral-constant
+// expression values must be the same, or a link-time error results."
+
+//
+// Merge the information from 'unit' into 'this'
+//
+void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
+{
+ mergeCallGraphs(infoSink, unit);
+ mergeModes(infoSink, unit);
+ mergeTrees(infoSink, unit);
+}
+
+void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
+{
+ if (unit.getNumEntryPoints() > 0) {
+ if (getNumEntryPoints() > 0)
+ error(infoSink, "can't handle multiple entry points per stage");
+ else {
+ entryPointName = unit.getEntryPointName();
+ entryPointMangledName = unit.getEntryPointMangledName();
+ }
+ }
+ numEntryPoints += unit.getNumEntryPoints();
+
+ callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
+}
+
+#define MERGE_MAX(member) member = std::max(member, unit.member)
+#define MERGE_TRUE(member) if (unit.member) member = unit.member;
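+
+// For example (editor's note), MERGE_MAX(spvVersion.spv) expands to
+//     spvVersion.spv = std::max(spvVersion.spv, unit.spvVersion.spv);
+// while MERGE_TRUE only ever turns a flag on, never off.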
+
+void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
+{
+ if (language != unit.language)
+ error(infoSink, "stages must match when linking into a single stage");
+
+ if (source == EShSourceNone)
+ source = unit.source;
+ if (source != unit.source)
+ error(infoSink, "can't link compilation units from different source languages");
+
+ if (treeRoot == nullptr) {
+ profile = unit.profile;
+ version = unit.version;
+ requestedExtensions = unit.requestedExtensions;
+ } else {
+ if ((profile == EEsProfile) != (unit.profile == EEsProfile))
+ error(infoSink, "Cannot cross link ES and desktop profiles");
+ else if (unit.profile == ECompatibilityProfile)
+ profile = ECompatibilityProfile;
+ version = std::max(version, unit.version);
+ requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
+ }
+
+ MERGE_MAX(spvVersion.spv);
+ MERGE_MAX(spvVersion.vulkanGlsl);
+ MERGE_MAX(spvVersion.vulkan);
+ MERGE_MAX(spvVersion.openGl);
+
+ numErrors += unit.getNumErrors();
+ numPushConstants += unit.numPushConstants;
+
+ if (unit.invocations != TQualifier::layoutNotSet) {
+ if (invocations == TQualifier::layoutNotSet)
+ invocations = unit.invocations;
+ else if (invocations != unit.invocations)
+ error(infoSink, "number of invocations must match between compilation units");
+ }
+
+ if (vertices == TQualifier::layoutNotSet)
+ vertices = unit.vertices;
+ else if (vertices != unit.vertices) {
+ if (language == EShLangGeometry
+#ifdef NV_EXTENSIONS
+ || language == EShLangMeshNV
+#endif
+ )
+ error(infoSink, "Contradictory layout max_vertices values");
+ else if (language == EShLangTessControl)
+ error(infoSink, "Contradictory layout vertices values");
+ else
+ assert(0);
+ }
+#ifdef NV_EXTENSIONS
+ if (primitives == TQualifier::layoutNotSet)
+ primitives = unit.primitives;
+ else if (primitives != unit.primitives) {
+ if (language == EShLangMeshNV)
+ error(infoSink, "Contradictory layout max_primitives values");
+ else
+ assert(0);
+ }
+#endif
+
+ if (inputPrimitive == ElgNone)
+ inputPrimitive = unit.inputPrimitive;
+ else if (inputPrimitive != unit.inputPrimitive)
+ error(infoSink, "Contradictory input layout primitives");
+
+ if (outputPrimitive == ElgNone)
+ outputPrimitive = unit.outputPrimitive;
+ else if (outputPrimitive != unit.outputPrimitive)
+ error(infoSink, "Contradictory output layout primitives");
+
+ if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
+ error(infoSink, "gl_FragCoord redeclarations must match across shaders");
+
+ if (vertexSpacing == EvsNone)
+ vertexSpacing = unit.vertexSpacing;
+ else if (vertexSpacing != unit.vertexSpacing)
+ error(infoSink, "Contradictory input vertex spacing");
+
+ if (vertexOrder == EvoNone)
+ vertexOrder = unit.vertexOrder;
+ else if (vertexOrder != unit.vertexOrder)
+ error(infoSink, "Contradictory triangle ordering");
+
+ MERGE_TRUE(pointMode);
+
+ for (int i = 0; i < 3; ++i) {
+ if (localSize[i] > 1)
+ localSize[i] = unit.localSize[i];
+ else if (localSize[i] != unit.localSize[i])
+ error(infoSink, "Contradictory local size");
+
+ if (localSizeSpecId[i] != TQualifier::layoutNotSet)
+ localSizeSpecId[i] = unit.localSizeSpecId[i];
+ else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
+ error(infoSink, "Contradictory local size specialization ids");
+ }
+
+ MERGE_TRUE(earlyFragmentTests);
+ MERGE_TRUE(postDepthCoverage);
+
+ if (depthLayout == EldNone)
+ depthLayout = unit.depthLayout;
+ else if (depthLayout != unit.depthLayout)
+ error(infoSink, "Contradictory depth layouts");
+
+ MERGE_TRUE(depthReplacing);
+ MERGE_TRUE(hlslFunctionality1);
+
+ blendEquations |= unit.blendEquations;
+
+ MERGE_TRUE(xfbMode);
+
+ for (size_t b = 0; b < xfbBuffers.size(); ++b) {
+ if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
+ xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
+ else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
+ error(infoSink, "Contradictory xfb_stride");
+ xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
+ if (unit.xfbBuffers[b].contains64BitType)
+ xfbBuffers[b].contains64BitType = true;
+#ifdef AMD_EXTENSIONS
+ if (unit.xfbBuffers[b].contains32BitType)
+ xfbBuffers[b].contains32BitType = true;
+ if (unit.xfbBuffers[b].contains16BitType)
+ xfbBuffers[b].contains16BitType = true;
+#endif
+ // TODO: 4.4 link: enhanced layouts: compare ranges
+ }
+
+ MERGE_TRUE(multiStream);
+
+#ifdef NV_EXTENSIONS
+ MERGE_TRUE(layoutOverrideCoverage);
+ MERGE_TRUE(geoPassthroughEXT);
+#endif
+
+ for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
+ if (unit.shiftBinding[i] > 0)
+ setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
+ }
+
+ for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
+ for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
+ setShiftBindingForSet((TResourceType)i, it->second, it->first);
+ }
+
+ resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());
+
+ MERGE_TRUE(autoMapBindings);
+ MERGE_TRUE(autoMapLocations);
+ MERGE_TRUE(invertY);
+ MERGE_TRUE(flattenUniformArrays);
+ MERGE_TRUE(useUnknownFormat);
+ MERGE_TRUE(hlslOffsets);
+ MERGE_TRUE(useStorageBuffer);
+ MERGE_TRUE(hlslIoMapping);
+
+ // TODO: sourceFile
+ // TODO: sourceText
+ // TODO: processes
+
+ MERGE_TRUE(needToLegalize);
+ MERGE_TRUE(binaryDoubleOutput);
+ MERGE_TRUE(usePhysicalStorageBuffer);
+}
+
+//
+// Merge the 'unit' AST into 'this' AST.
+// That includes rationalizing the unique IDs, which were set up independently,
+// and might have overlaps that are not the same symbol, or might have different
+// IDs for what should be the same shared symbol.
+//
+void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
+{
+ if (unit.treeRoot == nullptr)
+ return;
+
+ if (treeRoot == nullptr) {
+ treeRoot = unit.treeRoot;
+ return;
+ }
+
+ // Getting this far means we have two existing trees to merge...
+#ifdef NV_EXTENSIONS
+    numShaderRecordNVBlocks += unit.numShaderRecordNVBlocks;
+    numTaskNVBlocks += unit.numTaskNVBlocks;
+#endif
+
+ // Get the top-level globals of each unit
+ TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
+ TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();
+
+ // Get the linker-object lists
+ TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
+ const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();
+
+ // Map by global name to unique ID to rationalize the same object having
+ // differing IDs in different trees.
+ TMap<TString, int> idMap;
+ int maxId;
+ seedIdMap(idMap, maxId);
+ remapIds(idMap, maxId + 1, unit);
+
+ mergeBodies(infoSink, globals, unitGlobals);
+ mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects);
+ ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
+}
+
+// Traverser that seeds an ID map with all built-ins, and tracks the
+// maximum ID used.
+// (It would be nice to put this in a function, but that causes warnings
+// on having no bodies for the copy-constructor/operator=.)
+class TBuiltInIdTraverser : public TIntermTraverser {
+public:
+ TBuiltInIdTraverser(TMap<TString, int>& idMap) : idMap(idMap), maxId(0) { }
+    // If it's a built-in, add it to the map.
+ // Track the max ID.
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ const TQualifier& qualifier = symbol->getType().getQualifier();
+ if (qualifier.builtIn != EbvNone)
+ idMap[symbol->getName()] = symbol->getId();
+ maxId = std::max(maxId, symbol->getId());
+ }
+ int getMaxId() const { return maxId; }
+protected:
+ TBuiltInIdTraverser(TBuiltInIdTraverser&);
+ TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
+ TMap<TString, int>& idMap;
+ int maxId;
+};
+
+// Traverser that seeds an ID map with non-builtins.
+// (It would be nice to put this in a function, but that causes warnings
+// on having no bodies for the copy-constructor/operator=.)
+class TUserIdTraverser : public TIntermTraverser {
+public:
+ TUserIdTraverser(TMap<TString, int>& idMap) : idMap(idMap) { }
+    // If it's a non-built-in global, add it to the map.
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ const TQualifier& qualifier = symbol->getType().getQualifier();
+ if (qualifier.builtIn == EbvNone)
+ idMap[symbol->getName()] = symbol->getId();
+ }
+
+protected:
+ TUserIdTraverser(TUserIdTraverser&);
+ TUserIdTraverser& operator=(TUserIdTraverser&);
+    TMap<TString, int>& idMap;
+};
+
+// Initialize the ID map with what we know of 'this' AST.
+void TIntermediate::seedIdMap(TMap<TString, int>& idMap, int& maxId)
+{
+ // all built-ins everywhere need to align on IDs and contribute to the max ID
+ TBuiltInIdTraverser builtInIdTraverser(idMap);
+ treeRoot->traverse(&builtInIdTraverser);
+ maxId = builtInIdTraverser.getMaxId();
+
+ // user variables in the linker object list need to align on ids
+ TUserIdTraverser userIdTraverser(idMap);
+ findLinkerObjects()->traverse(&userIdTraverser);
+}
+
+// Traverser to map an AST ID to what was known from the seeding AST.
+// (It would be nice to put this in a function, but that causes warnings
+// on having no bodies for the copy-constructor/operator=.)
+class TRemapIdTraverser : public TIntermTraverser {
+public:
+ TRemapIdTraverser(const TMap<TString, int>& idMap, int idShift) : idMap(idMap), idShift(idShift) { }
+ // Do the mapping:
+ // - if the same symbol, adopt the 'this' ID
+ // - otherwise, ensure a unique ID by shifting to a new space
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ const TQualifier& qualifier = symbol->getType().getQualifier();
+ bool remapped = false;
+ if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
+ auto it = idMap.find(symbol->getName());
+ if (it != idMap.end()) {
+ symbol->changeId(it->second);
+ remapped = true;
+ }
+ }
+ if (!remapped)
+ symbol->changeId(symbol->getId() + idShift);
+ }
+protected:
+ TRemapIdTraverser(TRemapIdTraverser&);
+ TRemapIdTraverser& operator=(TRemapIdTraverser&);
+ const TMap<TString, int>& idMap;
+    int idShift; // shift amount for unmatched ids, chosen to be over the biggest existing id
+};
+
+void TIntermediate::remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate& unit)
+{
+ // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
+ TRemapIdTraverser idTraverser(idMap, idShift);
+ unit.getTreeRoot()->traverse(&idTraverser);
+}
+
+//
+// Merge the function bodies and global-level initializers from unitGlobals into globals.
+// Will error check duplication of function bodies for the same signature.
+//
+void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
+{
+ // TODO: link-time performance: Processing in alphabetical order will be faster
+
+ // Error check the global objects, not including the linker objects
+ for (unsigned int child = 0; child < globals.size() - 1; ++child) {
+ for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
+ TIntermAggregate* body = globals[child]->getAsAggregate();
+ TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
+ if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) {
+ error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
+ infoSink.info << " " << globals[child]->getAsAggregate()->getName() << "\n";
+ }
+ }
+ }
+
+ // Merge the global objects, just in front of the linker objects
+ globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
+}
+
+//
+// Merge the linker objects from unitLinkerObjects into linkerObjects.
+// Duplication is expected and filtered out, but contradictions are an error.
+//
+void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects)
+{
+ // Error check and merge the linker objects (duplicates should not be created)
+ std::size_t initialNumLinkerObjects = linkerObjects.size();
+ for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
+ bool merge = true;
+ for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
+ TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
+ TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
+ assert(symbol && unitSymbol);
+ if (symbol->getName() == unitSymbol->getName()) {
+ // filter out copy
+ merge = false;
+
+ // but if one has an initializer and the other does not, update
+ // the initializer
+ if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
+ symbol->setConstArray(unitSymbol->getConstArray());
+
+ // Similarly for binding
+ if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
+ symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;
+
+ // Update implicit array sizes
+ mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());
+
+ // Check for consistent types/qualification/initializers etc.
+ mergeErrorCheck(infoSink, *symbol, *unitSymbol, false);
+ }
+ }
+ if (merge)
+ linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);
+ }
+}
+
+// TODO 4.5 link functionality: cull distance array size checking
+
+// Recursively merge the implicit array sizes through the objects' respective type trees.
+void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
+{
+ if (type.isUnsizedArray()) {
+ if (unitType.isUnsizedArray()) {
+ type.updateImplicitArraySize(unitType.getImplicitArraySize());
+ if (unitType.isArrayVariablyIndexed())
+ type.setArrayVariablyIndexed();
+ } else if (unitType.isSizedArray())
+ type.changeOuterArraySize(unitType.getOuterArraySize());
+ }
+
+ // Type mismatches are caught and reported after this, just be careful for now.
+ if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())
+ return;
+
+ for (int i = 0; i < (int)type.getStruct()->size(); ++i)
+ mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
+}
+
+//
+// Compare two global objects from two compilation units and see if they match
+// well enough. Rules can be different for intra- vs. cross-stage matching.
+//
+// This function only does one of intra- or cross-stage matching per call.
+//
+void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage)
+{
+ bool writeTypeComparison = false;
+
+ // Types have to match
+ if (symbol.getType() != unitSymbol.getType()) {
+ // but, we make an exception if one is an implicit array and the other is sized
+ if (! (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
+ symbol.getType().sameElementType(unitSymbol.getType()) &&
+ (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()))) {
+ error(infoSink, "Types must match:");
+ writeTypeComparison = true;
+ }
+ }
+
+ // Qualifiers have to (almost) match
+
+ // Storage...
+ if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
+ error(infoSink, "Storage qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Precision...
+ if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
+ error(infoSink, "Precision qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Invariance...
+ if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
+ error(infoSink, "Presence of invariant qualifier must match:");
+ writeTypeComparison = true;
+ }
+
+ // Precise...
+ if (! crossStage && symbol.getQualifier().noContraction != unitSymbol.getQualifier().noContraction) {
+ error(infoSink, "Presence of precise qualifier must match:");
+ writeTypeComparison = true;
+ }
+
+ // Auxiliary and interpolation...
+ if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid ||
+ symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth ||
+ symbol.getQualifier().flat != unitSymbol.getQualifier().flat ||
+ symbol.getQualifier().sample != unitSymbol.getQualifier().sample ||
+ symbol.getQualifier().patch != unitSymbol.getQualifier().patch ||
+ symbol.getQualifier().nopersp != unitSymbol.getQualifier().nopersp) {
+ error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Memory...
+ if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent ||
+ symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent ||
+ symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent ||
+ symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent ||
+ symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent ||
+ symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate ||
+ symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil ||
+ symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict ||
+ symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly ||
+ symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
+ error(infoSink, "Memory qualifiers must match:");
+ writeTypeComparison = true;
+ }
+
+ // Layouts...
+ // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
+ // requires separate user-supplied offset from actual computed offset, but
+ // current implementation only has one offset.
+ if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix ||
+ symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking ||
+ symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation ||
+ symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent ||
+ symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex ||
+ symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding ||
+ (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) {
+ error(infoSink, "Layout qualification must match:");
+ writeTypeComparison = true;
+ }
+
+ // Initializers have to match, if both are present, and if we don't already know the types don't match
+ if (! writeTypeComparison) {
+ if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
+ if (symbol.getConstArray() != unitSymbol.getConstArray()) {
+ error(infoSink, "Initializers must match:");
+ infoSink.info << " " << symbol.getName() << "\n";
+ }
+ }
+ }
+
+ if (writeTypeComparison)
+ infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" <<
+ unitSymbol.getType().getCompleteString() << "\"\n";
+}
+
+//
+// Do final link-time error checking of a complete (merged) intermediate representation.
+// (Much error checking was done during merging).
+//
+// Also, lock in defaults of things not set, including array sizes.
+//
+void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
+{
+ if (getTreeRoot() == nullptr)
+ return;
+
+ if (numEntryPoints < 1) {
+ if (source == EShSourceGlsl)
+ error(infoSink, "Missing entry point: Each stage requires one entry point");
+ else
+ warn(infoSink, "Entry point not found");
+ }
+
+ if (numPushConstants > 1)
+ error(infoSink, "Only one push_constant block is allowed per stage");
+
+ // recursion and missing body checking
+ checkCallGraphCycles(infoSink);
+ checkCallGraphBodies(infoSink, keepUncalled);
+
+ // overlap/alias/missing I/O, etc.
+ inOutLocationCheck(infoSink);
+
+ // invocations
+ if (invocations == TQualifier::layoutNotSet)
+ invocations = 1;
+
+ if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
+ error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
+ if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
+ error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
+
+ if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
+ error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
+ if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
+ error(infoSink, "Cannot use both gl_FragColor and gl_FragData");
+
+ for (size_t b = 0; b < xfbBuffers.size(); ++b) {
+ if (xfbBuffers[b].contains64BitType)
+ RoundToPow2(xfbBuffers[b].implicitStride, 8);
+#ifdef AMD_EXTENSIONS
+ else if (xfbBuffers[b].contains32BitType)
+ RoundToPow2(xfbBuffers[b].implicitStride, 4);
+ else if (xfbBuffers[b].contains16BitType)
+ RoundToPow2(xfbBuffers[b].implicitStride, 2);
+#endif
+
+ // "It is a compile-time or link-time error to have
+ // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
+ // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
+ // compile-time or link-time error to have different values specified for the stride for the same buffer."
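+        // Example (illustrative): with xfb_stride = 12 declared for a buffer, an
+        // output vec2 at xfb_offset = 8 needs bytes 8..15, so the implicit
+        // stride (16) exceeds the declared stride and the error below fires.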
+ if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
+ error(infoSink, "xfb_stride is too small to hold all buffer entries:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
+ }
+ if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
+ xfbBuffers[b].stride = xfbBuffers[b].implicitStride;
+
+ // "If the buffer is capturing any
+ // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
+ // multiple of 4, or a compile-time or link-time error results."
+ if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
+ error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
+#ifdef AMD_EXTENSIONS
+ } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
+#else
+ } else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
+#endif
+ error(infoSink, "xfb_stride must be multiple of 4:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
+ }
+#ifdef AMD_EXTENSIONS
+ // "If the buffer is capturing any
+ // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
+ else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
+ error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
+ }
+
+#endif
+ // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
+ // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
+ if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
+ error(infoSink, "xfb_stride is too large:");
+ infoSink.info.prefix(EPrefixError);
+ infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n";
+ }
+ }
+
+ switch (language) {
+ case EShLangVertex:
+ break;
+ case EShLangTessControl:
+ if (vertices == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify an output layout(vertices=...)");
+ break;
+ case EShLangTessEvaluation:
+ if (source == EShSourceGlsl) {
+ if (inputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an input layout primitive");
+ if (vertexSpacing == EvsNone)
+ vertexSpacing = EvsEqual;
+ if (vertexOrder == EvoNone)
+ vertexOrder = EvoCcw;
+ }
+ break;
+ case EShLangGeometry:
+ if (inputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an input layout primitive");
+ if (outputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an output layout primitive");
+ if (vertices == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
+ break;
+ case EShLangFragment:
+ // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
+ // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
+ // requiring explicit early_fragment_tests
+ if (getPostDepthCoverage() && !getEarlyFragmentTests())
+ error(infoSink, "post_depth_coverage requires early_fragment_tests");
+ break;
+ case EShLangCompute:
+ break;
+
+#ifdef NV_EXTENSIONS
+ case EShLangRayGenNV:
+ case EShLangIntersectNV:
+ case EShLangAnyHitNV:
+ case EShLangClosestHitNV:
+ case EShLangMissNV:
+ case EShLangCallableNV:
+ if (numShaderRecordNVBlocks > 1)
+ error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
+ break;
+ case EShLangMeshNV:
+ // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
+ if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
+ error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
+ if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
+ error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
+ if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
+ error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
+ if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
+ error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
+ if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
+ error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
+ if (outputPrimitive == ElgNone)
+ error(infoSink, "At least one shader must specify an output layout primitive");
+ if (vertices == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
+ if (primitives == TQualifier::layoutNotSet)
+ error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
+ // fall through
+ case EShLangTaskNV:
+ if (numTaskNVBlocks > 1)
+ error(infoSink, "Only one taskNV interface block is allowed per shader");
+ break;
+#endif
+
+ default:
+ error(infoSink, "Unknown Stage.");
+ break;
+ }
+
+ // Process the tree for any node-specific work.
+ class TFinalLinkTraverser : public TIntermTraverser {
+ public:
+ TFinalLinkTraverser() { }
+ virtual ~TFinalLinkTraverser() { }
+
+ virtual void visitSymbol(TIntermSymbol* symbol)
+ {
+ // Implicitly size arrays.
+ // If an unsized array is left as unsized, it effectively
+ // becomes run-time sized.
+ symbol->getWritableType().adoptImplicitArraySizes(false);
+ }
+ } finalLinkTraverser;
+
+ treeRoot->traverse(&finalLinkTraverser);
+}
+
+//
+// See if the call graph contains any static recursion, which is disallowed
+// by the specification.
+//
+void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
+{
+ // Clear fields we'll use for this.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ call->visited = false;
+ call->currentPath = false;
+ call->errorGiven = false;
+ }
+
+ //
+ // Loop, looking for a new connected subgraph. One subgraph is handled per loop iteration.
+ //
+
+ TCall* newRoot;
+ do {
+ // See if we have unvisited parts of the graph.
+ newRoot = 0;
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (! call->visited) {
+ newRoot = &(*call);
+ break;
+ }
+ }
+
+ // If not, we are done.
+ if (! newRoot)
+ break;
+
+ // Otherwise, we found a new subgraph, process it:
+ // See what all can be reached by this new root, and if any of
+ // that is recursive. This is done by depth-first traversals, seeing
+ // if a new call is found that was already in the currentPath (a back edge),
+ // thereby detecting recursion.
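+        // Example (illustrative): if a() calls b() and b() calls a(), the
+        // traversal reaches b's edge back to a while a is still on currentPath,
+        // and that back edge is reported as recursion.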
+ std::list<TCall*> stack;
+ newRoot->currentPath = true; // currentPath will be true iff it is on the stack
+ stack.push_back(newRoot);
+ while (! stack.empty()) {
+ // get a caller
+ TCall* call = stack.back();
+
+ // Add to the stack just one callee.
+ // This algorithm always terminates, because only !visited and !currentPath causes a push
+ // and all pushes change currentPath to true, and all pops change visited to true.
+ TGraph::iterator child = callGraph.begin();
+ for (; child != callGraph.end(); ++child) {
+
+ // If we already visited this node, its whole subgraph has already been processed, so skip it.
+ if (child->visited)
+ continue;
+
+ if (call->callee == child->caller) {
+ if (child->currentPath) {
+ // Then, we found a back edge
+ if (! child->errorGiven) {
+ error(infoSink, "Recursion detected:");
+ infoSink.info << " " << call->callee << " calling " << child->callee << "\n";
+ child->errorGiven = true;
+ recursive = true;
+ }
+ } else {
+ child->currentPath = true;
+ stack.push_back(&(*child));
+ break;
+ }
+ }
+ }
+ if (child == callGraph.end()) {
+ // no more callees, we bottomed out, never look at this node again
+ stack.back()->currentPath = false;
+ stack.back()->visited = true;
+ stack.pop_back();
+ }
+ } // end while, meaning nothing left to process in this subtree
+
+ } while (newRoot); // redundant loop check; should always exit via the 'break' above
+}
+
+//
+// See which functions are reachable from the entry point and which have bodies.
+// Reachable ones with missing bodies are errors.
+// Unreachable bodies are dead code.
+//
+void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
+{
+ // Clear fields we'll use for this.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ call->visited = false;
+ call->calleeBodyPosition = -1;
+ }
+
+ // The top level of the AST includes function definitions (bodies).
+ // Compare these to function calls in the call graph.
+ // We'll end up knowing which have bodies, and if so,
+ // how to map the call-graph node to the location in the AST.
+ TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
+ std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
+ for (int f = 0; f < (int)functionSequence.size(); ++f) {
+ glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate();
+ if (node && (node->getOp() == glslang::EOpFunction)) {
+ if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
+ reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->callee == node->getName())
+ call->calleeBodyPosition = f;
+ }
+ }
+ }
+
+ // Start call-graph traversal by visiting the entry point nodes.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
+ call->visited = true;
+ }
+
+ // Propagate 'visited' through the call-graph to every part of the graph it
+ // can reach (seeded with the entry-point setting above).
+ bool changed;
+ do {
+ changed = false;
+ for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
+ if (call1->visited) {
+ for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
+ if (! call2->visited) {
+ if (call1->callee == call2->caller) {
+ changed = true;
+ call2->visited = true;
+ }
+ }
+ }
+ }
+ }
+ } while (changed);
+
+ // Any call-graph node set to visited but without a callee body is an error.
+ for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
+ if (call->visited) {
+ if (call->calleeBodyPosition == -1) {
+ error(infoSink, "No function definition (body) found: ");
+ infoSink.info << " " << call->callee << "\n";
+ } else
+ reachable[call->calleeBodyPosition] = true;
+ }
+ }
+
+ // Bodies in the AST not reached by the call graph are dead;
+ // clear them out, since they can't be reached and also can't
+    // be translated further due to the possibility of being ill defined.
+ if (! keepUncalled) {
+ for (int f = 0; f < (int)functionSequence.size(); ++f) {
+ if (! reachable[f])
+ functionSequence[f] = nullptr;
+ }
+ functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end());
+ }
+}
+
+//
+// Satisfy rules for location qualifiers on inputs and outputs
+//
+void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
+{
+ // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
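+    // Example (illustrative): an ES fragment shader with 'out vec4 a;' and
+    // 'out vec4 b;' where either lacks layout(location = N) gets the error
+    // below; a single output needs no location qualifier.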
+ bool fragOutWithNoLocation = false;
+ int numFragOut = 0;
+
+ // TODO: linker functionality: location collision checking
+
+ TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
+ for (size_t i = 0; i < linkObjects.size(); ++i) {
+ const TType& type = linkObjects[i]->getAsTyped()->getType();
+ const TQualifier& qualifier = type.getQualifier();
+ if (language == EShLangFragment) {
+ if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
+ ++numFragOut;
+ if (!qualifier.hasAnyLocation())
+ fragOutWithNoLocation = true;
+ }
+ }
+ }
+
+ if (profile == EEsProfile) {
+ if (numFragOut > 1 && fragOutWithNoLocation)
+ error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
+ }
+}
+
+TIntermAggregate* TIntermediate::findLinkerObjects() const
+{
+ // Get the top-level globals
+ TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
+
+    // Get the last member of the sequence, expected to be the linker-objects list
+ assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);
+
+ return globals.back()->getAsAggregate();
+}
+
+// See if a variable was both a user-declared output and used.
+// Note: the spec discusses writing to one, but this looks at read or write, which
+// is more useful, and perhaps the spec should be changed to reflect that.
+bool TIntermediate::userOutputUsed() const
+{
+ const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
+
+ bool found = false;
+ for (size_t i = 0; i < linkerObjects.size(); ++i) {
+ const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
+ if (symbolNode.getQualifier().storage == EvqVaryingOut &&
+ symbolNode.getName().compare(0, 3, "gl_") != 0 &&
+ inIoAccessed(symbolNode.getName())) {
+ found = true;
+ break;
+ }
+ }
+
+ return found;
+}
+
+// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions
+// as the accumulation is done.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+// typeCollision is set to true if there is no direct collision, but the types in the same location
+// are different.
+//
+int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
+{
+ typeCollision = false;
+
+ int set;
+ if (qualifier.isPipeInput())
+ set = 0;
+ else if (qualifier.isPipeOutput())
+ set = 1;
+ else if (qualifier.storage == EvqUniform)
+ set = 2;
+ else if (qualifier.storage == EvqBuffer)
+ set = 3;
+ else
+ return -1;
+
+ int size;
+ if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
+ if (type.isSizedArray())
+ size = type.getCumulativeArraySize();
+ else
+ size = 1;
+ } else {
+ // Strip off the outer array dimension for those having an extra one.
+ if (type.isArray() && qualifier.isArrayedIo(language)) {
+ TType elementType(type, 0);
+ size = computeTypeLocationSize(elementType, language);
+ } else
+ size = computeTypeLocationSize(type, language);
+ }
+
+ // Locations, and components within locations.
+ //
+ // Almost always, dealing with components means a single location is involved.
+ // The exception is a dvec3. From the spec:
+ //
+ // "A dvec3 will consume all four components of the first location and components 0 and 1 of
+ // the second location. This leaves components 2 and 3 available for other component-qualified
+ // declarations."
+ //
+ // That means, without ever mentioning a component, a component range
+ // for a different location gets specified, if it's not a vertex shader input. (!)
+ // (A vertex shader input will show using only one location, even for a dvec3/4.)
+ //
+ // So, for the case of dvec3, we need two independent ioRanges.
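+    //
+    // Example (illustrative): 'layout(location = 4) out dvec3 v;' records one
+    // range covering location 4, components 0..3, and a second covering
+    // location 5, components 0..1.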
+
+ int collision = -1; // no collision
+ if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
+ (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
+ // Dealing with dvec3 in/out split across two locations.
+ // Need two io-ranges.
+ // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.
+
+ // First range:
+ TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
+ TRange componentRange(0, 3);
+ TIoRange range(locationRange, componentRange, type.getBasicType(), 0);
+
+ // check for collisions
+ collision = checkLocationRange(set, range, type, typeCollision);
+ if (collision < 0) {
+ usedIo[set].push_back(range);
+
+ // Second range:
+ TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
+ TRange componentRange2(0, 1);
+ TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0);
+
+ // check for collisions
+ collision = checkLocationRange(set, range2, type, typeCollision);
+ if (collision < 0)
+ usedIo[set].push_back(range2);
+ }
+ } else {
+ // Not a dvec3 in/out split across two locations, generic path.
+ // Need a single IO-range block.
+
+ TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
+ TRange componentRange(0, 3);
+ if (qualifier.hasComponent() || type.getVectorSize() > 0) {
+ int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
+ if (qualifier.hasComponent())
+ componentRange.start = qualifier.layoutComponent;
+ componentRange.last = componentRange.start + consumedComponents - 1;
+ }
+
+ // combine location and component ranges
+ TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0);
+
+ // check for collisions, except for vertex inputs on desktop targeting OpenGL
+ if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
+ collision = checkLocationRange(set, range, type, typeCollision);
+
+ if (collision < 0)
+ usedIo[set].push_back(range);
+ }
+
+ return collision;
+}
+
+// Compare a new (the passed in) 'range' against the existing set, and see
+// if there are any collisions.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
+{
+ for (size_t r = 0; r < usedIo[set].size(); ++r) {
+ if (range.overlap(usedIo[set][r])) {
+ // there is a collision; pick one
+ return std::max(range.location.start, usedIo[set][r].location.start);
+ } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) {
+ // aliased-type mismatch
+ typeCollision = true;
+ return std::max(range.location.start, usedIo[set][r].location.start);
+ }
+ }
+
+ return -1; // no collision
+}
+
+// Accumulate bindings and offsets, and check for collisions
+// as the accumulation is done.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
+{
+ TRange bindingRange(binding, binding);
+ TRange offsetRange(offset, offset + numOffsets - 1);
+ TOffsetRange range(bindingRange, offsetRange);
+
+    // check for collisions
+ for (size_t r = 0; r < usedAtomics.size(); ++r) {
+ if (range.overlap(usedAtomics[r])) {
+ // there is a collision; pick one
+ return std::max(offset, usedAtomics[r].offset.start);
+ }
+ }
+
+ usedAtomics.push_back(range);
+
+ return -1; // no collision
+}
+
+// Accumulate used constant_id values.
+//
+// Return false if one was already used.
+bool TIntermediate::addUsedConstantId(int id)
+{
+ if (usedConstantId.find(id) != usedConstantId.end())
+ return false;
+
+ usedConstantId.insert(id);
+
+ return true;
+}
+
+// Recursively figure out how many locations are used up by an input or output type.
+// Return the size of type, as measured by "locations".
+int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
+{
+ // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
+ // consecutive locations..."
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+        // TODO: are there valid cases of having an unsized array with a location? If so, this code runs too early.
+ TType elementType(type, 0);
+ if (type.isSizedArray()
+#ifdef NV_EXTENSIONS
+ && !type.getQualifier().isPerView()
+#endif
+ )
+ return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
+ else {
+#ifdef NV_EXTENSIONS
+ // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
+ elementType.getQualifier().perViewNV = false;
+#endif
+ return computeTypeLocationSize(elementType, stage);
+ }
+ }
+
+ // "The locations consumed by block and structure members are determined by applying the rules above
+ // recursively..."
+ if (type.isStruct()) {
+ int size = 0;
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ size += computeTypeLocationSize(memberType, stage);
+ }
+ return size;
+ }
+
+ // ES: "If a shader input is any scalar or vector type, it will consume a single location."
+
+ // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
+ // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
+ // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
+ // consume only a single location, in all stages."
+ if (type.isScalar())
+ return 1;
+ if (type.isVector()) {
+ if (stage == EShLangVertex && type.getQualifier().isPipeInput())
+ return 1;
+ if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
+ return 2;
+ else
+ return 1;
+ }
+
+ // "If the declared input is an n x m single- or double-precision matrix, ...
+ // The number of locations assigned for each matrix will be the same as
+ // for an n-element array of m-component vectors..."
+ if (type.isMatrix()) {
+ TType columnType(type, 0);
+ return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
+ }
+
+ assert(0);
+ return 1;
+}
+
+// Same as computeTypeLocationSize but for uniforms
+int TIntermediate::computeTypeUniformLocationSize(const TType& type)
+{
+ // "Individual elements of a uniform array are assigned
+ // consecutive locations with the first element taking location
+ // location."
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ TType elementType(type, 0);
+ if (type.isSizedArray()) {
+ return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
+ } else {
+            // TODO: are there valid cases of having an implicitly-sized array with a location? If so, this code runs too early.
+ return computeTypeUniformLocationSize(elementType);
+ }
+ }
+
+ // "Each subsequent inner-most member or element gets incremental
+ // locations for the entire structure or array."
+ if (type.isStruct()) {
+ int size = 0;
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ size += computeTypeUniformLocationSize(memberType);
+ }
+ return size;
+ }
+
+ return 1;
+}
+
+// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
+//
+// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
+//
+int TIntermediate::addXfbBufferOffset(const TType& type)
+{
+ const TQualifier& qualifier = type.getQualifier();
+
+ assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
+ TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
+
+ // compute the range
+#ifdef AMD_EXTENSIONS
+ unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
+#else
+ unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType);
+#endif
+ buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
+ TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
+
+ // check for collisions
+ for (size_t r = 0; r < buffer.ranges.size(); ++r) {
+ if (range.overlap(buffer.ranges[r])) {
+ // there is a collision; pick an example to return
+ return std::max(range.start, buffer.ranges[r].start);
+ }
+ }
+
+ buffer.ranges.push_back(range);
+
+ return -1; // no collision
+}
+
+// Recursively figure out how many bytes of xfb buffer are used by the given type.
+// Return the size of type, in bytes.
+// Sets contains64BitType to true if the type contains a 64-bit data type.
+#ifdef AMD_EXTENSIONS
+// Sets contains32BitType to true if the type contains a 32-bit data type.
+// Sets contains16BitType to true if the type contains a 16-bit data type.
+// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
+unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
+#else
+// N.B. Caller must set contains64BitType to false before calling.
+unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType) const
+#endif
+{
+ // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+ // and the space taken in the buffer will be a multiple of 8.
+ // ...within the qualified entity, subsequent components are each
+ // assigned, in order, to the next available offset aligned to a multiple of
+ // that component's size. Aggregate types are flattened down to the component
+ // level to get this sequence of components."
+
+ if (type.isArray()) {
+ // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ assert(type.isSizedArray());
+ TType elementType(type, 0);
+#ifdef AMD_EXTENSIONS
+        return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
+#else
+ return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType);
+#endif
+ }
+
+ if (type.isStruct()) {
+ unsigned int size = 0;
+ bool structContains64BitType = false;
+#ifdef AMD_EXTENSIONS
+ bool structContains32BitType = false;
+ bool structContains16BitType = false;
+#endif
+ for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+ TType memberType(type, member);
+ // "... if applied to
+ // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+ // and the space taken in the buffer will be a multiple of 8."
+ bool memberContains64BitType = false;
+#ifdef AMD_EXTENSIONS
+ bool memberContains32BitType = false;
+ bool memberContains16BitType = false;
+ int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
+#else
+ int memberSize = computeTypeXfbSize(memberType, memberContains64BitType);
+#endif
+ if (memberContains64BitType) {
+ structContains64BitType = true;
+ RoundToPow2(size, 8);
+#ifdef AMD_EXTENSIONS
+ } else if (memberContains32BitType) {
+ structContains32BitType = true;
+ RoundToPow2(size, 4);
+ } else if (memberContains16BitType) {
+ structContains16BitType = true;
+ RoundToPow2(size, 2);
+#endif
+ }
+ size += memberSize;
+ }
+
+ if (structContains64BitType) {
+ contains64BitType = true;
+ RoundToPow2(size, 8);
+#ifdef AMD_EXTENSIONS
+ } else if (structContains32BitType) {
+ contains32BitType = true;
+ RoundToPow2(size, 4);
+ } else if (structContains16BitType) {
+ contains16BitType = true;
+ RoundToPow2(size, 2);
+#endif
+ }
+ return size;
+ }
+
+ int numComponents;
+ if (type.isScalar())
+ numComponents = 1;
+ else if (type.isVector())
+ numComponents = type.getVectorSize();
+ else if (type.isMatrix())
+ numComponents = type.getMatrixCols() * type.getMatrixRows();
+ else {
+ assert(0);
+ numComponents = 1;
+ }
+
+ if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
+ contains64BitType = true;
+ return 8 * numComponents;
+#ifdef AMD_EXTENSIONS
+ } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
+ contains16BitType = true;
+ return 2 * numComponents;
+ } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
+ return numComponents;
+ else {
+ contains32BitType = true;
+ return 4 * numComponents;
+ }
+#else
+ } else
+ return 4 * numComponents;
+#endif
+}
+
+const int baseAlignmentVec4Std140 = 16;
+
+// Return the size and alignment of a component of the given type.
+// The size is returned in the 'size' parameter.
+// The return value is the alignment.
+int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
+{
+ switch (type.getBasicType()) {
+ case EbtInt64:
+ case EbtUint64:
+ case EbtDouble: size = 8; return 8;
+ case EbtFloat16: size = 2; return 2;
+ case EbtInt8:
+ case EbtUint8: size = 1; return 1;
+ case EbtInt16:
+ case EbtUint16: size = 2; return 2;
+ case EbtReference: size = 8; return 8;
+ default: size = 4; return 4;
+ }
+}
+
+// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
+// Operates recursively.
+//
+// If std140 is true, it does the rounding up to vec4 size required by std140,
+// otherwise it does not, yielding std430 rules.
+//
+// The size is returned in the 'size' parameter
+//
+// The stride is only non-0 for arrays or matrices, and is the stride of the
+// top-level object nested within the type. E.g., for an array of matrices,
+// it is the distance needed between matrices, despite the rules saying the
+// stride comes from the flattening down to vectors.
+//
+// Return value is the alignment of the type.
+int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
+{
+ int alignment;
+
+ bool std140 = layoutPacking == glslang::ElpStd140;
+ // When using the std140 storage layout, structures will be laid out in buffer
+ // storage with its members stored in monotonically increasing order based on their
+ // location in the declaration. A structure and each structure member have a base
+ // offset and a base alignment, from which an aligned offset is computed by rounding
+ // the base offset up to a multiple of the base alignment. The base offset of the first
+ // member of a structure is taken from the aligned offset of the structure itself. The
+ // base offset of all other structure members is derived by taking the offset of the
+ // last basic machine unit consumed by the previous member and adding one. Each
+ // structure member is stored in memory at its aligned offset. The members of a top-
+ // level uniform block are laid out in buffer storage by treating the uniform block as
+ // a structure with a base offset of zero.
+ //
+ // 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
+ //
+ // 2. If the member is a two- or four-component vector with components consuming N basic
+ // machine units, the base alignment is 2N or 4N, respectively.
+ //
+ // 3. If the member is a three-component vector with components consuming N
+ // basic machine units, the base alignment is 4N.
+ //
+ // 4. If the member is an array of scalars or vectors, the base alignment and array
+ // stride are set to match the base alignment of a single array element, according
+ // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
+ // array may have padding at the end; the base offset of the member following
+ // the array is rounded up to the next multiple of the base alignment.
+ //
+ // 5. If the member is a column-major matrix with C columns and R rows, the
+ // matrix is stored identically to an array of C column vectors with R
+ // components each, according to rule (4).
+ //
+ // 6. If the member is an array of S column-major matrices with C columns and
+ // R rows, the matrix is stored identically to a row of S X C column vectors
+ // with R components each, according to rule (4).
+ //
+ // 7. If the member is a row-major matrix with C columns and R rows, the matrix
+ // is stored identically to an array of R row vectors with C components each,
+ // according to rule (4).
+ //
+ // 8. If the member is an array of S row-major matrices with C columns and R
+ // rows, the matrix is stored identically to a row of S X R row vectors with C
+ // components each, according to rule (4).
+ //
+    // 9. If the member is a structure, the base alignment of the structure is N, where
+ // N is the largest base alignment value of any of its members, and rounded
+ // up to the base alignment of a vec4. The individual members of this substructure
+ // are then assigned offsets by applying this set of rules recursively,
+ // where the base offset of the first member of the sub-structure is equal to the
+ // aligned offset of the structure. The structure may have padding at the end;
+ // the base offset of the member following the sub-structure is rounded up to
+ // the next multiple of the base alignment of the structure.
+ //
+ // 10. If the member is an array of S structures, the S elements of the array are laid
+ // out in order, according to rule (9).
+ //
+ // Assuming, for rule 10: The stride is the same as the size of an element.
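+    //
+    // Worked example (illustrative): for struct S { float f; vec3 v; mat2 m; }
+    // under std140: f sits at offset 0 (rule 1); v aligns to 16 (rule 3) and
+    // occupies bytes 16..27; m is treated as an array of two vec2 columns whose
+    // stride rounds up to 16 (rules 4 and 5), placing columns at 32 and 48;
+    // the struct size rounds up to 64 (rule 9). std430 drops the vec4 round-up.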
+
+ stride = 0;
+ int dummyStride;
+
+ // rules 4, 6, 8, and 10
+ if (type.isArray()) {
+ // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+ TType derefType(type, 0);
+ alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
+ if (std140)
+ alignment = std::max(baseAlignmentVec4Std140, alignment);
+ RoundToPow2(size, alignment);
+        stride = size; // uses full matrix size for stride of an array of matrices (not quite what rules 6/8 say, but what's expected)
+ // uses the assumption for rule 10 in the comment above
+ size = stride * type.getOuterArraySize();
+ return alignment;
+ }
+
+ // rule 9
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& memberList = *type.getStruct();
+
+ size = 0;
+ int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
+ for (size_t m = 0; m < memberList.size(); ++m) {
+ int memberSize;
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
+ int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
+ (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
+ maxAlignment = std::max(maxAlignment, memberAlignment);
+ RoundToPow2(size, memberAlignment);
+ size += memberSize;
+ }
+
+ // The structure may have padding at the end; the base offset of
+ // the member following the sub-structure is rounded up to the next
+ // multiple of the base alignment of the structure.
+ RoundToPow2(size, maxAlignment);
+
+ return maxAlignment;
+ }
+
+ // rule 1
+ if (type.isScalar())
+ return getBaseAlignmentScalar(type, size);
+
+ // rules 2 and 3
+ if (type.isVector()) {
+ int scalarAlign = getBaseAlignmentScalar(type, size);
+ switch (type.getVectorSize()) {
+ case 1: // HLSL has this, GLSL does not
+ return scalarAlign;
+ case 2:
+ size *= 2;
+ return 2 * scalarAlign;
+ default:
+ size *= type.getVectorSize();
+ return 4 * scalarAlign;
+ }
+ }
+
+ // rules 5 and 7
+ if (type.isMatrix()) {
+ // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
+ TType derefType(type, 0, rowMajor);
+
+ alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
+ if (std140)
+ alignment = std::max(baseAlignmentVec4Std140, alignment);
+ RoundToPow2(size, alignment);
+        stride = size; // use intra-matrix stride for the stride of just a matrix
+ if (rowMajor)
+ size = stride * type.getMatrixRows();
+ else
+ size = stride * type.getMatrixCols();
+
+ return alignment;
+ }
+
+ assert(0); // all cases should be covered above
+ size = baseAlignmentVec4Std140;
+ return baseAlignmentVec4Std140;
+}
+
+// To aid the basic HLSL rule about crossing vec4 boundaries.
+bool TIntermediate::improperStraddle(const TType& type, int size, int offset)
+{
+ if (! type.isVector() || type.isArray())
+ return false;
+
+ return size <= 16 ? offset / 16 != (offset + size - 1) / 16
+ : offset % 16 != 0;
+}
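+
+// Example (illustrative): a vec4 (size 16) at offset 8 spans bytes 8..23 and
+// crosses a 16-byte boundary (8/16 != 23/16), so it straddles; a vector larger
+// than 16 bytes, such as a dvec3, straddles whenever offset % 16 != 0.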
+
+int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
+{
+ int alignment;
+
+ stride = 0;
+ int dummyStride;
+
+ if (type.isArray()) {
+ TType derefType(type, 0);
+ alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
+
+ stride = size;
+ RoundToPow2(stride, alignment);
+
+ size = stride * (type.getOuterArraySize() - 1) + size;
+ return alignment;
+ }
+
+ if (type.getBasicType() == EbtStruct) {
+ const TTypeList& memberList = *type.getStruct();
+
+ size = 0;
+ int maxAlignment = 0;
+ for (size_t m = 0; m < memberList.size(); ++m) {
+ int memberSize;
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
+ int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
+ (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
+ maxAlignment = std::max(maxAlignment, memberAlignment);
+ RoundToPow2(size, memberAlignment);
+ size += memberSize;
+ }
+
+ return maxAlignment;
+ }
+
+ if (type.isScalar())
+ return getBaseAlignmentScalar(type, size);
+
+ if (type.isVector()) {
+ int scalarAlign = getBaseAlignmentScalar(type, size);
+
+ size *= type.getVectorSize();
+ return scalarAlign;
+ }
+
+ if (type.isMatrix()) {
+ TType derefType(type, 0, rowMajor);
+
+ alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);
+
+        stride = size; // use intra-matrix stride for the stride of just a matrix
+ if (rowMajor)
+ size = stride * type.getMatrixRows();
+ else
+ size = stride * type.getMatrixCols();
+
+ return alignment;
+ }
+
+ assert(0); // all cases should be covered above
+ size = 1;
+ return 1;
+}
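+
+// Example (illustrative): under scalar block layout a vec3 aligns to 4 and an
+// array of vec3 has stride 12, where std140/std430 would align and stride at 16.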
+
+int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
+{
+ if (layoutPacking == glslang::ElpScalar) {
+ return getScalarAlignment(type, size, stride, rowMajor);
+ } else {
+ return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
+ }
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/localintermediate.h b/src/3rdparty/glslang/glslang/MachineIndependent/localintermediate.h
new file mode 100644
index 0000000..ba17725
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/localintermediate.h
@@ -0,0 +1,896 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2016 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _LOCAL_INTERMEDIATE_INCLUDED_
+#define _LOCAL_INTERMEDIATE_INCLUDED_
+
+#include "../Include/intermediate.h"
+#include "../Public/ShaderLang.h"
+#include "Versions.h"
+
+#include <string>
+#include <vector>
+#include <algorithm>
+#include <set>
+#include <array>
+
+class TInfoSink;
+
+namespace glslang {
+
+struct TMatrixSelector {
+ int coord1; // stay agnostic about column/row; this is parse order
+ int coord2;
+};
+
+typedef int TVectorSelector;
+
+const int MaxSwizzleSelectors = 4;
+
+template<typename selectorType>
+class TSwizzleSelectors {
+public:
+ TSwizzleSelectors() : size_(0) { }
+
+ void push_back(selectorType comp)
+ {
+ if (size_ < MaxSwizzleSelectors)
+ components[size_++] = comp;
+ }
+ void resize(int s)
+ {
+ assert(s <= size_);
+ size_ = s;
+ }
+ int size() const { return size_; }
+ selectorType operator[](int i) const
+ {
+ assert(i < MaxSwizzleSelectors);
+ return components[i];
+ }
+
+private:
+ int size_;
+ selectorType components[MaxSwizzleSelectors];
+};
+
+//
+// Some helper structures for TIntermediate. Their contents are encapsulated
+// by TIntermediate.
+//
+
+// Used for call-graph algorithms for detecting recursion, missing bodies, and dead bodies.
+// A "call" is a pair: <caller, callee>.
+// There can be duplicates. General assumption is the list is small.
+struct TCall {
+ TCall(const TString& pCaller, const TString& pCallee) : caller(pCaller), callee(pCallee) { }
+ TString caller;
+ TString callee;
+ bool visited;
+ bool currentPath;
+ bool errorGiven;
+ int calleeBodyPosition;
+};
+
+// A generic 1-D range.
+struct TRange {
+ TRange(int start, int last) : start(start), last(last) { }
+ bool overlap(const TRange& rhs) const
+ {
+ return last >= rhs.start && start <= rhs.last;
+ }
+ int start;
+ int last;
+};
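+
+// Example (illustrative): TRange(2, 5).overlap(TRange(5, 8)) is true (both
+// contain 5), while TRange(2, 4).overlap(TRange(6, 8)) is false.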
+
+// An IO range is a 3-D rectangle; the set of (location, component, index) triples all lying
+// within the same location range, component range, and index value. Locations don't alias unless
+// all other dimensions of their range overlap.
+struct TIoRange {
+ TIoRange(TRange location, TRange component, TBasicType basicType, int index)
+ : location(location), component(component), basicType(basicType), index(index) { }
+ bool overlap(const TIoRange& rhs) const
+ {
+ return location.overlap(rhs.location) && component.overlap(rhs.component) && index == rhs.index;
+ }
+ TRange location;
+ TRange component;
+ TBasicType basicType;
+ int index;
+};
+
+// An offset range is a 2-D rectangle; the set of (binding, offset) pairs all lying
+// within the same binding and offset range.
+struct TOffsetRange {
+ TOffsetRange(TRange binding, TRange offset)
+ : binding(binding), offset(offset) { }
+ bool overlap(const TOffsetRange& rhs) const
+ {
+ return binding.overlap(rhs.binding) && offset.overlap(rhs.offset);
+ }
+ TRange binding;
+ TRange offset;
+};
+
+// Things that need to be tracked per xfb buffer.
+struct TXfbBuffer {
+#ifdef AMD_EXTENSIONS
+ TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false),
+ contains32BitType(false), contains16BitType(false) { }
+#else
+ TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false) { }
+#endif
+ std::vector<TRange> ranges; // byte offsets that have already been assigned
+ unsigned int stride;
+ unsigned int implicitStride;
+ bool contains64BitType;
+#ifdef AMD_EXTENSIONS
+ bool contains32BitType;
+ bool contains16BitType;
+#endif
+};
+
+// Track a set of strings describing how the module was processed.
+// Using the form:
+// process arg0 arg1 arg2 ...
+// process arg0 arg1 arg2 ...
+// where everything is textual, and there can be zero or more arguments
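+// For example (illustrative): addProcess("entry-point") followed by
+// addArgument("main") yields the line "entry-point main".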
+class TProcesses {
+public:
+ TProcesses() {}
+ ~TProcesses() {}
+
+ void addProcess(const char* process)
+ {
+ processes.push_back(process);
+ }
+ void addProcess(const std::string& process)
+ {
+ processes.push_back(process);
+ }
+ void addArgument(int arg)
+ {
+ processes.back().append(" ");
+ std::string argString = std::to_string(arg);
+ processes.back().append(argString);
+ }
+ void addArgument(const char* arg)
+ {
+ processes.back().append(" ");
+ processes.back().append(arg);
+ }
+ void addArgument(const std::string& arg)
+ {
+ processes.back().append(" ");
+ processes.back().append(arg);
+ }
+ void addIfNonZero(const char* process, int value)
+ {
+ if (value != 0) {
+ addProcess(process);
+ addArgument(value);
+ }
+ }
+
+ const std::vector<std::string>& getProcesses() const { return processes; }
+
+private:
+ std::vector<std::string> processes;
+};
+
+class TSymbolTable;
+class TSymbol;
+class TVariable;
+
+#ifdef NV_EXTENSIONS
+//
+// Compute-shader derivative computation mode.
+//
+enum ComputeDerivativeMode {
+    LayoutDerivativeNone, // default layout, when SPV_NV_compute_shader_derivatives is not enabled
+ LayoutDerivativeGroupQuads, // derivative_group_quadsNV
+ LayoutDerivativeGroupLinear, // derivative_group_linearNV
+};
+#endif
+
+//
+// Set of helper functions to help parse and build the tree.
+//
+class TIntermediate {
+public:
+ explicit TIntermediate(EShLanguage l, int v = 0, EProfile p = ENoProfile) :
+ implicitThisName("@this"), implicitCounterName("@count"),
+ language(l), source(EShSourceNone), profile(p), version(v), treeRoot(0),
+ numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false),
+ invocations(TQualifier::layoutNotSet), vertices(TQualifier::layoutNotSet),
+ inputPrimitive(ElgNone), outputPrimitive(ElgNone),
+ pixelCenterInteger(false), originUpperLeft(false),
+ vertexSpacing(EvsNone), vertexOrder(EvoNone), pointMode(false), earlyFragmentTests(false),
+ postDepthCoverage(false), depthLayout(EldNone), depthReplacing(false),
+ hlslFunctionality1(false),
+ blendEquations(0), xfbMode(false), multiStream(false),
+#ifdef NV_EXTENSIONS
+ layoutOverrideCoverage(false),
+ geoPassthroughEXT(false),
+ numShaderRecordNVBlocks(0),
+ computeDerivativeMode(LayoutDerivativeNone),
+ primitives(TQualifier::layoutNotSet),
+ numTaskNVBlocks(0),
+#endif
+ autoMapBindings(false),
+ autoMapLocations(false),
+ invertY(false),
+ flattenUniformArrays(false),
+ useUnknownFormat(false),
+ hlslOffsets(false),
+ useStorageBuffer(false),
+ useVulkanMemoryModel(false),
+ hlslIoMapping(false),
+ useVariablePointers(false),
+ textureSamplerTransformMode(EShTexSampTransKeep),
+ needToLegalize(false),
+ binaryDoubleOutput(false),
+ usePhysicalStorageBuffer(false),
+ uniformLocationBase(0)
+ {
+ localSize[0] = 1;
+ localSize[1] = 1;
+ localSize[2] = 1;
+ localSizeSpecId[0] = TQualifier::layoutNotSet;
+ localSizeSpecId[1] = TQualifier::layoutNotSet;
+ localSizeSpecId[2] = TQualifier::layoutNotSet;
+ xfbBuffers.resize(TQualifier::layoutXfbBufferEnd);
+
+ shiftBinding.fill(0);
+ }
+ void setLimits(const TBuiltInResource& r) { resources = r; }
+
+ bool postProcess(TIntermNode*, EShLanguage);
+ void output(TInfoSink&, bool tree);
+ void removeTree();
+
+ void setSource(EShSource s) { source = s; }
+ EShSource getSource() const { return source; }
+ void setEntryPointName(const char* ep)
+ {
+ entryPointName = ep;
+ processes.addProcess("entry-point");
+ processes.addArgument(entryPointName);
+ }
+ void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
+ const std::string& getEntryPointName() const { return entryPointName; }
+ const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
+
+ void setShiftBinding(TResourceType res, unsigned int shift)
+ {
+ shiftBinding[res] = shift;
+
+ const char* name = getResourceName(res);
+ if (name != nullptr)
+ processes.addIfNonZero(name, shift);
+ }
+
+ unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
+
+ void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
+ {
+ if (shift == 0) // ignore if there's no shift: it's a no-op.
+ return;
+
+ shiftBindingForSet[res][set] = shift;
+
+ const char* name = getResourceName(res);
+ if (name != nullptr) {
+ processes.addProcess(name);
+ processes.addArgument(shift);
+ processes.addArgument(set);
+ }
+ }
+
+ int getShiftBindingForSet(TResourceType res, unsigned int set) const
+ {
+ const auto shift = shiftBindingForSet[res].find(set);
+ return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
+ }
+ bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
+
+ void setResourceSetBinding(const std::vector<std::string>& shift)
+ {
+ resourceSetBinding = shift;
+ if (shift.size() > 0) {
+ processes.addProcess("resource-set-binding");
+ for (int s = 0; s < (int)shift.size(); ++s)
+ processes.addArgument(shift[s]);
+ }
+ }
+ const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
+ void setAutoMapBindings(bool map)
+ {
+ autoMapBindings = map;
+ if (autoMapBindings)
+ processes.addProcess("auto-map-bindings");
+ }
+ bool getAutoMapBindings() const { return autoMapBindings; }
+ void setAutoMapLocations(bool map)
+ {
+ autoMapLocations = map;
+ if (autoMapLocations)
+ processes.addProcess("auto-map-locations");
+ }
+ bool getAutoMapLocations() const { return autoMapLocations; }
+ void setInvertY(bool invert)
+ {
+ invertY = invert;
+ if (invertY)
+ processes.addProcess("invert-y");
+ }
+ bool getInvertY() const { return invertY; }
+
+ void setFlattenUniformArrays(bool flatten)
+ {
+ flattenUniformArrays = flatten;
+ if (flattenUniformArrays)
+ processes.addProcess("flatten-uniform-arrays");
+ }
+ bool getFlattenUniformArrays() const { return flattenUniformArrays; }
+ void setNoStorageFormat(bool b)
+ {
+ useUnknownFormat = b;
+ if (useUnknownFormat)
+ processes.addProcess("no-storage-format");
+ }
+ bool getNoStorageFormat() const { return useUnknownFormat; }
+ void setHlslOffsets()
+ {
+ hlslOffsets = true;
+ if (hlslOffsets)
+ processes.addProcess("hlsl-offsets");
+ }
+ bool usingHlslOffsets() const { return hlslOffsets; }
+ void setUseStorageBuffer()
+ {
+ useStorageBuffer = true;
+ processes.addProcess("use-storage-buffer");
+ }
+ bool usingStorageBuffer() const { return useStorageBuffer; }
+ void setHlslIoMapping(bool b)
+ {
+ hlslIoMapping = b;
+ if (hlslIoMapping)
+ processes.addProcess("hlsl-iomap");
+ }
+ bool usingHlslIoMapping() { return hlslIoMapping; }
+ void setUseVulkanMemoryModel()
+ {
+ useVulkanMemoryModel = true;
+ processes.addProcess("use-vulkan-memory-model");
+ }
+ bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; }
+ void setUsePhysicalStorageBuffer()
+ {
+ usePhysicalStorageBuffer = true;
+ }
+ bool usingPhysicalStorageBuffer() const { return usePhysicalStorageBuffer; }
+ void setUseVariablePointers()
+ {
+ useVariablePointers = true;
+ processes.addProcess("use-variable-pointers");
+ }
+ bool usingVariablePointers() const { return useVariablePointers; }
+
+ template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
+ bool hasCounterBufferName(const TString& name) const {
+ size_t len = strlen(implicitCounterName);
+ return name.size() > len &&
+ name.compare(name.size() - len, len, implicitCounterName) == 0;
+ }
+
+ void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; }
+
+ void setVersion(int v) { version = v; }
+ int getVersion() const { return version; }
+ void setProfile(EProfile p) { profile = p; }
+ EProfile getProfile() const { return profile; }
+ void setSpv(const SpvVersion& s)
+ {
+ spvVersion = s;
+
+ // client processes
+ if (spvVersion.vulkan > 0)
+ processes.addProcess("client vulkan100");
+ if (spvVersion.openGl > 0)
+ processes.addProcess("client opengl100");
+
+ // target SPV
+ switch (spvVersion.spv) {
+ case 0:
+ break;
+ case EShTargetSpv_1_0:
+ break;
+ case EShTargetSpv_1_1:
+ processes.addProcess("target-env spirv1.1");
+ break;
+ case EShTargetSpv_1_2:
+ processes.addProcess("target-env spirv1.2");
+ break;
+ case EShTargetSpv_1_3:
+ processes.addProcess("target-env spirv1.3");
+ break;
+ default:
+ processes.addProcess("target-env spirvUnknown");
+ break;
+ }
+
+ // target-environment processes
+ switch (spvVersion.vulkan) {
+ case 0:
+ break;
+ case EShTargetVulkan_1_0:
+ processes.addProcess("target-env vulkan1.0");
+ break;
+ case EShTargetVulkan_1_1:
+ processes.addProcess("target-env vulkan1.1");
+ break;
+ default:
+ processes.addProcess("target-env vulkanUnknown");
+ break;
+ }
+ if (spvVersion.openGl > 0)
+ processes.addProcess("target-env opengl");
+ }
+ const SpvVersion& getSpv() const { return spvVersion; }
+ EShLanguage getStage() const { return language; }
+ void addRequestedExtension(const char* extension) { requestedExtensions.insert(extension); }
+ const std::set<std::string>& getRequestedExtensions() const { return requestedExtensions; }
+
+ void setTreeRoot(TIntermNode* r) { treeRoot = r; }
+ TIntermNode* getTreeRoot() const { return treeRoot; }
+ void incrementEntryPointCount() { ++numEntryPoints; }
+ int getNumEntryPoints() const { return numEntryPoints; }
+ int getNumErrors() const { return numErrors; }
+ void addPushConstantCount() { ++numPushConstants; }
+#ifdef NV_EXTENSIONS
+ void addShaderRecordNVCount() { ++numShaderRecordNVBlocks; }
+ void addTaskNVCount() { ++numTaskNVBlocks; }
+#endif
+
+ bool isRecursive() const { return recursive; }
+
+ TIntermSymbol* addSymbol(const TVariable&);
+ TIntermSymbol* addSymbol(const TVariable&, const TSourceLoc&);
+ TIntermSymbol* addSymbol(const TType&, const TSourceLoc&);
+ TIntermSymbol* addSymbol(const TIntermSymbol&);
+ TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*);
+ std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1);
+ TIntermTyped* addUniShapeConversion(TOperator, const TType&, TIntermTyped*);
+ TIntermTyped* addConversion(TBasicType convertTo, TIntermTyped* node) const;
+ void addBiShapeConversion(TOperator, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode);
+ TIntermTyped* addShapeConversion(const TType&, TIntermTyped*);
+ TIntermTyped* addBinaryMath(TOperator, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
+ TIntermTyped* addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
+ TIntermTyped* addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc);
+ TIntermTyped* addUnaryMath(TOperator, TIntermTyped* child, TSourceLoc);
+ TIntermTyped* addBuiltInFunctionCall(const TSourceLoc& line, TOperator, bool unary, TIntermNode*, const TType& returnType);
+ bool canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op = EOpNull) const;
+ bool isIntegralPromotion(TBasicType from, TBasicType to) const;
+ bool isFPPromotion(TBasicType from, TBasicType to) const;
+ bool isIntegralConversion(TBasicType from, TBasicType to) const;
+ bool isFPConversion(TBasicType from, TBasicType to) const;
+ bool isFPIntegralConversion(TBasicType from, TBasicType to) const;
+ TOperator mapTypeToConstructorOp(const TType&) const;
+ TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right);
+ TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc&);
+ TIntermAggregate* makeAggregate(TIntermNode* node);
+ TIntermAggregate* makeAggregate(TIntermNode* node, const TSourceLoc&);
+ TIntermAggregate* makeAggregate(const TSourceLoc&);
+ TIntermTyped* setAggregateOperator(TIntermNode*, TOperator, const TType& type, TSourceLoc);
+ bool areAllChildConst(TIntermAggregate* aggrNode);
+ TIntermSelection* addSelection(TIntermTyped* cond, TIntermNodePair code, const TSourceLoc&);
+ TIntermTyped* addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, const TSourceLoc&);
+ TIntermTyped* addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc&);
+ TIntermTyped* addMethod(TIntermTyped*, const TType&, const TString*, const TSourceLoc&);
+ TIntermConstantUnion* addConstantUnion(const TConstUnionArray&, const TType&, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(signed char, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned char, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(signed short, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned short, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(int, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned int, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(long long, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(unsigned long long, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(bool, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(double, TBasicType, const TSourceLoc&, bool literal = false) const;
+ TIntermConstantUnion* addConstantUnion(const TString*, const TSourceLoc&, bool literal = false) const;
+ TIntermTyped* promoteConstantUnion(TBasicType, TIntermConstantUnion*) const;
+ bool parseConstTree(TIntermNode*, TConstUnionArray, TOperator, const TType&, bool singleConstantParam = false);
+ TIntermLoop* addLoop(TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst, const TSourceLoc&);
+ TIntermAggregate* addForLoop(TIntermNode*, TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst,
+ const TSourceLoc&, TIntermLoop*&);
+ TIntermBranch* addBranch(TOperator, const TSourceLoc&);
+ TIntermBranch* addBranch(TOperator, TIntermTyped*, const TSourceLoc&);
+ template<typename selectorType> TIntermTyped* addSwizzle(TSwizzleSelectors<selectorType>&, const TSourceLoc&);
+
+    // Low-level functions to add nodes (no conversions or other higher-level transformations)
+ // If a type is provided, the node's type will be set to it.
+ TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc) const;
+ TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc, const TType&) const;
+ TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc) const;
+ TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc, const TType&) const;
+
+ // Constant folding (in Constant.cpp)
+ TIntermTyped* fold(TIntermAggregate* aggrNode);
+ TIntermTyped* foldConstructor(TIntermAggregate* aggrNode);
+ TIntermTyped* foldDereference(TIntermTyped* node, int index, const TSourceLoc&);
+ TIntermTyped* foldSwizzle(TIntermTyped* node, TSwizzleSelectors<TVectorSelector>& fields, const TSourceLoc&);
+
+ // Tree ops
+ static const TIntermTyped* findLValueBase(const TIntermTyped*, bool swizzleOkay);
+
+ // Linkage related
+ void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
+ void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
+
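+    // The setters below use first-setting-wins merge semantics: once a value has
+    // been set, a later call succeeds only if the new value agrees with the old.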
+ bool setInvocations(int i)
+ {
+ if (invocations != TQualifier::layoutNotSet)
+ return invocations == i;
+ invocations = i;
+ return true;
+ }
+ int getInvocations() const { return invocations; }
+ bool setVertices(int m)
+ {
+ if (vertices != TQualifier::layoutNotSet)
+ return vertices == m;
+ vertices = m;
+ return true;
+ }
+ int getVertices() const { return vertices; }
+ bool setInputPrimitive(TLayoutGeometry p)
+ {
+ if (inputPrimitive != ElgNone)
+ return inputPrimitive == p;
+ inputPrimitive = p;
+ return true;
+ }
+ TLayoutGeometry getInputPrimitive() const { return inputPrimitive; }
+ bool setVertexSpacing(TVertexSpacing s)
+ {
+ if (vertexSpacing != EvsNone)
+ return vertexSpacing == s;
+ vertexSpacing = s;
+ return true;
+ }
+ TVertexSpacing getVertexSpacing() const { return vertexSpacing; }
+ bool setVertexOrder(TVertexOrder o)
+ {
+ if (vertexOrder != EvoNone)
+ return vertexOrder == o;
+ vertexOrder = o;
+ return true;
+ }
+ TVertexOrder getVertexOrder() const { return vertexOrder; }
+ void setPointMode() { pointMode = true; }
+ bool getPointMode() const { return pointMode; }
+
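+    // A default local size of 1 is indistinguishable from an explicit 1, so only
+    // sizes > 1 are treated as "already set" when checking for conflicts here.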
+ bool setLocalSize(int dim, int size)
+ {
+ if (localSize[dim] > 1)
+ return size == localSize[dim];
+ localSize[dim] = size;
+ return true;
+ }
+ unsigned int getLocalSize(int dim) const { return localSize[dim]; }
+
+ bool setLocalSizeSpecId(int dim, int id)
+ {
+ if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
+ return id == localSizeSpecId[dim];
+ localSizeSpecId[dim] = id;
+ return true;
+ }
+ int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
+
+ void setXfbMode() { xfbMode = true; }
+ bool getXfbMode() const { return xfbMode; }
+ void setMultiStream() { multiStream = true; }
+ bool isMultiStream() const { return multiStream; }
+ bool setOutputPrimitive(TLayoutGeometry p)
+ {
+ if (outputPrimitive != ElgNone)
+ return outputPrimitive == p;
+ outputPrimitive = p;
+ return true;
+ }
+ TLayoutGeometry getOutputPrimitive() const { return outputPrimitive; }
+ void setOriginUpperLeft() { originUpperLeft = true; }
+ bool getOriginUpperLeft() const { return originUpperLeft; }
+ void setPixelCenterInteger() { pixelCenterInteger = true; }
+ bool getPixelCenterInteger() const { return pixelCenterInteger; }
+ void setEarlyFragmentTests() { earlyFragmentTests = true; }
+ bool getEarlyFragmentTests() const { return earlyFragmentTests; }
+ void setPostDepthCoverage() { postDepthCoverage = true; }
+ bool getPostDepthCoverage() const { return postDepthCoverage; }
+ bool setDepth(TLayoutDepth d)
+ {
+ if (depthLayout != EldNone)
+ return depthLayout == d;
+ depthLayout = d;
+ return true;
+ }
+ TLayoutDepth getDepth() const { return depthLayout; }
+ void setDepthReplacing() { depthReplacing = true; }
+ bool isDepthReplacing() const { return depthReplacing; }
+
+ void setHlslFunctionality1() { hlslFunctionality1 = true; }
+ bool getHlslFunctionality1() const { return hlslFunctionality1; }
+
+ void addBlendEquation(TBlendEquationShift b) { blendEquations |= (1 << b); }
+ unsigned int getBlendEquations() const { return blendEquations; }
+
+ void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee);
+ void merge(TInfoSink&, TIntermediate&);
+ void finalCheck(TInfoSink&, bool keepUncalled);
+
+ void addIoAccessed(const TString& name) { ioAccessed.insert(name); }
+ bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); }
+
+ int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision);
+ int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision);
+ int addUsedOffsets(int binding, int offset, int numOffsets);
+ bool addUsedConstantId(int id);
+ static int computeTypeLocationSize(const TType&, EShLanguage);
+ static int computeTypeUniformLocationSize(const TType&);
+
+ bool setXfbBufferStride(int buffer, unsigned stride)
+ {
+ if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd)
+ return xfbBuffers[buffer].stride == stride;
+ xfbBuffers[buffer].stride = stride;
+ return true;
+ }
+ unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; }
+ int addXfbBufferOffset(const TType&);
+#ifdef AMD_EXTENSIONS
+ unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const;
+#else
+ unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType) const;
+#endif
+ static int getBaseAlignmentScalar(const TType&, int& size);
+ static int getBaseAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
+ static int getScalarAlignment(const TType&, int& size, int& stride, bool rowMajor);
+ static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
+ static bool improperStraddle(const TType& type, int size, int offset);
+ bool promote(TIntermOperator*);
+
+#ifdef NV_EXTENSIONS
+ void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
+ bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
+ void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
+ bool getGeoPassthroughEXT() const { return geoPassthroughEXT; }
+ void setLayoutDerivativeMode(ComputeDerivativeMode mode) { computeDerivativeMode = mode; }
+ ComputeDerivativeMode getLayoutDerivativeModeNone() const { return computeDerivativeMode; }
+ bool setPrimitives(int m)
+ {
+ if (primitives != TQualifier::layoutNotSet)
+ return primitives == m;
+ primitives = m;
+ return true;
+ }
+ int getPrimitives() const { return primitives; }
+#endif
+
+ const char* addSemanticName(const TString& name)
+ {
+ return semanticNameSet.insert(name).first->c_str();
+ }
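+    // The returned pointer remains valid for the lifetime of the entry:
+    // set nodes are stable, and nothing here erases from semanticNameSet.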
+
+ void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; }
+ const std::string& getSourceFile() const { return sourceFile; }
+ void addSourceText(const char* text, size_t len) { sourceText.append(text, len); }
+ const std::string& getSourceText() const { return sourceText; }
+ const std::map<std::string, std::string>& getIncludeText() const { return includeText; }
+    void addIncludeText(const char* name, const char* text, size_t len) { includeText[name].assign(text, len); }
+ void addProcesses(const std::vector<std::string>& p)
+ {
+        for (const auto& process : p)
+            processes.addProcess(process);
+ }
+ void addProcess(const std::string& process) { processes.addProcess(process); }
+ void addProcessArgument(const std::string& arg) { processes.addArgument(arg); }
+ const std::vector<std::string>& getProcesses() const { return processes.getProcesses(); }
+
+ void addUniformLocationOverride(const char* nameStr, int location)
+ {
+ std::string name = nameStr;
+ uniformLocationOverrides[name] = location;
+ }
+
+ int getUniformLocationOverride(const char* nameStr) const
+ {
+ std::string name = nameStr;
+ auto pos = uniformLocationOverrides.find(name);
+ if (pos == uniformLocationOverrides.end())
+ return -1;
+ else
+ return pos->second;
+ }
+
+ void setUniformLocationBase(int base) { uniformLocationBase = base; }
+ int getUniformLocationBase() const { return uniformLocationBase; }
+
+ void setNeedsLegalization() { needToLegalize = true; }
+ bool needsLegalization() const { return needToLegalize; }
+
+ void setBinaryDoubleOutput() { binaryDoubleOutput = true; }
+    bool getBinaryDoubleOutput() const { return binaryDoubleOutput; }
+
+ const char* const implicitThisName;
+ const char* const implicitCounterName;
+
+protected:
+ TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&);
+ void error(TInfoSink& infoSink, const char*);
+ void warn(TInfoSink& infoSink, const char*);
+ void mergeCallGraphs(TInfoSink&, TIntermediate&);
+ void mergeModes(TInfoSink&, TIntermediate&);
+ void mergeTrees(TInfoSink&, TIntermediate&);
+ void seedIdMap(TMap<TString, int>& idMap, int& maxId);
+ void remapIds(const TMap<TString, int>& idMap, int idShift, TIntermediate&);
+ void mergeBodies(TInfoSink&, TIntermSequence& globals, const TIntermSequence& unitGlobals);
+ void mergeLinkerObjects(TInfoSink&, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects);
+ void mergeImplicitArraySizes(TType&, const TType&);
+ void mergeErrorCheck(TInfoSink&, const TIntermSymbol&, const TIntermSymbol&, bool crossStage);
+ void checkCallGraphCycles(TInfoSink&);
+ void checkCallGraphBodies(TInfoSink&, bool keepUncalled);
+ void inOutLocationCheck(TInfoSink&);
+ TIntermAggregate* findLinkerObjects() const;
+ bool userOutputUsed() const;
+ bool isSpecializationOperation(const TIntermOperator&) const;
+ bool isNonuniformPropagating(TOperator) const;
+ bool promoteUnary(TIntermUnary&);
+ bool promoteBinary(TIntermBinary&);
+ void addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable&, const TString&);
+ bool promoteAggregate(TIntermAggregate&);
+ void pushSelector(TIntermSequence&, const TVectorSelector&, const TSourceLoc&);
+ void pushSelector(TIntermSequence&, const TMatrixSelector&, const TSourceLoc&);
+ bool specConstantPropagates(const TIntermTyped&, const TIntermTyped&);
+ void performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root);
+ bool isConversionAllowed(TOperator op, TIntermTyped* node) const;
+ TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const;
+ std::tuple<TBasicType, TBasicType> getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const;
+    bool extensionRequested(const char* extension) const { return requestedExtensions.find(extension) != requestedExtensions.end(); }
+ static const char* getResourceName(TResourceType);
+
+ const EShLanguage language; // stage, known at construction time
+ EShSource source; // source language, known a bit later
+ std::string entryPointName;
+ std::string entryPointMangledName;
+ typedef std::list<TCall> TGraph;
+ TGraph callGraph;
+
+ EProfile profile; // source profile
+ int version; // source version
+ SpvVersion spvVersion;
+ TIntermNode* treeRoot;
+    std::set<std::string> requestedExtensions; // accumulation of all enabled or required extensions; not tied to which subset of them the shader actually used
+ TBuiltInResource resources;
+ int numEntryPoints;
+ int numErrors;
+ int numPushConstants;
+ bool recursive;
+ int invocations;
+ int vertices;
+ TLayoutGeometry inputPrimitive;
+ TLayoutGeometry outputPrimitive;
+ bool pixelCenterInteger;
+ bool originUpperLeft;
+ TVertexSpacing vertexSpacing;
+ TVertexOrder vertexOrder;
+ bool pointMode;
+ int localSize[3];
+ int localSizeSpecId[3];
+ bool earlyFragmentTests;
+ bool postDepthCoverage;
+ TLayoutDepth depthLayout;
+ bool depthReplacing;
+ bool hlslFunctionality1;
+    int blendEquations;        // a bitmask: the OR of (1 << TBlendEquationShift) for each declared blend equation
+ bool xfbMode;
+ std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
+ bool multiStream;
+
+#ifdef NV_EXTENSIONS
+ bool layoutOverrideCoverage;
+ bool geoPassthroughEXT;
+ int numShaderRecordNVBlocks;
+ ComputeDerivativeMode computeDerivativeMode;
+ int primitives;
+ int numTaskNVBlocks;
+#endif
+
+ // Base shift values
+ std::array<unsigned int, EResCount> shiftBinding;
+
+ // Per-descriptor-set shift values
+ std::array<std::map<int, int>, EResCount> shiftBindingForSet;
+
+ std::vector<std::string> resourceSetBinding;
+ bool autoMapBindings;
+ bool autoMapLocations;
+ bool invertY;
+ bool flattenUniformArrays;
+ bool useUnknownFormat;
+ bool hlslOffsets;
+ bool useStorageBuffer;
+ bool useVulkanMemoryModel;
+ bool hlslIoMapping;
+ bool useVariablePointers;
+
+ std::set<TString> ioAccessed; // set of names of statically read/written I/O that might need extra checking
+ std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
+ std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
+ std::unordered_set<int> usedConstantId; // specialization constant ids used
+ std::set<TString> semanticNameSet;
+
+ EShTextureSamplerTransformMode textureSamplerTransformMode;
+
+ // source code of shader, useful as part of debug information
+ std::string sourceFile;
+ std::string sourceText;
+
+    // Included text: maps each header name to the text it included
+ std::map<std::string, std::string> includeText;
+
+ // for OpModuleProcessed, or equivalent
+ TProcesses processes;
+
+ bool needToLegalize;
+ bool binaryDoubleOutput;
+ bool usePhysicalStorageBuffer;
+
+ std::unordered_map<std::string, int> uniformLocationOverrides;
+ int uniformLocationBase;
+
+private:
+ void operator=(TIntermediate&); // prevent assignments
+};
+
+} // end namespace glslang
+
+#endif // _LOCAL_INTERMEDIATE_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/parseConst.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/parseConst.cpp
new file mode 100644
index 0000000..1a8e6d9
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/parseConst.cpp
@@ -0,0 +1,204 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Traverse a tree of constants to create a single folded constant.
+// It should only be used when the whole tree is known to be constant.
+//
+
+#include "ParseHelper.h"
+
+namespace glslang {
+
+class TConstTraverser : public TIntermTraverser {
+public:
+ TConstTraverser(const TConstUnionArray& cUnion, bool singleConstParam, TOperator constructType, const TType& t)
+ : unionArray(cUnion), type(t),
+        constructorType(constructType), singleConstantParam(singleConstParam), error(false), size(0),
+        isMatrix(false), matrixCols(0), matrixRows(0) { index = 0; tOp = EOpNull; }
+
+ virtual void visitConstantUnion(TIntermConstantUnion* node);
+ virtual bool visitAggregate(TVisit, TIntermAggregate* node);
+
+ int index;
+ TConstUnionArray unionArray;
+ TOperator tOp;
+ const TType& type;
+ TOperator constructorType;
+ bool singleConstantParam;
+ bool error;
+    int size;                // size of the constructor (e.g., 4 for vec4)
+ bool isMatrix;
+ int matrixCols;
+ int matrixRows;
+
+protected:
+ TConstTraverser(TConstTraverser&);
+ TConstTraverser& operator=(TConstTraverser&);
+};
+
+bool TConstTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
+{
+ if (! node->isConstructor() && node->getOp() != EOpComma) {
+ error = true;
+
+ return false;
+ }
+
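+    // A constructor whose single argument is already a constant (e.g., vec4(1.0))
+    // is noted here, so visitConstantUnion() can replicate the value, or build a
+    // diagonal matrix, to the full size of this constructor.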
+ bool flag = node->getSequence().size() == 1 && node->getSequence()[0]->getAsTyped()->getAsConstantUnion();
+ if (flag) {
+ singleConstantParam = true;
+ constructorType = node->getOp();
+ size = node->getType().computeNumComponents();
+
+ if (node->getType().isMatrix()) {
+ isMatrix = true;
+ matrixCols = node->getType().getMatrixCols();
+ matrixRows = node->getType().getMatrixRows();
+ }
+ }
+
+ for (TIntermSequence::iterator p = node->getSequence().begin();
+ p != node->getSequence().end(); p++) {
+
+ if (node->getOp() == EOpComma)
+ index = 0;
+
+ (*p)->traverse(this);
+ }
+    if (flag) {
+ singleConstantParam = false;
+ constructorType = EOpNull;
+ size = 0;
+ isMatrix = false;
+ matrixCols = 0;
+ matrixRows = 0;
+ }
+
+ return false;
+}
+
+void TConstTraverser::visitConstantUnion(TIntermConstantUnion* node)
+{
+ TConstUnionArray leftUnionArray(unionArray);
+ int instanceSize = type.computeNumComponents();
+
+ if (index >= instanceSize)
+ return;
+
+ if (! singleConstantParam) {
+ int rightUnionSize = node->getType().computeNumComponents();
+
+ const TConstUnionArray& rightUnionArray = node->getConstArray();
+ for (int i = 0; i < rightUnionSize; i++) {
+ if (index >= instanceSize)
+ return;
+ leftUnionArray[index] = rightUnionArray[i];
+
+ index++;
+ }
+ } else {
+ int endIndex = index + size;
+ const TConstUnionArray& rightUnionArray = node->getConstArray();
+ if (! isMatrix) {
+ int count = 0;
+ int nodeComps = node->getType().computeNumComponents();
+ for (int i = index; i < endIndex; i++) {
+ if (i >= instanceSize)
+ return;
+
+ leftUnionArray[i] = rightUnionArray[count];
+
+                index++;
+
+ if (nodeComps > 1)
+ count++;
+ }
+ } else {
+ // constructing a matrix, but from what?
+ if (node->isMatrix()) {
+ // Matrix from a matrix; this has the outer matrix, node is the argument matrix.
+ // Traverse the outer, potentially bigger matrix, fill in missing pieces with the
+ // identity matrix.
+ for (int c = 0; c < matrixCols; ++c) {
+ for (int r = 0; r < matrixRows; ++r) {
+ int targetOffset = index + c * matrixRows + r;
+ if (r < node->getType().getMatrixRows() && c < node->getType().getMatrixCols()) {
+ int srcOffset = c * node->getType().getMatrixRows() + r;
+ leftUnionArray[targetOffset] = rightUnionArray[srcOffset];
+ } else if (r == c)
+ leftUnionArray[targetOffset].setDConst(1.0);
+ else
+ leftUnionArray[targetOffset].setDConst(0.0);
+ }
+ }
+ } else {
+                // matrix from a scalar or vector: fill the diagonal, zero elsewhere (e.g., mat3(1.0))
+ int count = 0;
+ const int startIndex = index;
+ int nodeComps = node->getType().computeNumComponents();
+ for (int i = startIndex; i < endIndex; i++) {
+ if (i >= instanceSize)
+ return;
+                    if (i == startIndex || (i - startIndex) % (matrixRows + 1) == 0)
+ leftUnionArray[i] = rightUnionArray[count];
+ else
+ leftUnionArray[i].setDConst(0.0);
+
+ index++;
+
+ if (nodeComps > 1)
+ count++;
+ }
+ }
+ }
+ }
+}
+
+bool TIntermediate::parseConstTree(TIntermNode* root, TConstUnionArray unionArray, TOperator constructorType, const TType& t, bool singleConstantParam)
+{
+    if (root == nullptr)
+ return false;
+
+ TConstTraverser it(unionArray, singleConstantParam, constructorType, t);
+
+ root->traverse(&it);
+    return it.error;
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/parseVersions.h b/src/3rdparty/glslang/glslang/MachineIndependent/parseVersions.h
new file mode 100644
index 0000000..02af76a
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/parseVersions.h
@@ -0,0 +1,159 @@
+//
+// Copyright (C) 2015-2018 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// This is implemented in Versions.cpp
+
+#ifndef _PARSE_VERSIONS_INCLUDED_
+#define _PARSE_VERSIONS_INCLUDED_
+
+#include "../Public/ShaderLang.h"
+#include "../Include/InfoSink.h"
+#include "Scan.h"
+
+#include <map>
+
+namespace glslang {
+
+//
+// Base class for parse helpers.
+// This just has version-related information and checking.
+// This class should be sufficient for preprocessing.
+//
+class TParseVersions {
+public:
+ TParseVersions(TIntermediate& interm, int version, EProfile profile,
+ const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink,
+ bool forwardCompatible, EShMessages messages)
+ : infoSink(infoSink), version(version), profile(profile), language(language),
+ spvVersion(spvVersion), forwardCompatible(forwardCompatible),
+ intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
+ virtual ~TParseVersions() { }
+ virtual void initializeExtensionBehavior();
+ virtual void requireProfile(const TSourceLoc&, int queryProfiles, const char* featureDesc);
+ virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, const char* const extension, const char* featureDesc);
+ virtual void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
+ virtual void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
+ virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc);
+ virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc);
+ virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
+ virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual TExtensionBehavior getExtensionBehavior(const char*);
+ virtual bool extensionTurnedOn(const char* const extension);
+ virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]);
+ virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
+ virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
+ virtual void doubleCheck(const TSourceLoc&, const char* op);
+ virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual bool float16Arithmetic();
+ virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
+ virtual void int16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual bool int16Arithmetic();
+ virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
+ virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual bool int8Arithmetic();
+ virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
+#ifdef AMD_EXTENSIONS
+ virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+#endif
+ virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false);
+ virtual void spvRemoved(const TSourceLoc&, const char* op);
+ virtual void vulkanRemoved(const TSourceLoc&, const char* op);
+ virtual void requireVulkan(const TSourceLoc&, const char* op);
+ virtual void requireSpv(const TSourceLoc&, const char* op);
+ virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
+ virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
+ virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
+ virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false);
+
+ virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+ virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+ virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+ virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
+ const char* szExtraInfoFormat, ...) = 0;
+
+ void addError() { ++numErrors; }
+ int getNumErrors() const { return numErrors; }
+
+ void setScanner(TInputScanner* scanner) { currentScanner = scanner; }
+ TInputScanner* getScanner() const { return currentScanner; }
+ const TSourceLoc& getCurrentLoc() const { return currentScanner->getSourceLoc(); }
+ void setCurrentLine(int line) { currentScanner->setLine(line); }
+ void setCurrentColumn(int col) { currentScanner->setColumn(col); }
+ void setCurrentSourceName(const char* name) { currentScanner->setFile(name); }
+ void setCurrentString(int string) { currentScanner->setString(string); }
+
+ void getPreamble(std::string&);
+ bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
+ bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
+ bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; }
+ bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; }
+ bool hlslDX9Compatible() const { return (messages & EShMsgHlslDX9Compatible) != 0; }
+
+ TInfoSink& infoSink;
+
+ // compilation mode
+ int version; // version, updated by #version in the shader
+ EProfile profile; // the declared profile in the shader (core by default)
+ EShLanguage language; // really the stage
+ SpvVersion spvVersion;
+ bool forwardCompatible; // true if errors are to be given for use of deprecated features
+ TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree
+
+protected:
+ TMap<TString, TExtensionBehavior> extensionBehavior; // for each extension string, what its current behavior is set to
+ EShMessages messages; // errors/warnings/rule-sets
+ int numErrors; // number of compile-time errors encountered
+ TInputScanner* currentScanner;
+
+private:
+ explicit TParseVersions(const TParseVersions&);
+ TParseVersions& operator=(const TParseVersions&);
+};
+
+} // end namespace glslang
+
+#endif // _PARSE_VERSIONS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/pch.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/pch.cpp
new file mode 100644
index 0000000..b7a0865
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/pch.cpp
@@ -0,0 +1,35 @@
+//
+// Copyright (C) 2018 The Khronos Group Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "pch.h"
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/pch.h b/src/3rdparty/glslang/glslang/MachineIndependent/pch.h
new file mode 100644
index 0000000..6ea3761
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/pch.h
@@ -0,0 +1,49 @@
+#ifndef _PCH_H
+#define _PCH_H
+//
+// Copyright (C) 2018 The Khronos Group Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#include <sstream>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+#include <climits>
+#include <iostream>
+#include <memory>
+#include "SymbolTable.h"
+#include "ParseHelper.h"
+#include "Scan.h"
+#include "ScanContext.h"
+
+#endif /* _PCH_H */
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp
new file mode 100644
index 0000000..c74e44f
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp
@@ -0,0 +1,1320 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <sstream>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+#include <climits>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace glslang {
+
+// Handle #define
+int TPpContext::CPPdefine(TPpToken* ppToken)
+{
+ MacroSymbol mac;
+
+ // get the macro name
+ int token = scanToken(ppToken);
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#define", "");
+ return token;
+ }
+ if (ppToken->loc.string >= 0) {
+ // We are in user code; check for reserved name use:
+ parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#define");
+ }
+
+ // save the macro name
+ const int defAtom = atomStrings.getAddAtom(ppToken->name);
+ TSourceLoc defineLoc = ppToken->loc; // because ppToken might go to the next line before we report errors
+
+ // gather parameters to the macro, between (...)
+ token = scanToken(ppToken);
+ if (token == '(' && !ppToken->space) {
+ mac.functionLike = 1;
+ do {
+ token = scanToken(ppToken);
+ if (mac.args.size() == 0 && token == ')')
+ break;
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "bad argument", "#define", "");
+
+ return token;
+ }
+ const int argAtom = atomStrings.getAddAtom(ppToken->name);
+
+ // check for duplication of parameter name
+ bool duplicate = false;
+ for (size_t a = 0; a < mac.args.size(); ++a) {
+ if (mac.args[a] == argAtom) {
+ parseContext.ppError(ppToken->loc, "duplicate macro parameter", "#define", "");
+ duplicate = true;
+ break;
+ }
+ }
+ if (! duplicate)
+ mac.args.push_back(argAtom);
+ token = scanToken(ppToken);
+ } while (token == ',');
+ if (token != ')') {
+ parseContext.ppError(ppToken->loc, "missing parenthesis", "#define", "");
+
+ return token;
+ }
+
+ token = scanToken(ppToken);
+ } else if (token != '\n' && token != EndOfInput && !ppToken->space) {
+ parseContext.ppWarn(ppToken->loc, "missing space after macro name", "#define", "");
+
+ return token;
+ }
+
+ // record the definition of the macro
+ while (token != '\n' && token != EndOfInput) {
+ mac.body.putToken(token, ppToken);
+ token = scanToken(ppToken);
+ if (token != '\n' && ppToken->space)
+ mac.body.putToken(' ', ppToken);
+ }
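+    // (A single ' ' token is recorded between spaced tokens above so that a
+    // redefinition can be compared for identical white-space separation below.)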
+
+ // check for duplicate definition
+ MacroSymbol* existing = lookupMacroDef(defAtom);
+ if (existing != nullptr) {
+ if (! existing->undef) {
+ // Already defined -- need to make sure they are identical:
+ // "Two replacement lists are identical if and only if the
+ // preprocessing tokens in both have the same number,
+ // ordering, spelling, and white-space separation, where all
+ // white-space separations are considered identical."
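+                // For example, "#define N ( 1 )" matches "#define N (  1  )"
+                // (any run of white space compares equal), but not "#define N (1)".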
+ if (existing->functionLike != mac.functionLike) {
+ parseContext.ppError(defineLoc, "Macro redefined; function-like versus object-like:", "#define",
+ atomStrings.getString(defAtom));
+ } else if (existing->args.size() != mac.args.size()) {
+ parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define",
+ atomStrings.getString(defAtom));
+ } else {
+ if (existing->args != mac.args) {
+ parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define",
+ atomStrings.getString(defAtom));
+ }
+ // set up to compare the two
+ existing->body.reset();
+ mac.body.reset();
+ int newToken;
+ bool firstToken = true;
+ do {
+ int oldToken;
+ TPpToken oldPpToken;
+ TPpToken newPpToken;
+ oldToken = existing->body.getToken(parseContext, &oldPpToken);
+ newToken = mac.body.getToken(parseContext, &newPpToken);
+ // for the first token, preceding spaces don't matter
+ if (firstToken) {
+ newPpToken.space = oldPpToken.space;
+ firstToken = false;
+ }
+ if (oldToken != newToken || oldPpToken != newPpToken) {
+ parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define",
+ atomStrings.getString(defAtom));
+ break;
+ }
+ } while (newToken != EndOfInput);
+ }
+ }
+ *existing = mac;
+ } else
+ addMacroDef(defAtom, mac);
+
+ return '\n';
+}
+
+// Handle #undef
+int TPpContext::CPPundef(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#undef", "");
+
+ return token;
+ }
+
+ parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#undef");
+
+ MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name));
+ if (macro != nullptr)
+ macro->undef = 1;
+ token = scanToken(ppToken);
+ if (token != '\n')
+ parseContext.ppError(ppToken->loc, "can only be followed by a single macro name", "#undef", "");
+
+ return token;
+}
+
+// Skip forward to the appropriate spot. This is used both
+// to skip to a #endif after seeing an #else, and to skip to a #else,
+// #elif, or #endif after a #if/#ifdef/#ifndef/#elif test was false.
+int TPpContext::CPPelse(int matchelse, TPpToken* ppToken)
+{
+ int depth = 0;
+ int token = scanToken(ppToken);
+
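+    // 'depth' counts conditional nesting entered while skipping; only a matching
+    // #else/#elif/#endif at depth 0 can end the skip.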
+ while (token != EndOfInput) {
+ if (token != '#') {
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+
+ if (token == EndOfInput)
+ return token;
+
+ token = scanToken(ppToken);
+ continue;
+ }
+
+ if ((token = scanToken(ppToken)) != PpAtomIdentifier)
+ continue;
+
+ int nextAtom = atomStrings.getAtom(ppToken->name);
+ if (nextAtom == PpAtomIf || nextAtom == PpAtomIfdef || nextAtom == PpAtomIfndef) {
+ depth++;
+ if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) {
+ parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if/#ifdef/#ifndef", "");
+ return EndOfInput;
+ } else {
+ ifdepth++;
+ elsetracker++;
+ }
+ } else if (nextAtom == PpAtomEndif) {
+ token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken));
+ elseSeen[elsetracker] = false;
+ --elsetracker;
+ if (depth == 0) {
+ // found the #endif we are looking for
+ if (ifdepth > 0)
+ --ifdepth;
+ break;
+ }
+ --depth;
+ --ifdepth;
+ } else if (matchelse && depth == 0) {
+ if (nextAtom == PpAtomElse) {
+ elseSeen[elsetracker] = true;
+ token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken));
+ // found the #else we are looking for
+ break;
+ } else if (nextAtom == PpAtomElif) {
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", "");
+ /* we decrement ifdepth here, because CPPif will increment
+ * it and we really want to leave it alone */
+ if (ifdepth > 0) {
+ --ifdepth;
+ elseSeen[elsetracker] = false;
+ --elsetracker;
+ }
+
+ return CPPif(ppToken);
+ }
+ } else if (nextAtom == PpAtomElse) {
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#else after #else", "#else", "");
+ else
+ elseSeen[elsetracker] = true;
+ token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken));
+ } else if (nextAtom == PpAtomElif) {
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", "");
+ }
+ }
+
+ return token;
+}
+
+// Call when there should be no more tokens left on a line.
+int TPpContext::extraTokenCheck(int contextAtom, TPpToken* ppToken, int token)
+{
+ if (token != '\n' && token != EndOfInput) {
+ static const char* message = "unexpected tokens following directive";
+
+ const char* label;
+ if (contextAtom == PpAtomElse)
+ label = "#else";
+ else if (contextAtom == PpAtomElif)
+ label = "#elif";
+ else if (contextAtom == PpAtomEndif)
+ label = "#endif";
+ else if (contextAtom == PpAtomIf)
+ label = "#if";
+ else if (contextAtom == PpAtomLine)
+ label = "#line";
+ else
+ label = "";
+
+ if (parseContext.relaxedErrors())
+ parseContext.ppWarn(ppToken->loc, message, label, "");
+ else
+ parseContext.ppError(ppToken->loc, message, label, "");
+
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+ }
+
+ return token;
+}
+
+enum eval_prec {
+ MIN_PRECEDENCE,
+ COND, LOGOR, LOGAND, OR, XOR, AND, EQUAL, RELATION, SHIFT, ADD, MUL, UNARY,
+ MAX_PRECEDENCE
+};
+
+namespace {
+
+ int op_logor(int a, int b) { return a || b; }
+ int op_logand(int a, int b) { return a && b; }
+ int op_or(int a, int b) { return a | b; }
+ int op_xor(int a, int b) { return a ^ b; }
+ int op_and(int a, int b) { return a & b; }
+ int op_eq(int a, int b) { return a == b; }
+ int op_ne(int a, int b) { return a != b; }
+ int op_ge(int a, int b) { return a >= b; }
+ int op_le(int a, int b) { return a <= b; }
+ int op_gt(int a, int b) { return a > b; }
+ int op_lt(int a, int b) { return a < b; }
+ int op_shl(int a, int b) { return a << b; }
+ int op_shr(int a, int b) { return a >> b; }
+ int op_add(int a, int b) { return a + b; }
+ int op_sub(int a, int b) { return a - b; }
+ int op_mul(int a, int b) { return a * b; }
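+    // INT_MIN / -1 (and the matching %) would overflow; define the result as 0
+    // here rather than invoking undefined behavior.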
+ int op_div(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a / b; }
+ int op_mod(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a % b; }
+ int op_pos(int a) { return a; }
+ int op_neg(int a) { return -a; }
+ int op_cmpl(int a) { return ~a; }
+ int op_not(int a) { return !a; }
+
+} // end anonymous namespace
+
+struct TBinop {
+ int token, precedence, (*op)(int, int);
+} binop[] = {
+ { PpAtomOr, LOGOR, op_logor },
+ { PpAtomAnd, LOGAND, op_logand },
+ { '|', OR, op_or },
+ { '^', XOR, op_xor },
+ { '&', AND, op_and },
+ { PpAtomEQ, EQUAL, op_eq },
+ { PpAtomNE, EQUAL, op_ne },
+ { '>', RELATION, op_gt },
+ { PpAtomGE, RELATION, op_ge },
+ { '<', RELATION, op_lt },
+ { PpAtomLE, RELATION, op_le },
+ { PpAtomLeft, SHIFT, op_shl },
+ { PpAtomRight, SHIFT, op_shr },
+ { '+', ADD, op_add },
+ { '-', ADD, op_sub },
+ { '*', MUL, op_mul },
+ { '/', MUL, op_div },
+ { '%', MUL, op_mod },
+};
+
+struct TUnop {
+ int token, (*op)(int);
+} unop[] = {
+ { '+', op_pos },
+ { '-', op_neg },
+ { '~', op_cmpl },
+ { '!', op_not },
+};
+
+#define NUM_ELEMENTS(A) (sizeof(A) / sizeof(A[0]))
+
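+// eval() is a precedence-climbing expression evaluator: it parses one unary
+// expression, then folds in binary operators that bind tighter than
+// 'precedence', recursing for each right-hand operand. For example, in
+// "1 + 2 * 3" the recursion at ADD precedence consumes "2 * 3" first.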
+int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken* ppToken)
+{
+ TSourceLoc loc = ppToken->loc; // because we sometimes read the newline before reporting the error
+ if (token == PpAtomIdentifier) {
+ if (strcmp("defined", ppToken->name) == 0) {
+ if (! parseContext.isReadingHLSL() && isMacroInput()) {
+ if (parseContext.relaxedErrors())
+ parseContext.ppWarn(ppToken->loc, "nonportable when expanded from macros for preprocessor expression",
+ "defined", "");
+ else
+ parseContext.ppError(ppToken->loc, "cannot use in preprocessor expression when expanded from macros",
+ "defined", "");
+ }
+            bool needclose = false;
+ token = scanToken(ppToken);
+ if (token == '(') {
+ needclose = true;
+ token = scanToken(ppToken);
+ }
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(loc, "incorrect directive, expected identifier", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+
+ MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name));
+ res = macro != nullptr ? !macro->undef : 0;
+ token = scanToken(ppToken);
+ if (needclose) {
+ if (token != ')') {
+ parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+ token = scanToken(ppToken);
+ }
+ } else {
+ token = evalToToken(token, shortCircuit, res, err, ppToken);
+ return eval(token, precedence, shortCircuit, res, err, ppToken);
+ }
+ } else if (token == PpAtomConstInt) {
+ res = ppToken->ival;
+ token = scanToken(ppToken);
+ } else if (token == '(') {
+ token = scanToken(ppToken);
+ token = eval(token, MIN_PRECEDENCE, shortCircuit, res, err, ppToken);
+ if (! err) {
+ if (token != ')') {
+ parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+ token = scanToken(ppToken);
+ }
+ } else {
+ int op = NUM_ELEMENTS(unop) - 1;
+ for (; op >= 0; op--) {
+ if (unop[op].token == token)
+ break;
+ }
+ if (op >= 0) {
+ token = scanToken(ppToken);
+ token = eval(token, UNARY, shortCircuit, res, err, ppToken);
+ res = unop[op].op(res);
+ } else {
+ parseContext.ppError(loc, "bad expression", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+
+ return token;
+ }
+ }
+
+ token = evalToToken(token, shortCircuit, res, err, ppToken);
+
+ // Perform evaluation of binary operation, if there is one, otherwise we are done.
+ while (! err) {
+ if (token == ')' || token == '\n')
+ break;
+ int op;
+ for (op = NUM_ELEMENTS(binop) - 1; op >= 0; op--) {
+ if (binop[op].token == token)
+ break;
+ }
+ if (op < 0 || binop[op].precedence <= precedence)
+ break;
+ int leftSide = res;
+
+        // Set up short-circuiting, needed for ES, unless already in a short circuit.
+        // (Once in a short circuit, it can't be turned off until that whole subexpression is done.)
+ if (! shortCircuit) {
+ if ((token == PpAtomOr && leftSide == 1) ||
+ (token == PpAtomAnd && leftSide == 0))
+ shortCircuit = true;
+ }
+
+ token = scanToken(ppToken);
+ token = eval(token, binop[op].precedence, shortCircuit, res, err, ppToken);
+
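+        // Division or remainder by zero: report the error, then substitute 1 for
+        // the divisor so evaluation can continue.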
+ if (binop[op].op == op_div || binop[op].op == op_mod) {
+ if (res == 0) {
+ parseContext.ppError(loc, "division by 0", "preprocessor evaluation", "");
+ res = 1;
+ }
+ }
+ res = binop[op].op(leftSide, res);
+ }
+
+ return token;
+}
+
+// Expand macros, skipping empty expansions, to get to the first real token in those expansions.
+int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken* ppToken)
+{
+ while (token == PpAtomIdentifier && strcmp("defined", ppToken->name) != 0) {
+ switch (MacroExpand(ppToken, true, false)) {
+ case MacroExpandNotStarted:
+ case MacroExpandError:
+ parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", "");
+ err = true;
+ res = 0;
+ break;
+ case MacroExpandStarted:
+ break;
+ case MacroExpandUndef:
+ if (! shortCircuit && parseContext.profile == EEsProfile) {
+ const char* message = "undefined macro in expression not allowed in es profile";
+ if (parseContext.relaxedErrors())
+ parseContext.ppWarn(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
+ else
+ parseContext.ppError(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
+ }
+ break;
+ }
+ token = scanToken(ppToken);
+ if (err)
+ break;
+ }
+
+ return token;
+}
+
+// Handle #if
+int TPpContext::CPPif(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) {
+ parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if", "");
+ return EndOfInput;
+ } else {
+ elsetracker++;
+ ifdepth++;
+ }
+ int res = 0;
+ bool err = false;
+ token = eval(token, MIN_PRECEDENCE, false, res, err, ppToken);
+ token = extraTokenCheck(PpAtomIf, ppToken, token);
+ if (!res && !err)
+ token = CPPelse(1, ppToken);
+
+ return token;
+}
+
+// Handle #ifdef
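+// (and #ifndef). For example, '#ifdef FOO' keeps the branch when FOO is
+// defined, even as an empty object-like macro; '#ifndef FOO' would instead
+// skip to the matching #else/#elif/#endif via CPPelse.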
+int TPpContext::CPPifdef(int defined, TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) {
+ parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#ifdef", "");
+ return EndOfInput;
+ } else {
+ elsetracker++;
+ ifdepth++;
+ }
+
+ if (token != PpAtomIdentifier) {
+ if (defined)
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifdef", "");
+ else
+ parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifndef", "");
+ } else {
+ MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name));
+ token = scanToken(ppToken);
+ if (token != '\n') {
+ parseContext.ppError(ppToken->loc, "unexpected tokens following #ifdef directive - expected a newline", "#ifdef", "");
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+ }
+ if (((macro != nullptr && !macro->undef) ? 1 : 0) != defined)
+ token = CPPelse(1, ppToken);
+ }
+
+ return token;
+}
+
+// Handle #include ...
+// TODO: Handle macro expansions for the header name
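+//
+// A successfully included header is wrapped in a prologue/epilogue pair of
+// #line directives before being pushed as input; e.g., for '#include "foo.h"'
+// on line 10 of the current source, roughly:
+//
+//     #line 1 "foo.h"
+//     ...header content...
+//     #line 11 <current source string>
+//
+// (with 0 and 10 instead when lineDirectiveShouldSetNextLine() is false).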
+int TPpContext::CPPinclude(TPpToken* ppToken)
+{
+ const TSourceLoc directiveLoc = ppToken->loc;
+ bool startWithLocalSearch = true; // to additionally include the extra "" paths
+ int token = scanToken(ppToken);
+
+ // handle <header-name>-style #include
+ if (token == '<') {
+ startWithLocalSearch = false;
+ token = scanHeaderName(ppToken, '>');
+ }
+ // otherwise ppToken already has the header name and it was "header-name" style
+
+ if (token != PpAtomConstString) {
+ parseContext.ppError(directiveLoc, "must be followed by a header name", "#include", "");
+ return token;
+ }
+
+ // Make a copy of the name because it will be overwritten by the next token scan.
+ const std::string filename = ppToken->name;
+
+ // See if the directive was well formed
+ token = scanToken(ppToken);
+ if (token != '\n') {
+ if (token == EndOfInput)
+ parseContext.ppError(ppToken->loc, "expected newline after header name:", "#include", "%s", filename.c_str());
+ else
+ parseContext.ppError(ppToken->loc, "extra content after header name:", "#include", "%s", filename.c_str());
+ return token;
+ }
+
+ // Process well-formed directive
+
+ // Find the inclusion, first look in "Local" ("") paths, if requested,
+ // otherwise, only search the "System" (<>) paths.
+ TShader::Includer::IncludeResult* res = nullptr;
+ if (startWithLocalSearch)
+ res = includer.includeLocal(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1);
+ if (res == nullptr || res->headerName.empty()) {
+ includer.releaseInclude(res);
+ res = includer.includeSystem(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1);
+ }
+
+ // Process the results
+ if (res != nullptr && !res->headerName.empty()) {
+ if (res->headerData != nullptr && res->headerLength > 0) {
+ // path for processing one or more tokens from an included header, hand off 'res'
+ const bool forNextLine = parseContext.lineDirectiveShouldSetNextLine();
+ std::ostringstream prologue;
+ std::ostringstream epilogue;
+ prologue << "#line " << forNextLine << " " << "\"" << res->headerName << "\"\n";
+ epilogue << (res->headerData[res->headerLength - 1] == '\n'? "" : "\n") <<
+ "#line " << directiveLoc.line + forNextLine << " " << directiveLoc.getStringNameOrNum() << "\n";
+ pushInput(new TokenizableIncludeFile(directiveLoc, prologue.str(), res, epilogue.str(), this));
+ parseContext.intermediate.addIncludeText(res->headerName.c_str(), res->headerData, res->headerLength);
+ // There's no "current" location anymore.
+ parseContext.setCurrentColumn(0);
+ } else {
+ // things are okay, but there is nothing to process
+ includer.releaseInclude(res);
+ }
+ } else {
+ // error path, clean up
+ std::string message =
+ res != nullptr ? std::string(res->headerData, res->headerLength)
+ : std::string("Could not process include directive");
+ parseContext.ppError(directiveLoc, message.c_str(), "#include", "for header name: %s", filename.c_str());
+ includer.releaseInclude(res);
+ }
+
+ return token;
+}
+
+// Handle #line
+int TPpContext::CPPline(TPpToken* ppToken)
+{
+ // "#line must have, after macro substitution, one of the following forms:
+ // "#line line
+ // "#line line source-string-number"
+
+ int token = scanToken(ppToken);
+ const TSourceLoc directiveLoc = ppToken->loc;
+ if (token == '\n') {
+ parseContext.ppError(ppToken->loc, "must by followed by an integral literal", "#line", "");
+ return token;
+ }
+
+ int lineRes = 0; // Line number after macro expansion.
+ int lineToken = 0;
+ bool hasFile = false;
+ int fileRes = 0; // Source file number after macro expansion.
+ const char* sourceName = nullptr; // Optional source file name.
+ bool lineErr = false;
+ bool fileErr = false;
+ token = eval(token, MIN_PRECEDENCE, false, lineRes, lineErr, ppToken);
+ if (! lineErr) {
+ lineToken = lineRes;
+ if (token == '\n')
+ ++lineRes;
+
+ if (parseContext.lineDirectiveShouldSetNextLine())
+ --lineRes;
+ parseContext.setCurrentLine(lineRes);
+
+ if (token != '\n') {
+ if (token == PpAtomConstString) {
+ parseContext.ppRequireExtensions(directiveLoc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based #line");
+ // We need to save a copy of the string instead of pointing
+ // to the name field of the token since the name field
+ // will likely be overwritten by the next token scan.
+ sourceName = atomStrings.getString(atomStrings.getAddAtom(ppToken->name));
+ parseContext.setCurrentSourceName(sourceName);
+ hasFile = true;
+ token = scanToken(ppToken);
+ } else {
+ token = eval(token, MIN_PRECEDENCE, false, fileRes, fileErr, ppToken);
+ if (! fileErr) {
+ parseContext.setCurrentString(fileRes);
+ hasFile = true;
+ }
+ }
+ }
+ }
+ if (!fileErr && !lineErr) {
+ parseContext.notifyLineDirective(directiveLoc.line, lineToken, hasFile, fileRes, sourceName);
+ }
+ token = extraTokenCheck(PpAtomLine, ppToken, token);
+
+ return token;
+}
+
+// Handle #error
+int TPpContext::CPPerror(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+ std::string message;
+ TSourceLoc loc = ppToken->loc;
+
+ while (token != '\n' && token != EndOfInput) {
+ if (token == PpAtomConstInt16 || token == PpAtomConstUint16 ||
+ token == PpAtomConstInt || token == PpAtomConstUint ||
+ token == PpAtomConstInt64 || token == PpAtomConstUint64 ||
+ token == PpAtomConstFloat16 ||
+ token == PpAtomConstFloat || token == PpAtomConstDouble) {
+ message.append(ppToken->name);
+ } else if (token == PpAtomIdentifier || token == PpAtomConstString) {
+ message.append(ppToken->name);
+ } else {
+ message.append(atomStrings.getString(token));
+ }
+ message.append(" ");
+ token = scanToken(ppToken);
+ }
+ parseContext.notifyErrorDirective(loc.line, message.c_str());
+ // Store this message in the shader's information log and set the compile-error flag.
+ parseContext.ppError(loc, message.c_str(), "#error", "");
+
+ return '\n';
+}
+
+// Handle #pragma
+int TPpContext::CPPpragma(TPpToken* ppToken)
+{
+ char SrcStrName[2];
+ TVector<TString> tokens;
+
+ TSourceLoc loc = ppToken->loc; // because we go to the next line before processing
+ int token = scanToken(ppToken);
+ while (token != '\n' && token != EndOfInput) {
+ switch (token) {
+ case PpAtomIdentifier:
+ case PpAtomConstInt:
+ case PpAtomConstUint:
+ case PpAtomConstInt64:
+ case PpAtomConstUint64:
+#ifdef AMD_EXTENSIONS
+ case PpAtomConstInt16:
+ case PpAtomConstUint16:
+#endif
+ case PpAtomConstFloat:
+ case PpAtomConstDouble:
+ case PpAtomConstFloat16:
+ tokens.push_back(ppToken->name);
+ break;
+ default:
+ SrcStrName[0] = (char)token;
+ SrcStrName[1] = '\0';
+ tokens.push_back(SrcStrName);
+ }
+ token = scanToken(ppToken);
+ }
+
+ if (token == EndOfInput)
+ parseContext.ppError(loc, "directive must end with a newline", "#pragma", "");
+ else
+ parseContext.handlePragma(loc, tokens);
+
+ return token;
+}
+
+// #version: This is just for error checking: the version and profile are decided before preprocessing starts
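+// For example, '#version 310 es' is well formed: 310 is the integral version
+// number and 'es' is one of the three accepted profile names.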
+int TPpContext::CPPversion(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+
+ if (errorOnVersion || versionSeen) {
+ if (parseContext.isReadingHLSL())
+ parseContext.ppError(ppToken->loc, "invalid preprocessor command", "#version", "");
+ else
+ parseContext.ppError(ppToken->loc, "must occur first in shader", "#version", "");
+ }
+ versionSeen = true;
+
+ if (token == '\n') {
+ parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", "");
+
+ return token;
+ }
+
+ if (token != PpAtomConstInt)
+ parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", "");
+
+ ppToken->ival = atoi(ppToken->name);
+ int versionNumber = ppToken->ival;
+ int line = ppToken->loc.line;
+ token = scanToken(ppToken);
+
+ if (token == '\n') {
+ parseContext.notifyVersion(line, versionNumber, nullptr);
+ return token;
+ } else {
+ int profileAtom = atomStrings.getAtom(ppToken->name);
+ if (profileAtom != PpAtomCore &&
+ profileAtom != PpAtomCompatibility &&
+ profileAtom != PpAtomEs)
+ parseContext.ppError(ppToken->loc, "bad profile name; use es, core, or compatibility", "#version", "");
+ parseContext.notifyVersion(line, versionNumber, ppToken->name);
+ token = scanToken(ppToken);
+
+ if (token == '\n')
+ return token;
+ else
+ parseContext.ppError(ppToken->loc, "bad tokens following profile -- expected newline", "#version", "");
+ }
+
+ return token;
+}
+
+// Handle #extension
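+// For example, '#extension GL_GOOGLE_include_directive : enable' passes the
+// extension name and the behavior identifier through to
+// updateExtensionBehavior(), where the behavior itself is checked; this scan
+// only validates the directive's shape (identifier ':' identifier).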
+int TPpContext::CPPextension(TPpToken* ppToken)
+{
+ int line = ppToken->loc.line;
+ int token = scanToken(ppToken);
+ char extensionName[MaxTokenLength + 1];
+
+ if (token == '\n') {
+ parseContext.ppError(ppToken->loc, "extension name not specified", "#extension", "");
+ return token;
+ }
+
+ if (token != PpAtomIdentifier)
+ parseContext.ppError(ppToken->loc, "extension name expected", "#extension", "");
+
+ snprintf(extensionName, sizeof(extensionName), "%s", ppToken->name);
+
+ token = scanToken(ppToken);
+ if (token != ':') {
+ parseContext.ppError(ppToken->loc, "':' missing after extension name", "#extension", "");
+ return token;
+ }
+
+ token = scanToken(ppToken);
+ if (token != PpAtomIdentifier) {
+ parseContext.ppError(ppToken->loc, "behavior for extension not specified", "#extension", "");
+ return token;
+ }
+
+ parseContext.updateExtensionBehavior(line, extensionName, ppToken->name);
+ parseContext.notifyExtensionDirective(line, extensionName, ppToken->name);
+
+ token = scanToken(ppToken);
+ if (token == '\n')
+ return token;
+ else
+ parseContext.ppError(ppToken->loc, "extra tokens -- expected newline", "#extension","");
+
+ return token;
+}
+
+int TPpContext::readCPPline(TPpToken* ppToken)
+{
+ int token = scanToken(ppToken);
+
+ if (token == PpAtomIdentifier) {
+ switch (atomStrings.getAtom(ppToken->name)) {
+ case PpAtomDefine:
+ token = CPPdefine(ppToken);
+ break;
+ case PpAtomElse:
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#else after #else", "#else", "");
+ elseSeen[elsetracker] = true;
+ if (ifdepth == 0)
+ parseContext.ppError(ppToken->loc, "mismatched statements", "#else", "");
+ token = extraTokenCheck(PpAtomElse, ppToken, scanToken(ppToken));
+ token = CPPelse(0, ppToken);
+ break;
+ case PpAtomElif:
+ if (ifdepth == 0)
+ parseContext.ppError(ppToken->loc, "mismatched statements", "#elif", "");
+ if (elseSeen[elsetracker])
+ parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", "");
+ // this token is really a don't-care, but we still need to eat the tokens
+ token = scanToken(ppToken);
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+ token = CPPelse(0, ppToken);
+ break;
+ case PpAtomEndif:
+ if (ifdepth == 0)
+ parseContext.ppError(ppToken->loc, "mismatched statements", "#endif", "");
+ else {
+ elseSeen[elsetracker] = false;
+ --elsetracker;
+ --ifdepth;
+ }
+ token = extraTokenCheck(PpAtomEndif, ppToken, scanToken(ppToken));
+ break;
+ case PpAtomIf:
+ token = CPPif(ppToken);
+ break;
+ case PpAtomIfdef:
+ token = CPPifdef(1, ppToken);
+ break;
+ case PpAtomIfndef:
+ token = CPPifdef(0, ppToken);
+ break;
+ case PpAtomInclude:
+ if(!parseContext.isReadingHLSL()) {
+ parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_include_directive, "#include");
+ }
+ token = CPPinclude(ppToken);
+ break;
+ case PpAtomLine:
+ token = CPPline(ppToken);
+ break;
+ case PpAtomPragma:
+ token = CPPpragma(ppToken);
+ break;
+ case PpAtomUndef:
+ token = CPPundef(ppToken);
+ break;
+ case PpAtomError:
+ token = CPPerror(ppToken);
+ break;
+ case PpAtomVersion:
+ token = CPPversion(ppToken);
+ break;
+ case PpAtomExtension:
+ token = CPPextension(ppToken);
+ break;
+ default:
+ parseContext.ppError(ppToken->loc, "invalid directive:", "#", ppToken->name);
+ break;
+ }
+ } else if (token != '\n' && token != EndOfInput)
+ parseContext.ppError(ppToken->loc, "invalid directive", "#", "");
+
+ while (token != '\n' && token != EndOfInput)
+ token = scanToken(ppToken);
+
+ return token;
+}
+
+// Context-dependent parsing of a #include <header-name>.
+// Assumes no macro expansions etc. are being done; the name is just on the current input.
+// Always creates a name and returns PpAtomConstString, unless we run out of input.
+int TPpContext::scanHeaderName(TPpToken* ppToken, char delimit)
+{
+ bool tooLong = false;
+
+ if (inputStack.empty())
+ return EndOfInput;
+
+ int len = 0;
+ ppToken->name[0] = '\0';
+ do {
+ int ch = inputStack.back()->getch();
+
+ // done yet?
+ if (ch == delimit) {
+ ppToken->name[len] = '\0';
+ if (tooLong)
+ parseContext.ppError(ppToken->loc, "header name too long", "", "");
+ return PpAtomConstString;
+ } else if (ch == EndOfInput)
+ return EndOfInput;
+
+ // found a character to expand the name with
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else
+ tooLong = true;
+ } while (true);
+}
+
+// Macro-expand a macro argument 'arg' to create 'expandedArg'.
+// Does not replace 'arg'.
+// Returns nullptr if no expanded argument is created.
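+//
+// Both the raw and the prescanned (macro-expanded) forms of each argument are
+// kept, because adjacency to '##' decides which one is substituted. For
+// example, given
+//
+//     #define ONE 1
+//     #define CAT(a,b) a ## b
+//     #define ADD(a,b) a + b
+//
+// CAT(ONE, 2) pastes the raw tokens, yielding 'ONE2', while ADD(ONE, 2)
+// substitutes the expanded argument, yielding '1 + 2'.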
+TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken* ppToken, bool newLineOkay)
+{
+ // expand the argument
+ TokenStream* expandedArg = new TokenStream;
+ pushInput(new tMarkerInput(this));
+ pushTokenStreamInput(arg);
+ int token;
+ while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput) {
+ token = tokenPaste(token, *ppToken);
+ if (token == PpAtomIdentifier) {
+ switch (MacroExpand(ppToken, false, newLineOkay)) {
+ case MacroExpandNotStarted:
+ break;
+ case MacroExpandError:
+ // toss the rest of the pushed-input argument by scanning until tMarkerInput
+ while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput)
+ ;
+ break;
+ case MacroExpandStarted:
+ case MacroExpandUndef:
+ continue;
+ }
+ }
+ if (token == tMarkerInput::marker || token == EndOfInput)
+ break;
+ expandedArg->putToken(token, ppToken);
+ }
+
+ if (token != tMarkerInput::marker) {
+ // Error, or MacroExpand ate the marker, so had bad input, recover
+ delete expandedArg;
+ expandedArg = nullptr;
+ }
+
+ return expandedArg;
+}
+
+//
+// Return the next token for a macro expansion, handling macro arguments,
+// whose semantics are dependent on being adjacent to ##.
+//
+int TPpContext::tMacroInput::scan(TPpToken* ppToken)
+{
+ int token;
+ do {
+ token = mac->body.getToken(pp->parseContext, ppToken);
+ } while (token == ' '); // handle white space in macro
+
+ // Hash operators basically turn off a round of macro substitution
+ // (the round done on the argument before the round done on the RHS of the
+ // macro definition):
+ //
+ // "A parameter in the replacement list, unless preceded by a # or ##
+ // preprocessing token or followed by a ## preprocessing token (see below),
+ // is replaced by the corresponding argument after all macros contained
+ // therein have been expanded."
+ //
+ // "If, in the replacement list, a parameter is immediately preceded or
+ // followed by a ## preprocessing token, the parameter is replaced by the
+ // corresponding argument's preprocessing token sequence."
+
+ bool pasting = false;
+ if (postpaste) {
+ // don't expand next token
+ pasting = true;
+ postpaste = false;
+ }
+
+ if (prepaste) {
+ // already know we should be on a ##, verify
+ assert(token == PpAtomPaste);
+ prepaste = false;
+ postpaste = true;
+ }
+
+ // see if we are preceding a ##
+ if (mac->body.peekUntokenizedPasting()) {
+ prepaste = true;
+ pasting = true;
+ }
+
+ // HLSL does expand macros before concatenation
+ if (pasting && pp->parseContext.isReadingHLSL())
+ pasting = false;
+
+ // TODO: preprocessor: properly handle whitespace (or lack of it) between tokens when expanding
+ if (token == PpAtomIdentifier) {
+ int i;
+ for (i = (int)mac->args.size() - 1; i >= 0; i--)
+ if (strcmp(pp->atomStrings.getString(mac->args[i]), ppToken->name) == 0)
+ break;
+ if (i >= 0) {
+ TokenStream* arg = expandedArgs[i];
+ if (arg == nullptr || pasting)
+ arg = args[i];
+ pp->pushTokenStreamInput(*arg, prepaste);
+
+ return pp->scanToken(ppToken);
+ }
+ }
+
+ if (token == EndOfInput)
+ mac->busy = 0;
+
+ return token;
+}
+
+// return a textual zero, for scanning a macro that was never defined
+int TPpContext::tZeroInput::scan(TPpToken* ppToken)
+{
+ if (done)
+ return EndOfInput;
+
+ ppToken->name[0] = '0';
+ ppToken->name[1] = 0;
+ ppToken->ival = 0;
+ ppToken->space = false;
+ done = true;
+
+ return PpAtomConstInt;
+}
+
+//
+// Check a token to see if it is a macro that should be expanded:
+// - If it is, and defined, push a tInput that will produce the appropriate
+// expansion and return MacroExpandStarted.
+// - If it is, but undefined, and expandUndef is requested, push a tInput
+// that will expand to 0 and return MacroExpandUndef.
+// - Otherwise, there is no expansion, and there are two cases:
+// * It might be okay that there is no expansion, and no specific error was
+// detected. Returns MacroExpandNotStarted.
+// * The expansion was started, but could not be completed, due to an error
+// that cannot be recovered from. Returns MacroExpandError.
+//
+MacroExpandResult TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay)
+{
+ ppToken->space = false;
+ int macroAtom = atomStrings.getAtom(ppToken->name);
+ switch (macroAtom) {
+ case PpAtomLineMacro:
+ ppToken->ival = parseContext.getCurrentLoc().line;
+ snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
+ UngetToken(PpAtomConstInt, ppToken);
+ return MacroExpandStarted;
+
+ case PpAtomFileMacro: {
+ if (parseContext.getCurrentLoc().name)
+ parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based __FILE__");
+ ppToken->ival = parseContext.getCurrentLoc().string;
+ snprintf(ppToken->name, sizeof(ppToken->name), "%s", ppToken->loc.getStringNameOrNum().c_str());
+ UngetToken(PpAtomConstInt, ppToken);
+ return MacroExpandStarted;
+ }
+
+ case PpAtomVersionMacro:
+ ppToken->ival = parseContext.version;
+ snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
+ UngetToken(PpAtomConstInt, ppToken);
+ return MacroExpandStarted;
+
+ default:
+ break;
+ }
+
+ MacroSymbol* macro = macroAtom == 0 ? nullptr : lookupMacroDef(macroAtom);
+
+ // no recursive expansions
+ if (macro != nullptr && macro->busy)
+ return MacroExpandNotStarted;
+
+ // not expanding undefined macros
+ if ((macro == nullptr || macro->undef) && ! expandUndef)
+ return MacroExpandNotStarted;
+
+ // 0 is the value of an undefined macro
+ if ((macro == nullptr || macro->undef) && expandUndef) {
+ pushInput(new tZeroInput(this));
+ return MacroExpandUndef;
+ }
+
+ tMacroInput *in = new tMacroInput(this);
+
+ TSourceLoc loc = ppToken->loc; // in case we go to the next line before discovering the error
+ in->mac = macro;
+ if (macro->functionLike) {
+ // We don't know yet if this will be a successful call of a
+ // function-like macro; need to look for a '(', but without trashing
+ // the passed in ppToken, until we know we are no longer speculative.
+ TPpToken parenToken;
+ int token = scanToken(&parenToken);
+ if (newLineOkay) {
+ while (token == '\n')
+ token = scanToken(&parenToken);
+ }
+ if (token != '(') {
+ // Function-like macro called with object-like syntax: okay, don't expand.
+ // (We ate exactly one token that might not be white space; put it back.)
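+ // For example, with '#define F(x) (x)', the sequence 'F + 1' leaves F
+ // unexpanded: the speculative scan finds '+' rather than '('.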
+ UngetToken(token, &parenToken);
+ delete in;
+ return MacroExpandNotStarted;
+ }
+ in->args.resize(in->mac->args.size());
+ for (size_t i = 0; i < in->mac->args.size(); i++)
+ in->args[i] = new TokenStream;
+ in->expandedArgs.resize(in->mac->args.size());
+ for (size_t i = 0; i < in->mac->args.size(); i++)
+ in->expandedArgs[i] = nullptr;
+ size_t arg = 0;
+ bool tokenRecorded = false;
+ do {
+ TVector<char> nestStack;
+ while (true) {
+ token = scanToken(ppToken);
+ if (token == EndOfInput || token == tMarkerInput::marker) {
+ parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ if (token == '\n') {
+ if (! newLineOkay) {
+ parseContext.ppError(loc, "End of line in macro substitution:", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ continue;
+ }
+ if (token == '#') {
+ parseContext.ppError(ppToken->loc, "unexpected '#'", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ if (in->mac->args.size() == 0 && token != ')')
+ break;
+ if (nestStack.size() == 0 && (token == ',' || token == ')'))
+ break;
+ if (token == '(')
+ nestStack.push_back(')');
+ else if (token == '{' && parseContext.isReadingHLSL())
+ nestStack.push_back('}');
+ else if (nestStack.size() > 0 && token == nestStack.back())
+ nestStack.pop_back();
+ in->args[arg]->putToken(token, ppToken);
+ tokenRecorded = true;
+ }
+ // end of single argument scan
+
+ if (token == ')') {
+ // closing paren of call
+ if (in->mac->args.size() == 1 && !tokenRecorded)
+ break;
+ arg++;
+ break;
+ }
+ arg++;
+ } while (arg < in->mac->args.size());
+ // end of all arguments scan
+
+ if (arg < in->mac->args.size())
+ parseContext.ppError(loc, "Too few args in Macro", "macro expansion", atomStrings.getString(macroAtom));
+ else if (token != ')') {
+ // Error recovery code; find the end of the call, if possible
+ int depth = 0;
+ while (token != EndOfInput && (depth > 0 || token != ')')) {
+ if (token == ')' || token == '}')
+ depth--;
+ token = scanToken(ppToken);
+ if (token == '(' || token == '{')
+ depth++;
+ }
+
+ if (token == EndOfInput) {
+ parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom));
+ delete in;
+ return MacroExpandError;
+ }
+ parseContext.ppError(loc, "Too many args in macro", "macro expansion", atomStrings.getString(macroAtom));
+ }
+
+ // We need both expanded and non-expanded forms of the argument, for whether or
+ // not token pasting will be applied later when the argument is consumed next to ##.
+ for (size_t i = 0; i < in->mac->args.size(); i++)
+ in->expandedArgs[i] = PrescanMacroArg(*in->args[i], ppToken, newLineOkay);
+ }
+
+ pushInput(in);
+ macro->busy = 1;
+ macro->body.reset();
+
+ return MacroExpandStarted;
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp
new file mode 100644
index 0000000..06c2333
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp
@@ -0,0 +1,181 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace {
+
+using namespace glslang;
+
+const struct {
+ int val;
+ const char* str;
+} tokens[] = {
+
+ { PPAtomAddAssign, "+=" },
+ { PPAtomSubAssign, "-=" },
+ { PPAtomMulAssign, "*=" },
+ { PPAtomDivAssign, "/=" },
+ { PPAtomModAssign, "%=" },
+
+ { PpAtomRight, ">>" },
+ { PpAtomLeft, "<<" },
+ { PpAtomAnd, "&&" },
+ { PpAtomOr, "||" },
+ { PpAtomXor, "^^" },
+
+ { PpAtomRightAssign, ">>=" },
+ { PpAtomLeftAssign, "<<=" },
+ { PpAtomAndAssign, "&=" },
+ { PpAtomOrAssign, "|=" },
+ { PpAtomXorAssign, "^=" },
+
+ { PpAtomEQ, "==" },
+ { PpAtomNE, "!=" },
+ { PpAtomGE, ">=" },
+ { PpAtomLE, "<=" },
+
+ { PpAtomDecrement, "--" },
+ { PpAtomIncrement, "++" },
+
+ { PpAtomColonColon, "::" },
+
+ { PpAtomDefine, "define" },
+ { PpAtomUndef, "undef" },
+ { PpAtomIf, "if" },
+ { PpAtomElif, "elif" },
+ { PpAtomElse, "else" },
+ { PpAtomEndif, "endif" },
+ { PpAtomIfdef, "ifdef" },
+ { PpAtomIfndef, "ifndef" },
+ { PpAtomLine, "line" },
+ { PpAtomPragma, "pragma" },
+ { PpAtomError, "error" },
+
+ { PpAtomVersion, "version" },
+ { PpAtomCore, "core" },
+ { PpAtomCompatibility, "compatibility" },
+ { PpAtomEs, "es" },
+ { PpAtomExtension, "extension" },
+
+ { PpAtomLineMacro, "__LINE__" },
+ { PpAtomFileMacro, "__FILE__" },
+ { PpAtomVersionMacro, "__VERSION__" },
+
+ { PpAtomInclude, "include" },
+};
+
+} // end anonymous namespace
+
+namespace glslang {
+
+//
+// Initialize the atom table.
+//
+TStringAtomMap::TStringAtomMap()
+{
+ badToken.assign("<bad token>");
+
+ // Add single character tokens to the atom table:
+ const char* s = "~!%^&*()-+=|,.<>/?;:[]{}#\\";
+ char t[2];
+
+ t[1] = '\0';
+ while (*s) {
+ t[0] = *s;
+ addAtomFixed(t, s[0]);
+ s++;
+ }
+
+ // Add multiple-character scanner tokens:
+ for (size_t ii = 0; ii < sizeof(tokens)/sizeof(tokens[0]); ii++)
+ addAtomFixed(tokens[ii].str, tokens[ii].val);
+
+ nextAtom = PpAtomLast;
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp
new file mode 100644
index 0000000..cc003a8
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#include <cstdlib>
+#include <locale>
+
+#include "PpContext.h"
+
+namespace glslang {
+
+TPpContext::TPpContext(TParseContextBase& pc, const std::string& rootFileName, TShader::Includer& inclr) :
+ preamble(0), strings(0), previous_token('\n'), parseContext(pc), includer(inclr), inComment(false),
+ rootFileName(rootFileName),
+ currentSourceFile(rootFileName)
+{
+ ifdepth = 0;
+ for (elsetracker = 0; elsetracker < maxIfNesting; elsetracker++)
+ elseSeen[elsetracker] = false;
+ elsetracker = 0;
+
+ strtodStream.imbue(std::locale::classic());
+}
+
+TPpContext::~TPpContext()
+{
+ delete [] preamble;
+
+ // free up the inputStack
+ while (! inputStack.empty())
+ popInput();
+}
+
+void TPpContext::setInput(TInputScanner& input, bool versionWillBeError)
+{
+ assert(inputStack.size() == 0);
+
+ pushInput(new tStringInput(this, input));
+
+ errorOnVersion = versionWillBeError;
+ versionSeen = false;
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h
new file mode 100644
index 0000000..8470e17
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpContext.h
@@ -0,0 +1,702 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef PPCONTEXT_H
+#define PPCONTEXT_H
+
+#include <stack>
+#include <unordered_map>
+#include <sstream>
+
+#include "../ParseHelper.h"
+#include "PpTokens.h"
+
+// Windows-only pragma
+#ifdef _MSC_VER
+ #pragma warning(disable : 4127)
+#endif
+
+namespace glslang {
+
+class TPpToken {
+public:
+ TPpToken() { clear(); }
+ void clear()
+ {
+ space = false;
+ i64val = 0;
+ loc.init();
+ name[0] = 0;
+ }
+
+ // Used for comparing macro definitions, so checks what is relevant for that.
+ bool operator==(const TPpToken& right)
+ {
+ return space == right.space &&
+ ival == right.ival && dval == right.dval && i64val == right.i64val &&
+ strncmp(name, right.name, MaxTokenLength) == 0;
+ }
+ bool operator!=(const TPpToken& right) { return ! operator==(right); }
+
+ TSourceLoc loc;
+ // True if a space (for white space or a removed comment) should also be
+ // recognized, in front of the token returned:
+ bool space;
+ // Numeric value of the token:
+ union {
+ int ival;
+ double dval;
+ long long i64val;
+ };
+ // Text string of the token:
+ char name[MaxTokenLength + 1];
+};
+
+class TStringAtomMap {
+//
+// Implementation is in PpAtom.cpp
+//
+// Maintain a bi-directional mapping between relevant preprocessor strings and
+// "atoms" which a unique integers (small, contiguous, not hash-like) per string.
+//
+public:
+ TStringAtomMap();
+
+ // Map string -> atom.
+ // Return 0 if no existing string.
+ int getAtom(const char* s) const
+ {
+ auto it = atomMap.find(s);
+ return it == atomMap.end() ? 0 : it->second;
+ }
+
+ // Map a new or existing string -> atom, inventing a new atom if necessary.
+ int getAddAtom(const char* s)
+ {
+ int atom = getAtom(s);
+ if (atom == 0) {
+ atom = nextAtom++;
+ addAtomFixed(s, atom);
+ }
+ return atom;
+ }
+
+ // Map atom -> string.
+ const char* getString(int atom) const { return stringMap[atom]->c_str(); }
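+
+ // For example:
+ //     TStringAtomMap atoms;
+ //     int a = atoms.getAddAtom("foo");                // invents a new atom on first use
+ //     assert(atoms.getAtom("foo") == a);              // string -> atom
+ //     assert(strcmp(atoms.getString(a), "foo") == 0); // atom -> string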
+
+protected:
+ TStringAtomMap(TStringAtomMap&);
+ TStringAtomMap& operator=(TStringAtomMap&);
+
+ TUnorderedMap<TString, int> atomMap;
+ TVector<const TString*> stringMap; // these point into the TString in atomMap
+ int nextAtom;
+
+ // Bad source characters can lead to bad atoms, so gracefully handle those by
+ // pre-filling the table with them (to avoid if tests later).
+ TString badToken;
+
+ // Add bi-directional mappings:
+ // - string -> atom
+ // - atom -> string
+ void addAtomFixed(const char* s, int atom)
+ {
+ auto it = atomMap.insert(std::pair<TString, int>(s, atom)).first;
+ if (stringMap.size() < (size_t)atom + 1)
+ stringMap.resize(atom + 100, &badToken);
+ stringMap[atom] = &it->first;
+ }
+};
+
+class TInputScanner;
+
+enum MacroExpandResult {
+ MacroExpandNotStarted, // macro not expanded, which might not be an error
+ MacroExpandError, // a clear error occurred while expanding, no expansion
+ MacroExpandStarted, // macro expansion process has started
+ MacroExpandUndef // macro is undefined and will be expanded as 0
+};
+
+// This class is the result of turning a huge pile of C code communicating through globals
+// into a class. This was done to allow instancing, in order to attain thread safety.
+// Don't expect too much in terms of OO design.
+class TPpContext {
+public:
+ TPpContext(TParseContextBase&, const std::string& rootFileName, TShader::Includer&);
+ virtual ~TPpContext();
+
+ void setPreamble(const char* preamble, size_t length);
+
+ int tokenize(TPpToken& ppToken);
+ int tokenPaste(int token, TPpToken&);
+
+ class tInput {
+ public:
+ tInput(TPpContext* p) : done(false), pp(p) { }
+ virtual ~tInput() { }
+
+ virtual int scan(TPpToken*) = 0;
+ virtual int getch() = 0;
+ virtual void ungetch() = 0;
+ virtual bool peekPasting() { return false; } // true when about to see ##
+ virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste
+ virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define)
+ virtual bool isMacroInput() { return false; }
+
+ // Will be called when we start reading tokens from this instance
+ virtual void notifyActivated() {}
+ // Will be called when we do not read tokens from this instance anymore
+ virtual void notifyDeleted() {}
+ protected:
+ bool done;
+ TPpContext* pp;
+ };
+
+ void setInput(TInputScanner& input, bool versionWillBeError);
+
+ void pushInput(tInput* in)
+ {
+ inputStack.push_back(in);
+ in->notifyActivated();
+ }
+ void popInput()
+ {
+ inputStack.back()->notifyDeleted();
+ delete inputStack.back();
+ inputStack.pop_back();
+ }
+
+ //
+ // From PpTokens.cpp
+ //
+
+ // Capture the needed parts of a token stream for macro recording/playback.
+ class TokenStream {
+ public:
+ // Manage a stream of these 'Token', which capture the relevant parts
+ // of a TPpToken, plus its atom.
+ class Token {
+ public:
+ Token(int atom, const TPpToken& ppToken) :
+ atom(atom),
+ space(ppToken.space),
+ i64val(ppToken.i64val),
+ name(ppToken.name) { }
+ int get(TPpToken& ppToken)
+ {
+ ppToken.clear();
+ ppToken.space = space;
+ ppToken.i64val = i64val;
+ snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str());
+ return atom;
+ }
+ bool isAtom(int a) const { return atom == a; }
+ int getAtom() const { return atom; }
+ bool nonSpaced() const { return !space; }
+ protected:
+ Token() {}
+ int atom;
+ bool space; // did a space precede the token?
+ long long i64val;
+ TString name;
+ };
+
+ TokenStream() : currentPos(0) { }
+
+ void putToken(int token, TPpToken* ppToken);
+ bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); }
+ bool peekContinuedPasting(int atom)
+ {
+ // This is basically necessary because, for example, the PP
+ // tokenizer only accepts valid numeric-literals plus suffixes, so it
+ // separates a numeric-literal with a bad suffix into two tokens, which
+ // should both be pasted together as one token when token pasting.
+ //
+ // The following code is a bit more generalized than the above example.
+ if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) {
+ switch(stream[currentPos].getAtom()) {
+ case PpAtomConstInt:
+ case PpAtomConstUint:
+ case PpAtomConstInt64:
+ case PpAtomConstUint64:
+ case PpAtomConstInt16:
+ case PpAtomConstUint16:
+ case PpAtomConstFloat:
+ case PpAtomConstDouble:
+ case PpAtomConstFloat16:
+ case PpAtomConstString:
+ case PpAtomIdentifier:
+ return true;
+ default:
+ break;
+ }
+ }
+
+ return false;
+ }
+ int getToken(TParseContextBase&, TPpToken*);
+ bool atEnd() { return currentPos >= stream.size(); }
+ bool peekTokenizedPasting(bool lastTokenPastes);
+ bool peekUntokenizedPasting();
+ void reset() { currentPos = 0; }
+
+ protected:
+ TVector<Token> stream;
+ size_t currentPos;
+ };
+
+ //
+ // From Pp.cpp
+ //
+
+ struct MacroSymbol {
+ MacroSymbol() : functionLike(0), busy(0), undef(0) { }
+ TVector<int> args;
+ TokenStream body;
+ unsigned functionLike : 1; // 0 means object-like, 1 means function-like
+ unsigned busy : 1;
+ unsigned undef : 1;
+ };
+
+ typedef TMap<int, MacroSymbol> TSymbolMap;
+ TSymbolMap macroDefs; // map atoms to macro definitions
+ MacroSymbol* lookupMacroDef(int atom)
+ {
+ auto existingMacroIt = macroDefs.find(atom);
+ return (existingMacroIt == macroDefs.end()) ? nullptr : &(existingMacroIt->second);
+ }
+ void addMacroDef(int atom, MacroSymbol& macroDef) { macroDefs[atom] = macroDef; }
+
+protected:
+ TPpContext(TPpContext&);
+ TPpContext& operator=(TPpContext&);
+
+ TStringAtomMap atomStrings;
+ char* preamble; // string to parse, all before line 1 of string 0; null if there is no preamble
+ int preambleLength;
+ char** strings; // official strings of the shader, starting at string 0, line 1
+ size_t* lengths;
+ int numStrings; // how many official strings there are
+ int currentString; // which string we're currently parsing (-1 for preamble)
+
+ // Scanner data:
+ int previous_token;
+ TParseContextBase& parseContext;
+
+ // Get the next token from the *stack* of input sources, popping input sources
+ // that are out of tokens, down until an input source is found that has a token.
+ // Return EndOfInput when there are no more tokens to be found by doing this.
+ int scanToken(TPpToken* ppToken)
+ {
+ int token = EndOfInput;
+
+ while (! inputStack.empty()) {
+ token = inputStack.back()->scan(ppToken);
+ if (token != EndOfInput || inputStack.empty())
+ break;
+ popInput();
+ }
+
+ return token;
+ }
+ int getChar() { return inputStack.back()->getch(); }
+ void ungetChar() { inputStack.back()->ungetch(); }
+ bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
+ bool peekContinuedPasting(int a)
+ {
+ return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a);
+ }
+ bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
+ bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }
+
+ static const int maxIfNesting = 65;
+
+ int ifdepth; // current #if-#else-#endif nesting in the cpp.c file (pre-processor)
+ bool elseSeen[maxIfNesting]; // keep track of whether an else has been seen at a particular depth
+ int elsetracker; // counter for #if-#else-#endif constructs
+
+ class tMacroInput : public tInput {
+ public:
+ tMacroInput(TPpContext* pp) : tInput(pp), prepaste(false), postpaste(false) { }
+ virtual ~tMacroInput()
+ {
+ for (size_t i = 0; i < args.size(); ++i)
+ delete args[i];
+ for (size_t i = 0; i < expandedArgs.size(); ++i)
+ delete expandedArgs[i];
+ }
+
+ virtual int scan(TPpToken*) override;
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ bool peekPasting() override { return prepaste; }
+ bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); }
+ bool endOfReplacementList() override { return mac->body.atEnd(); }
+ bool isMacroInput() override { return true; }
+
+ MacroSymbol *mac;
+ TVector<TokenStream*> args;
+ TVector<TokenStream*> expandedArgs;
+
+ protected:
+ bool prepaste; // true if we are just before ##
+ bool postpaste; // true if we are right after ##
+ };
+
+ class tMarkerInput : public tInput {
+ public:
+ tMarkerInput(TPpContext* pp) : tInput(pp) { }
+ virtual int scan(TPpToken*) override
+ {
+ if (done)
+ return EndOfInput;
+ done = true;
+
+ return marker;
+ }
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ static const int marker = -3;
+ };
+
+ class tZeroInput : public tInput {
+ public:
+ tZeroInput(TPpContext* pp) : tInput(pp) { }
+ virtual int scan(TPpToken*) override;
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ };
+
+ std::vector<tInput*> inputStack;
+ bool errorOnVersion;
+ bool versionSeen;
+
+ //
+ // from Pp.cpp
+ //
+
+ // Used to obtain #include content.
+ TShader::Includer& includer;
+
+ int CPPdefine(TPpToken * ppToken);
+ int CPPundef(TPpToken * ppToken);
+ int CPPelse(int matchelse, TPpToken * ppToken);
+ int extraTokenCheck(int atom, TPpToken* ppToken, int token);
+ int eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
+ int evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
+ int CPPif (TPpToken * ppToken);
+ int CPPifdef(int defined, TPpToken * ppToken);
+ int CPPinclude(TPpToken * ppToken);
+ int CPPline(TPpToken * ppToken);
+ int CPPerror(TPpToken * ppToken);
+ int CPPpragma(TPpToken * ppToken);
+ int CPPversion(TPpToken * ppToken);
+ int CPPextension(TPpToken * ppToken);
+ int readCPPline(TPpToken * ppToken);
+ int scanHeaderName(TPpToken* ppToken, char delimit);
+ TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
+ MacroExpandResult MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
+
+ //
+ // From PpTokens.cpp
+ //
+ void pushTokenStreamInput(TokenStream&, bool pasting = false);
+ void UngetToken(int token, TPpToken*);
+
+ class tTokenInput : public tInput {
+ public:
+ tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) :
+ tInput(pp),
+ tokens(t),
+ lastTokenPastes(prepasting) { }
+ virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
+ bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); }
+ protected:
+ TokenStream* tokens;
+ bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token
+ };
+
+ class tUngotTokenInput : public tInput {
+ public:
+ tUngotTokenInput(TPpContext* pp, int t, TPpToken* p) : tInput(pp), token(t), lval(*p) { }
+ virtual int scan(TPpToken *) override;
+ virtual int getch() override { assert(0); return EndOfInput; }
+ virtual void ungetch() override { assert(0); }
+ protected:
+ int token;
+ TPpToken lval;
+ };
+
+ //
+ // From PpScanner.cpp
+ //
+ class tStringInput : public tInput {
+ public:
+ tStringInput(TPpContext* pp, TInputScanner& i) : tInput(pp), input(&i) { }
+ virtual int scan(TPpToken*) override;
+
+ // Scanner used to get source stream characters.
+ // - Escaped newlines are handled here, invisibly to the caller.
+ // - All forms of newline are handled, and turned into just a '\n'.
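+ //
+ // For example, a backslash-newline pair in the middle of 'float x = 1.5;'
+ // (i.e. 'float x = 1.\' on one line and '5;' on the next) comes back from
+ // getch() as the single sequence 'float x = 1.5;', consumed invisibly.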
+ int getch() override
+ {
+ int ch = input->get();
+
+ if (ch == '\\') {
+ // Move past escaped newlines, as many as sequentially exist
+ do {
+ if (input->peek() == '\r' || input->peek() == '\n') {
+ bool allowed = pp->parseContext.lineContinuationCheck(input->getSourceLoc(), pp->inComment);
+ if (! allowed && pp->inComment)
+ return '\\';
+
+ // escape one newline now
+ ch = input->get();
+ int nextch = input->get();
+ if (ch == '\r' && nextch == '\n')
+ ch = input->get();
+ else
+ ch = nextch;
+ } else
+ return '\\';
+ } while (ch == '\\');
+ }
+
+ // handle any non-escaped newline
+ if (ch == '\r' || ch == '\n') {
+ if (ch == '\r' && input->peek() == '\n')
+ input->get();
+ return '\n';
+ }
+
+ return ch;
+ }
+
+ // Scanner used to back up the source stream characters. Newlines are
+ // handled here, invisibly to the caller, meaning it has to undo exactly
+ // what getch() above does (e.g., don't leave things in the middle of a
+ // sequence of escaped newlines).
+ void ungetch() override
+ {
+ input->unget();
+
+ do {
+ int ch = input->peek();
+ if (ch == '\r' || ch == '\n') {
+ if (ch == '\n') {
+ // correct for two-character newline
+ input->unget();
+ if (input->peek() != '\r')
+ input->get();
+ }
+ // now in front of a complete newline, move past an escape character
+ input->unget();
+ if (input->peek() == '\\')
+ input->unget();
+ else {
+ input->get();
+ break;
+ }
+ } else
+ break;
+ } while (true);
+ }
+
+ protected:
+ TInputScanner* input;
+ };
+
+ // Holds a reference to included file data, as well as a
+ // prologue and an epilogue string. This can be scanned using the tInput
+ // interface and acts as a single source string.
+ class TokenizableIncludeFile : public tInput {
+ public:
+ // Copies prologue and epilogue. The includedFile must remain valid
+ // until this TokenizableIncludeFile is no longer used.
+ TokenizableIncludeFile(const TSourceLoc& startLoc,
+ const std::string& prologue,
+ TShader::Includer::IncludeResult* includedFile,
+ const std::string& epilogue,
+ TPpContext* pp)
+ : tInput(pp),
+ prologue_(prologue),
+ epilogue_(epilogue),
+ includedFile_(includedFile),
+ scanner(3, strings, lengths, nullptr, 0, 0, true),
+ prevScanner(nullptr),
+ stringInput(pp, scanner)
+ {
+ strings[0] = prologue_.data();
+ strings[1] = includedFile_->headerData;
+ strings[2] = epilogue_.data();
+
+ lengths[0] = prologue_.size();
+ lengths[1] = includedFile_->headerLength;
+ lengths[2] = epilogue_.size();
+
+ scanner.setLine(startLoc.line);
+ scanner.setString(startLoc.string);
+
+ scanner.setFile(startLoc.getFilenameStr(), 0);
+ scanner.setFile(startLoc.getFilenameStr(), 1);
+ scanner.setFile(startLoc.getFilenameStr(), 2);
+ }
+
+ // tInput methods:
+ int scan(TPpToken* t) override { return stringInput.scan(t); }
+ int getch() override { return stringInput.getch(); }
+ void ungetch() override { stringInput.ungetch(); }
+
+ void notifyActivated() override
+ {
+ prevScanner = pp->parseContext.getScanner();
+ pp->parseContext.setScanner(&scanner);
+ pp->push_include(includedFile_);
+ }
+
+ void notifyDeleted() override
+ {
+ pp->parseContext.setScanner(prevScanner);
+ pp->pop_include();
+ }
+
+ private:
+ TokenizableIncludeFile& operator=(const TokenizableIncludeFile&);
+
+ // Stores the prologue for this string.
+ const std::string prologue_;
+
+ // Stores the epilogue for this string.
+ const std::string epilogue_;
+
+ // Points to the IncludeResult that this TokenizableIncludeFile represents.
+ TShader::Includer::IncludeResult* includedFile_;
+
+ // Points to prologue_, includedFile_->headerData and epilogue_, in that
+ // order; passed to the scanner constructor. These pointers do not own the
+ // storage, which must remain valid until this object has been destroyed.
+ const char* strings[3];
+ // Lengths of the strings above, passed to the scanner constructor.
+ size_t lengths[3];
+ // Scans over the strings above.
+ TInputScanner scanner;
+ // The previous effective scanner before the scanner in this instance
+ // has been activated.
+ TInputScanner* prevScanner;
+ // Delegate object implementing the tInput interface.
+ tStringInput stringInput;
+ };
+
+ int ScanFromString(char* s);
+ void missingEndifCheck();
+ int lFloatConst(int len, int ch, TPpToken* ppToken);
+ int characterLiteral(TPpToken* ppToken);
+
+ void push_include(TShader::Includer::IncludeResult* result)
+ {
+ currentSourceFile = result->headerName;
+ includeStack.push(result);
+ }
+
+ void pop_include()
+ {
+ TShader::Includer::IncludeResult* include = includeStack.top();
+ includeStack.pop();
+ includer.releaseInclude(include);
+ if (includeStack.empty()) {
+ currentSourceFile = rootFileName;
+ } else {
+ currentSourceFile = includeStack.top()->headerName;
+ }
+ }
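+
+ // For example, if the root file includes a.h, which in turn includes b.h,
+ // currentSourceFile moves root -> a.h -> b.h as results are pushed, and
+ // back again as each pop_include() releases its IncludeResult.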
+
+ bool inComment;
+ std::string rootFileName;
+ std::stack<TShader::Includer::IncludeResult*> includeStack;
+ std::string currentSourceFile;
+
+ std::istringstream strtodStream;
+};
+
+} // end namespace glslang
+
+#endif // PPCONTEXT_H
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
new file mode 100644
index 0000000..f6f52d7
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
@@ -0,0 +1,1246 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <cstdlib>
+#include <cstring>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+#include "../Scan.h"
+
+namespace glslang {
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////// Floating point constants: /////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+//
+// Scan a single- or double-precision floating point constant.
+// Assumes that the scanner has seen at least one digit,
+// followed by either a decimal '.' or the letter 'e', or a
+// precision ending (e.g., F or LF).
+//
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+//
+// See peekContinuedPasting().
+int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
+{
+ const auto saveName = [&](int ch) {
+ if (len <= MaxTokenLength)
+ ppToken->name[len++] = static_cast<char>(ch);
+ };
+
+ // find the range of non-zero digits before the decimal point
+ int startNonZero = 0;
+ while (startNonZero < len && ppToken->name[startNonZero] == '0')
+ ++startNonZero;
+ int endNonZero = len;
+ while (endNonZero > startNonZero && ppToken->name[endNonZero-1] == '0')
+ --endNonZero;
+ int numWholeNumberDigits = endNonZero - startNonZero;
+
+ // accumulate the range's value
+ bool fastPath = numWholeNumberDigits <= 15; // when the number gets too complex, set to false
+ unsigned long long wholeNumber = 0;
+ if (fastPath) {
+ for (int i = startNonZero; i < endNonZero; ++i)
+ wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0');
+ }
+ int decimalShift = len - endNonZero;
+
+ // Decimal point:
+ bool hasDecimalOrExponent = false;
+ if (ch == '.') {
+ hasDecimalOrExponent = true;
+ saveName(ch);
+ ch = getChar();
+ int firstDecimal = len;
+
+ // 1.#INF or -1.#INF
+ if (ch == '#' && (ifdepth > 0 || parseContext.intermediate.getSource() == EShSourceHlsl)) {
+ if ((len < 2) ||
+ (len == 2 && ppToken->name[0] != '1') ||
+ (len == 3 && ppToken->name[1] != '1' && !(ppToken->name[0] == '-' || ppToken->name[0] == '+')) ||
+ (len > 3))
+ parseContext.ppError(ppToken->loc, "unexpected use of", "#", "");
+ else {
+ // we have 1.# or -1.# or +1.#, check for 'INF'
+ if ((ch = getChar()) != 'I' ||
+ (ch = getChar()) != 'N' ||
+ (ch = getChar()) != 'F')
+ parseContext.ppError(ppToken->loc, "expected 'INF'", "#", "");
+ else {
+ // we have [+-].#INF, and we are targeting IEEE 754, so wrap it up:
+ saveName('I');
+ saveName('N');
+ saveName('F');
+ ppToken->name[len] = '\0';
+ if (ppToken->name[0] == '-')
+ ppToken->i64val = 0xfff0000000000000; // -Infinity
+ else
+ ppToken->i64val = 0x7ff0000000000000; // +Infinity
+ return PpAtomConstFloat;
+ }
+ }
+ }
+
+ // Consume leading-zero digits after the decimal point
+ while (ch == '0') {
+ saveName(ch);
+ ch = getChar();
+ }
+ int startNonZeroDecimal = len;
+ int endNonZeroDecimal = len;
+
+ // Consume remaining digits, up to the exponent
+ while (ch >= '0' && ch <= '9') {
+ saveName(ch);
+ if (ch != '0')
+ endNonZeroDecimal = len;
+ ch = getChar();
+ }
+
+ // Compute accumulation up to the last non-zero digit
+ if (endNonZeroDecimal > startNonZeroDecimal) {
+ numWholeNumberDigits += endNonZeroDecimal - endNonZero - 1; // don't include the "."
+ if (numWholeNumberDigits > 15)
+ fastPath = false;
+ if (fastPath) {
+ for (int i = endNonZero; i < endNonZeroDecimal; ++i) {
+ if (ppToken->name[i] != '.')
+ wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0');
+ }
+ }
+ decimalShift = firstDecimal - endNonZeroDecimal;
+ }
+ }
+
+ // Exponent:
+ bool negativeExponent = false;
+ double exponentValue = 0.0;
+ int exponent = 0;
+ {
+ if (ch == 'e' || ch == 'E') {
+ hasDecimalOrExponent = true;
+ saveName(ch);
+ ch = getChar();
+ if (ch == '+' || ch == '-') {
+ negativeExponent = ch == '-';
+ saveName(ch);
+ ch = getChar();
+ }
+ if (ch >= '0' && ch <= '9') {
+ while (ch >= '0' && ch <= '9') {
+ exponent = exponent * 10 + (ch - '0');
+ saveName(ch);
+ ch = getChar();
+ }
+ } else {
+ parseContext.ppError(ppToken->loc, "bad character in float exponent", "", "");
+ }
+ }
+
+ // Compensate for location of decimal
+ if (negativeExponent)
+ exponent -= decimalShift;
+ else {
+ exponent += decimalShift;
+ if (exponent < 0) {
+ negativeExponent = true;
+ exponent = -exponent;
+ }
+ }
+ if (exponent > 22)
+ fastPath = false;
+
+ if (fastPath) {
+ // Compute the floating-point value of the exponent
+ exponentValue = 1.0;
+ if (exponent > 0) {
+ double expFactor = 10;
+ while (exponent > 0) {
+ if (exponent & 0x1)
+ exponentValue *= expFactor;
+ expFactor *= expFactor;
+ exponent >>= 1;
+ }
+ }
+ }
+ }
+
+ // Suffix:
+ bool isDouble = false;
+ bool isFloat16 = false;
+ if (ch == 'l' || ch == 'L') {
+ if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
+ parseContext.doubleCheck(ppToken->loc, "double floating-point suffix");
+ if (ifdepth == 0 && !hasDecimalOrExponent)
+ parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
+ if (parseContext.intermediate.getSource() == EShSourceGlsl) {
+ int ch2 = getChar();
+ if (ch2 != 'f' && ch2 != 'F') {
+ ungetChar();
+ ungetChar();
+ } else {
+ saveName(ch);
+ saveName(ch2);
+ isDouble = true;
+ }
+ } else if (parseContext.intermediate.getSource() == EShSourceHlsl) {
+ saveName(ch);
+ isDouble = true;
+ }
+ } else if (ch == 'h' || ch == 'H') {
+ if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
+ parseContext.float16Check(ppToken->loc, "half floating-point suffix");
+ if (ifdepth == 0 && !hasDecimalOrExponent)
+ parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
+ if (parseContext.intermediate.getSource() == EShSourceGlsl) {
+ int ch2 = getChar();
+ if (ch2 != 'f' && ch2 != 'F') {
+ ungetChar();
+ ungetChar();
+ } else {
+ saveName(ch);
+ saveName(ch2);
+ isFloat16 = true;
+ }
+ } else if (parseContext.intermediate.getSource() == EShSourceHlsl) {
+ saveName(ch);
+ isFloat16 = true;
+ }
+ } else if (ch == 'f' || ch == 'F') {
+ if (ifdepth == 0)
+ parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
+ if (ifdepth == 0 && !parseContext.relaxedErrors())
+ parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix");
+ if (ifdepth == 0 && !hasDecimalOrExponent)
+ parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
+ saveName(ch);
+ } else
+ ungetChar();
+
+ // Patch up the name and length for overflow
+
+ if (len > MaxTokenLength) {
+ len = MaxTokenLength;
+ parseContext.ppError(ppToken->loc, "float literal too long", "", "");
+ }
+ ppToken->name[len] = '\0';
+
+ // Compute the numerical value
+ if (fastPath) {
+ // scale the whole number by the value computed for the exponent above
+ if (exponentValue == 0.0)
+ ppToken->dval = (double)wholeNumber;
+ else if (negativeExponent)
+ ppToken->dval = (double)wholeNumber / exponentValue;
+ else
+ ppToken->dval = (double)wholeNumber * exponentValue;
+ } else {
+ // slow path
+ ppToken->dval = 0.0;
+
+ // remove suffix
+ TString numstr(ppToken->name);
+ if (numstr.back() == 'f' || numstr.back() == 'F')
+ numstr.pop_back();
+ if (numstr.back() == 'h' || numstr.back() == 'H')
+ numstr.pop_back();
+ if (numstr.back() == 'l' || numstr.back() == 'L')
+ numstr.pop_back();
+
+ // use platform library
+ strtodStream.clear();
+ strtodStream.str(numstr.c_str());
+ strtodStream >> ppToken->dval;
+ if (strtodStream.fail()) {
+ // Assume failure combined with a large exponent was overflow, in
+ // an attempt to set INF.
+ if (!negativeExponent && exponent + numWholeNumberDigits > 300)
+ ppToken->i64val = 0x7ff0000000000000; // +Infinity
+ // Assume failure combined with a very negative exponent was underflow, so set 0.0.
+ if (negativeExponent && exponent + numWholeNumberDigits > 300)
+ ppToken->dval = 0.0;
+ // Otherwise the reason for failure is unknown; either
+ // - the 0.0 is still there, or
+ // - something reasonable was written that is better than 0.0.
+ }
+ }
+
+ // Return the right token type
+ if (isDouble)
+ return PpAtomConstDouble;
+ else if (isFloat16)
+ return PpAtomConstFloat16;
+ else
+ return PpAtomConstFloat;
+}
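+
+// A minimal sketch of the fast path above, assuming at most 15 mantissa
+// digits and a scale of at most 10^22, so every intermediate value is exactly
+// representable in a double. The name fastPathValue is illustrative; the
+// block is compiled out and is not part of the upstream glslang source.
+#if 0
+static double fastPathValue(unsigned long long wholeNumber, int exponent, bool negativeExponent)
+{
+    // 10^exponent by binary exponentiation, mirroring the loop in lFloatConst()
+    double scale = 1.0;
+    double factor = 10.0;
+    while (exponent > 0) {
+        if (exponent & 0x1)
+            scale *= factor;
+        factor *= factor;
+        exponent >>= 1;
+    }
+    // e.g. "123.45" arrives here as wholeNumber == 12345 with a negative exponent of 2
+    return negativeExponent ? (double)wholeNumber / scale
+                            : (double)wholeNumber * scale;
+}
+#endif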
+
+// Recognize a character literal.
+//
+// The first ' has already been accepted, read the rest, through the closing '.
+//
+// Always returns PpAtomConstInt.
+//
+int TPpContext::characterLiteral(TPpToken* ppToken)
+{
+ ppToken->name[0] = 0;
+ ppToken->ival = 0;
+
+ if (parseContext.intermediate.getSource() != EShSourceHlsl) {
+ // illegal, except in a macro definition, in which case we report the character
+ return '\'';
+ }
+
+ int ch = getChar();
+ switch (ch) {
+ case '\'':
+ // As empty sequence: ''
+ parseContext.ppError(ppToken->loc, "unexpected", "\'", "");
+ return PpAtomConstInt;
+ case '\\':
+ // As escape sequence: '\XXX'
+ switch (ch = getChar()) {
+ case 'a':
+ ppToken->ival = 7;
+ break;
+ case 'b':
+ ppToken->ival = 8;
+ break;
+ case 't':
+ ppToken->ival = 9;
+ break;
+ case 'n':
+ ppToken->ival = 10;
+ break;
+ case 'v':
+ ppToken->ival = 11;
+ break;
+ case 'f':
+ ppToken->ival = 12;
+ break;
+ case 'r':
+ ppToken->ival = 13;
+ break;
+ case 'x':
+ case '0':
+ parseContext.ppError(ppToken->loc, "octal and hex sequences not supported", "\\", "");
+ break;
+ default:
+ // This catches '\'', '\"', '\?', etc.
+ // Also, things like '\C' mean the same thing as 'C'
+ // (after the above cases are filtered out).
+ ppToken->ival = ch;
+ break;
+ }
+ break;
+ default:
+ ppToken->ival = ch;
+ break;
+ }
+ ppToken->name[0] = (char)ppToken->ival;
+ ppToken->name[1] = '\0';
+ ch = getChar();
+ if (ch != '\'') {
+ parseContext.ppError(ppToken->loc, "expected", "\'", "");
+ // Look ahead for a closing '
+ do {
+ ch = getChar();
+ } while (ch != '\'' && ch != EndOfInput && ch != '\n');
+ }
+
+ return PpAtomConstInt;
+}
+
+//
+// Scanner used to tokenize source stream.
+//
+// N.B. Invalid numeric suffixes are not consumed.
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This affects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+// See peekContinuedPasting().
+//
+int TPpContext::tStringInput::scan(TPpToken* ppToken)
+{
+ int AlreadyComplained = 0;
+ int len = 0;
+ int ch = 0;
+ int ii = 0;
+ unsigned long long ival = 0;
+ const auto floatingPointChar = [&](int ch) { return ch == '.' || ch == 'e' || ch == 'E' ||
+ ch == 'f' || ch == 'F' ||
+ ch == 'h' || ch == 'H'; };
+
+ static const char* const Int64_Extensions[] = {
+ E_GL_ARB_gpu_shader_int64,
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int64 };
+ static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]);
+
+ static const char* const Int16_Extensions[] = {
+#ifdef AMD_EXTENSIONS
+ E_GL_AMD_gpu_shader_int16,
+#endif
+ E_GL_EXT_shader_explicit_arithmetic_types,
+ E_GL_EXT_shader_explicit_arithmetic_types_int16 };
+ static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]);
+
+ ppToken->ival = 0;
+ ppToken->i64val = 0;
+ ppToken->space = false;
+ ch = getch();
+ for (;;) {
+ while (ch == ' ' || ch == '\t') {
+ ppToken->space = true;
+ ch = getch();
+ }
+
+ ppToken->loc = pp->parseContext.getCurrentLoc();
+ len = 0;
+ switch (ch) {
+ default:
+ // Single character token, including EndOfInput, '#' and '\' (escaped newlines are handled at a lower level, so this is just a '\' token)
+ if (ch > PpAtomMaxSingle)
+ ch = PpAtomBadToken;
+ return ch;
+
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'L': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z': case '_':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+ do {
+ if (len < MaxTokenLength) {
+ ppToken->name[len++] = (char)ch;
+ ch = getch();
+ } else {
+ if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "name too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ch = getch();
+ }
+ } while ((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '_');
+
+ // a line continuation with no token before or after makes len == 0; start over, skipping white space, etc.
+ if (len == 0)
+ continue;
+
+ ppToken->name[len] = '\0';
+ ungetch();
+ return PpAtomIdentifier;
+ case '0':
+ ppToken->name[len++] = (char)ch;
+ ch = getch();
+ if (ch == 'x' || ch == 'X') {
+ // must be hexadecimal
+
+ bool isUnsigned = false;
+ bool isInt64 = false;
+ bool isInt16 = false;
+ ppToken->name[len++] = (char)ch;
+ ch = getch();
+ if ((ch >= '0' && ch <= '9') ||
+ (ch >= 'A' && ch <= 'F') ||
+ (ch >= 'a' && ch <= 'f')) {
+
+ ival = 0;
+ do {
+ if (len < MaxTokenLength && ival <= 0x0fffffffffffffffull) {
+ ppToken->name[len++] = (char)ch;
+ if (ch >= '0' && ch <= '9') {
+ ii = ch - '0';
+ } else if (ch >= 'A' && ch <= 'F') {
+ ii = ch - 'A' + 10;
+ } else if (ch >= 'a' && ch <= 'f') {
+ ii = ch - 'a' + 10;
+ } else
+ pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", "");
+ ival = (ival << 4) | ii;
+ } else {
+ if (! AlreadyComplained) {
+ if (len < MaxTokenLength)
+ pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", "");
+ else
+ pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ival = 0xffffffffffffffffull;
+ }
+ ch = getch();
+ } while ((ch >= '0' && ch <= '9') ||
+ (ch >= 'A' && ch <= 'F') ||
+ (ch >= 'a' && ch <= 'f'));
+ } else {
+ pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", "");
+ }
+ if (ch == 'u' || ch == 'U') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isUnsigned = true;
+
+ int nextCh = getch();
+ if (nextCh == 'l' || nextCh == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt64 = true;
+ } else
+ ungetch();
+
+#ifdef AMD_EXTENSIONS
+ nextCh = getch();
+ if ((nextCh == 's' || nextCh == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt16 = true;
+ } else
+ ungetch();
+#endif
+ } else if (ch == 'l' || ch == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt64 = true;
+#ifdef AMD_EXTENSIONS
+ } else if ((ch == 's' || ch == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt16 = true;
+#endif
+ } else
+ ungetch();
+ ppToken->name[len] = '\0';
+
+ if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (pp->ifdepth == 0) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "64-bit hexadecimal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int64_Extensions, Int64_Extensions, "64-bit hexadecimal literal");
+ }
+ ppToken->i64val = ival;
+ return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64;
+ } else if (isInt16) {
+ if (pp->ifdepth == 0) {
+ if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "16-bit hexadecimal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int16_Extensions, Int16_Extensions, "16-bit hexadecimal literal");
+ }
+ }
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
+ } else {
+ if (ival > 0xffffffffu && !AlreadyComplained)
+ pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", "");
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint : PpAtomConstInt;
+ }
+ } else {
+ // could be octal integer or floating point, speculative pursue octal until it must be floating point
+
+ bool isUnsigned = false;
+ bool isInt64 = false;
+ bool isInt16 = false;
+ bool octalOverflow = false;
+ bool nonOctal = false;
+ ival = 0;
+
+ // see how much octal-like stuff we can read
+ while (ch >= '0' && ch <= '7') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ if (ival <= 0x1fffffffffffffffull) {
+ ii = ch - '0';
+ ival = (ival << 3) | ii;
+ } else
+ octalOverflow = true;
+ ch = getch();
+ }
+
+ // could be part of a float...
+ if (ch == '8' || ch == '9') {
+ nonOctal = true;
+ do {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ch = getch();
+ } while (ch >= '0' && ch <= '9');
+ }
+ if (floatingPointChar(ch))
+ return pp->lFloatConst(len, ch, ppToken);
+
+ // wasn't a float, so must be octal...
+ if (nonOctal)
+ pp->parseContext.ppError(ppToken->loc, "octal literal digit too large", "", "");
+
+ if (ch == 'u' || ch == 'U') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isUnsigned = true;
+
+ int nextCh = getch();
+ if (nextCh == 'l' || nextCh == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt64 = true;
+ } else
+ ungetch();
+
+#ifdef AMD_EXTENSIONS
+ nextCh = getch();
+ if ((nextCh == 's' || nextCh == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt16 = true;
+ } else
+ ungetch();
+#endif
+ } else if (ch == 'l' || ch == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt64 = true;
+#ifdef AMD_EXTENSIONS
+ } else if ((ch == 's' || ch == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt16 = true;
+#endif
+ } else
+ ungetch();
+ ppToken->name[len] = '\0';
+
+ if (!isInt64 && ival > 0xffffffffu)
+ octalOverflow = true;
+
+ if (octalOverflow)
+ pp->parseContext.ppError(ppToken->loc, "octal literal too big", "", "");
+
+ if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (pp->ifdepth == 0) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "64-bit octal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int64_Extensions, Int64_Extensions, "64-bit octal literal");
+ }
+ ppToken->i64val = ival;
+ return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64;
+ } else if (isInt16) {
+ if (pp->ifdepth == 0) {
+ if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "16-bit octal literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int16_Extensions, Int16_Extensions, "16-bit octal literal");
+ }
+ }
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
+ } else {
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint : PpAtomConstInt;
+ }
+ }
+ break;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ // can't be hexadecimal or octal, is either decimal or floating point
+
+ do {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ else if (! AlreadyComplained) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", "");
+ AlreadyComplained = 1;
+ }
+ ch = getch();
+ } while (ch >= '0' && ch <= '9');
+ if (floatingPointChar(ch))
+ return pp->lFloatConst(len, ch, ppToken);
+ else {
+ // Finish handling signed and unsigned integers
+ int numericLen = len;
+ bool isUnsigned = false;
+ bool isInt64 = false;
+ bool isInt16 = false;
+ if (ch == 'u' || ch == 'U') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isUnsigned = true;
+
+ int nextCh = getch();
+ if (nextCh == 'l' || nextCh == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt64 = true;
+ } else
+ ungetch();
+
+#ifdef AMD_EXTENSIONS
+ nextCh = getch();
+ if ((nextCh == 's' || nextCh == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)nextCh;
+ isInt16 = true;
+ } else
+ ungetch();
+#endif
+ } else if (ch == 'l' || ch == 'L') {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt64 = true;
+#ifdef AMD_EXTENSIONS
+ } else if ((ch == 's' || ch == 'S') &&
+ pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (len < MaxTokenLength)
+ ppToken->name[len++] = (char)ch;
+ isInt16 = true;
+#endif
+ } else
+ ungetch();
+
+ ppToken->name[len] = '\0';
+ ival = 0;
+ const unsigned oneTenthMaxInt = 0xFFFFFFFFu / 10;
+ const unsigned remainderMaxInt = 0xFFFFFFFFu - 10 * oneTenthMaxInt;
+ const unsigned long long oneTenthMaxInt64 = 0xFFFFFFFFFFFFFFFFull / 10;
+ const unsigned long long remainderMaxInt64 = 0xFFFFFFFFFFFFFFFFull - 10 * oneTenthMaxInt64;
+ const unsigned short oneTenthMaxInt16 = 0xFFFFu / 10;
+ const unsigned short remainderMaxInt16 = 0xFFFFu - 10 * oneTenthMaxInt16;
+ for (int i = 0; i < numericLen; i++) {
+ ch = ppToken->name[i] - '0';
+ bool overflow = false;
+ if (isInt64)
+ overflow = (ival > oneTenthMaxInt64 || (ival == oneTenthMaxInt64 && (unsigned long long)ch > remainderMaxInt64));
+ else if (isInt16)
+ overflow = (ival > oneTenthMaxInt16 || (ival == oneTenthMaxInt16 && (unsigned short)ch > remainderMaxInt16));
+ else
+ overflow = (ival > oneTenthMaxInt || (ival == oneTenthMaxInt && (unsigned)ch > remainderMaxInt));
+ if (overflow) {
+ pp->parseContext.ppError(ppToken->loc, "numeric literal too big", "", "");
+ ival = 0xFFFFFFFFFFFFFFFFull;
+ break;
+ } else
+ ival = ival * 10 + ch;
+ }
+
+ if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ if (pp->ifdepth == 0) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "64-bit literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int64_Extensions, Int64_Extensions, "64-bit literal");
+ }
+ ppToken->i64val = ival;
+ return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64;
+ } else if (isInt16) {
+ if (pp->ifdepth == 0 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
+ pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
+ "16-bit literal");
+ pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0,
+ Num_Int16_Extensions, Int16_Extensions, "16-bit literal");
+ }
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
+ } else {
+ ppToken->ival = (int)ival;
+ return isUnsigned ? PpAtomConstUint : PpAtomConstInt;
+ }
+ }
+ break;
+ case '-':
+ ch = getch();
+ if (ch == '-') {
+ return PpAtomDecrement;
+ } else if (ch == '=') {
+ return PPAtomSubAssign;
+ } else {
+ ungetch();
+ return '-';
+ }
+ case '+':
+ ch = getch();
+ if (ch == '+') {
+ return PpAtomIncrement;
+ } else if (ch == '=') {
+ return PPAtomAddAssign;
+ } else {
+ ungetch();
+ return '+';
+ }
+ case '*':
+ ch = getch();
+ if (ch == '=') {
+ return PPAtomMulAssign;
+ } else {
+ ungetch();
+ return '*';
+ }
+ case '%':
+ ch = getch();
+ if (ch == '=') {
+ return PPAtomModAssign;
+ } else {
+ ungetch();
+ return '%';
+ }
+ case '^':
+ ch = getch();
+ if (ch == '^') {
+ return PpAtomXor;
+ } else if (ch == '=') {
+ return PpAtomXorAssign;
+ } else {
+ ungetch();
+ return '^';
+ }
+
+ case '=':
+ ch = getch();
+ if (ch == '=') {
+ return PpAtomEQ;
+ } else {
+ ungetch();
+ return '=';
+ }
+ case '!':
+ ch = getch();
+ if (ch == '=') {
+ return PpAtomNE;
+ } else {
+ ungetch();
+ return '!';
+ }
+ case '|':
+ ch = getch();
+ if (ch == '|') {
+ return PpAtomOr;
+ } else if (ch == '=') {
+ return PpAtomOrAssign;
+ } else {
+ ungetch();
+ return '|';
+ }
+ case '&':
+ ch = getch();
+ if (ch == '&') {
+ return PpAtomAnd;
+ } else if (ch == '=') {
+ return PpAtomAndAssign;
+ } else {
+ ungetch();
+ return '&';
+ }
+ case '<':
+ ch = getch();
+ if (ch == '<') {
+ ch = getch();
+ if (ch == '=')
+ return PpAtomLeftAssign;
+ else {
+ ungetch();
+ return PpAtomLeft;
+ }
+ } else if (ch == '=') {
+ return PpAtomLE;
+ } else {
+ ungetch();
+ return '<';
+ }
+ case '>':
+ ch = getch();
+ if (ch == '>') {
+ ch = getch();
+ if (ch == '=')
+ return PpAtomRightAssign;
+ else {
+ ungetch();
+ return PpAtomRight;
+ }
+ } else if (ch == '=') {
+ return PpAtomGE;
+ } else {
+ ungetch();
+ return '>';
+ }
+ case '.':
+ ch = getch();
+ if (ch >= '0' && ch <= '9') {
+ ungetch();
+ return pp->lFloatConst(0, '.', ppToken);
+ } else {
+ ungetch();
+ return '.';
+ }
+ case '/':
+ ch = getch();
+ if (ch == '/') {
+ pp->inComment = true;
+ do {
+ ch = getch();
+ } while (ch != '\n' && ch != EndOfInput);
+ ppToken->space = true;
+ pp->inComment = false;
+
+ return ch;
+ } else if (ch == '*') {
+ ch = getch();
+ do {
+ while (ch != '*') {
+ if (ch == EndOfInput) {
+ pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", "");
+ return ch;
+ }
+ ch = getch();
+ }
+ ch = getch();
+ if (ch == EndOfInput) {
+ pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", "");
+ return ch;
+ }
+ } while (ch != '/');
+ ppToken->space = true;
+ // loop again to get the next token...
+ break;
+ } else if (ch == '=') {
+ return PPAtomDivAssign;
+ } else {
+ ungetch();
+ return '/';
+ }
+ break;
+ case '\'':
+ return pp->characterLiteral(ppToken);
+ case '"':
+ // TODO: If this gets enhanced to handle escape sequences, or
+ // anything that is different than what #include needs, then
+ // #include needs to use scanHeaderName() for this.
+ ch = getch();
+ while (ch != '"' && ch != '\n' && ch != EndOfInput) {
+ if (len < MaxTokenLength) {
+ ppToken->name[len] = (char)ch;
+ len++;
+ ch = getch();
+ } else
+ break;
+ }
+ ppToken->name[len] = '\0';
+ if (ch != '"') {
+ ungetch();
+ pp->parseContext.ppError(ppToken->loc, "End of line in string", "string", "");
+ }
+ return PpAtomConstString;
+ case ':':
+ ch = getch();
+ if (ch == ':')
+ return PpAtomColonColon;
+ ungetch();
+ return ':';
+ }
+
+ ch = getch();
+ }
+}
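+
+// A minimal sketch of the decimal overflow guard used in scan() above,
+// assuming 32-bit unsigned: accumulating one more digit overflows exactly
+// when ival * 10 + digit would exceed the type's maximum. The name is
+// illustrative; the block is compiled out and not part of the upstream source.
+#if 0
+static bool wouldOverflowUint32(unsigned ival, int digit)
+{
+    const unsigned oneTenthMaxInt  = 0xFFFFFFFFu / 10;                  // 429496729
+    const unsigned remainderMaxInt = 0xFFFFFFFFu - 10 * oneTenthMaxInt; // 5
+    return ival > oneTenthMaxInt ||
+           (ival == oneTenthMaxInt && (unsigned)digit > remainderMaxInt);
+}
+#endif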
+
+//
+// The main functional entry point into the preprocessor, which will
+// scan the source strings to figure out and return the next processing token.
+//
+// Return the token, or EndOfInput when no more tokens.
+//
+int TPpContext::tokenize(TPpToken& ppToken)
+{
+ for(;;) {
+ int token = scanToken(&ppToken);
+
+ // Handle token-pasting logic
+ token = tokenPaste(token, ppToken);
+
+ if (token == EndOfInput) {
+ missingEndifCheck();
+ return EndOfInput;
+ }
+ if (token == '#') {
+ if (previous_token == '\n') {
+ token = readCPPline(&ppToken);
+ if (token == EndOfInput) {
+ missingEndifCheck();
+ return EndOfInput;
+ }
+ continue;
+ } else {
+ parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", "");
+ return EndOfInput;
+ }
+ }
+ previous_token = token;
+
+ if (token == '\n')
+ continue;
+
+ // expand macros
+ if (token == PpAtomIdentifier) {
+ switch (MacroExpand(&ppToken, false, true)) {
+ case MacroExpandNotStarted:
+ break;
+ case MacroExpandError:
+ return EndOfInput;
+ case MacroExpandStarted:
+ case MacroExpandUndef:
+ continue;
+ }
+ }
+
+ switch (token) {
+ case PpAtomIdentifier:
+ case PpAtomConstInt:
+ case PpAtomConstUint:
+ case PpAtomConstFloat:
+ case PpAtomConstInt64:
+ case PpAtomConstUint64:
+ case PpAtomConstInt16:
+ case PpAtomConstUint16:
+ case PpAtomConstDouble:
+ case PpAtomConstFloat16:
+ if (ppToken.name[0] == '\0')
+ continue;
+ break;
+ case PpAtomConstString:
+ if (ifdepth == 0 && parseContext.intermediate.getSource() != EShSourceHlsl) {
+ // HLSL allows string literals.
+ parseContext.ppError(ppToken.loc, "string literals not supported", "\"\"", "");
+ continue;
+ }
+ break;
+ case '\'':
+ parseContext.ppError(ppToken.loc, "character literals not supported", "\'", "");
+ continue;
+ default:
+ snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(token));
+ break;
+ }
+
+ return token;
+ }
+}
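+
+// For example, given the (illustrative) input
+//
+//     #define N 4
+//     int a[N];
+//
+// the '#' arrives with previous_token == '\n', so readCPPline() consumes the
+// whole directive; on the next line, MacroExpand() replaces N with "4" before
+// the token is returned.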
+
+//
+// Do all token-pasting related combining of two pasted tokens when getting a
+// stream of tokens from a replacement list. Degenerates to no processing if a
+// replacement list is not the source of the token stream.
+//
+int TPpContext::tokenPaste(int token, TPpToken& ppToken)
+{
+ // starting with ## is illegal, skip to next token
+ if (token == PpAtomPaste) {
+ parseContext.ppError(ppToken.loc, "unexpected location", "##", "");
+ return scanToken(&ppToken);
+ }
+
+ int resultToken = token; // "foo" pasted with "35" is an identifier, not a number
+
+ // ## can be chained, process all in the chain at once
+ while (peekPasting()) {
+ TPpToken pastedPpToken;
+
+ // next token has to be ##
+ token = scanToken(&pastedPpToken);
+ assert(token == PpAtomPaste);
+
+ // This covers end of macro expansion
+ if (endOfReplacementList()) {
+ parseContext.ppError(ppToken.loc, "unexpected location; end of replacement list", "##", "");
+ break;
+ }
+
+ // Get the token(s) after the ##.
+ // Because of "space" semantics, and prior tokenization, what
+ // appeared a single token, e.g. "3A", might have been tokenized
+ // into two tokens "3" and "A", but the "A" will have 'space' set to
+ // false. Accumulate all of these to recreate the original lexical
+ // appearing token.
+ do {
+ token = scanToken(&pastedPpToken);
+
+ // This covers end of argument expansion
+ if (token == tMarkerInput::marker) {
+ parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
+ return resultToken;
+ }
+
+ // get the token text
+ switch (resultToken) {
+ case PpAtomIdentifier:
+ // already have the correct text in ppToken.name
+ break;
+ case '=':
+ case '!':
+ case '-':
+ case '~':
+ case '+':
+ case '*':
+ case '/':
+ case '%':
+ case '<':
+ case '>':
+ case '|':
+ case '^':
+ case '&':
+ case PpAtomRight:
+ case PpAtomLeft:
+ case PpAtomAnd:
+ case PpAtomOr:
+ case PpAtomXor:
+ snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken));
+ snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token));
+ break;
+ default:
+ parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
+ return resultToken;
+ }
+
+ // combine the tokens
+ if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
+ parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
+ return resultToken;
+ }
+ snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name),
+ "%s", pastedPpToken.name);
+
+ // correct the kind of token we are making, if needed (identifiers stay identifiers)
+ if (resultToken != PpAtomIdentifier) {
+ int newToken = atomStrings.getAtom(ppToken.name);
+ if (newToken > 0)
+ resultToken = newToken;
+ else
+ parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
+ }
+ } while (peekContinuedPasting(resultToken));
+ }
+
+ return resultToken;
+}
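+
+// For example (illustrative input, not from the source):
+//
+//     #define PASTE(a, b, c) a ## b ## c
+//     PASTE(pre, fix, 42)    // combined left to right into the single
+//                            // identifier "prefix42"
+//
+// When non-identifier tokens are pasted, e.g. '<' with '<', the combined text
+// is looked up with atomStrings.getAtom() and re-resolves to the
+// multi-character atom (here PpAtomLeft).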
+
+// Checks if we've seen balanced #if...#endif
+void TPpContext::missingEndifCheck()
+{
+ if (ifdepth > 0)
+ parseContext.ppError(parseContext.getCurrentLoc(), "missing #endif", "", "");
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp
new file mode 100644
index 0000000..ac9d8ac
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp
@@ -0,0 +1,219 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+//
+// For recording and playing back the stream of tokens in a macro definition.
+//
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/)
+#define snprintf sprintf_s
+#endif
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+
+namespace glslang {
+
+// Add a token (including backing string) to the end of a macro
+// token stream, for later playback.
+void TPpContext::TokenStream::putToken(int atom, TPpToken* ppToken)
+{
+ TokenStream::Token streamToken(atom, *ppToken);
+ stream.push_back(streamToken);
+}
+
+// Read the next token from a macro token stream.
+int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
+{
+ if (atEnd())
+ return EndOfInput;
+
+ int atom = stream[currentPos++].get(*ppToken);
+ ppToken->loc = parseContext.getCurrentLoc();
+
+ // Check for ##, unless the current # is the last character
+ if (atom == '#') {
+ if (peekToken('#')) {
+ parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
+ parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
+ currentPos++;
+ atom = PpAtomPaste;
+ }
+ }
+
+ return atom;
+}
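+
+// For example, a replacement list recorded from "a ## b" stores the two '#'
+// characters as separate tokens; on playback, getToken() folds them back into
+// a single PpAtomPaste (after the profile checks above).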
+
+// We are pasting if
+// 1. we are preceding a pasting operator within this stream
+// or
+// 2. the entire macro is preceding a pasting operator (lastTokenPastes)
+// and we are also on the last token
+bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
+{
+ // 1. preceding ##?
+
+ size_t savePos = currentPos;
+ // skip white space
+ while (peekToken(' '))
+ ++currentPos;
+ if (peekToken(PpAtomPaste)) {
+ currentPos = savePos;
+ return true;
+ }
+
+ // 2. last token and we've been told after this there will be a ##
+
+ if (! lastTokenPastes)
+ return false;
+ // Getting here means the last token will be pasted, after this
+
+ // Are we at the last non-whitespace token?
+ savePos = currentPos;
+ bool moreTokens = false;
+ do {
+ if (atEnd())
+ break;
+ if (!peekToken(' ')) {
+ moreTokens = true;
+ break;
+ }
+ ++currentPos;
+ } while (true);
+ currentPos = savePos;
+
+ return !moreTokens;
+}
+
+// See if the next non-white-space tokens are two consecutive #
+bool TPpContext::TokenStream::peekUntokenizedPasting()
+{
+ // don't return early, have to restore this
+ size_t savePos = currentPos;
+
+ // skip white-space
+ while (peekToken(' '))
+ ++currentPos;
+
+ // check for ##
+ bool pasting = false;
+ if (peekToken('#')) {
+ ++currentPos;
+ if (peekToken('#'))
+ pasting = true;
+ }
+
+ currentPos = savePos;
+
+ return pasting;
+}
+
+void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting)
+{
+ pushInput(new tTokenInput(this, &ts, prepasting));
+ ts.reset();
+}
+
+int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken)
+{
+ if (done)
+ return EndOfInput;
+
+ int ret = token;
+ *ppToken = lval;
+ done = true;
+
+ return ret;
+}
+
+void TPpContext::UngetToken(int token, TPpToken* ppToken)
+{
+ pushInput(new tUngotTokenInput(this, token, ppToken));
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h
new file mode 100644
index 0000000..7b0f815
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h
@@ -0,0 +1,179 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+/****************************************************************************\
+Copyright (c) 2002, NVIDIA Corporation.
+
+NVIDIA Corporation("NVIDIA") supplies this software to you in
+consideration of your agreement to the following terms, and your use,
+installation, modification or redistribution of this NVIDIA software
+constitutes acceptance of these terms. If you do not agree with these
+terms, please do not use, install, modify or redistribute this NVIDIA
+software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef PARSER_H
+#define PARSER_H
+
+namespace glslang {
+
+// Multi-character tokens
+enum EFixedAtoms {
+ // single character tokens get their own char value as their token; start here for multi-character tokens
+ PpAtomMaxSingle = 127,
+
+ // replace bad character tokens with this, to avoid accidental aliasing with the below
+ PpAtomBadToken,
+
+ // Operators
+
+ PPAtomAddAssign,
+ PPAtomSubAssign,
+ PPAtomMulAssign,
+ PPAtomDivAssign,
+ PPAtomModAssign,
+
+ PpAtomRight,
+ PpAtomLeft,
+
+ PpAtomRightAssign,
+ PpAtomLeftAssign,
+ PpAtomAndAssign,
+ PpAtomOrAssign,
+ PpAtomXorAssign,
+
+ PpAtomAnd,
+ PpAtomOr,
+ PpAtomXor,
+
+ PpAtomEQ,
+ PpAtomNE,
+ PpAtomGE,
+ PpAtomLE,
+
+ PpAtomDecrement,
+ PpAtomIncrement,
+
+ PpAtomColonColon,
+
+ PpAtomPaste,
+
+ // Constants
+
+ PpAtomConstInt,
+ PpAtomConstUint,
+ PpAtomConstInt64,
+ PpAtomConstUint64,
+ PpAtomConstInt16,
+ PpAtomConstUint16,
+ PpAtomConstFloat,
+ PpAtomConstDouble,
+ PpAtomConstFloat16,
+ PpAtomConstString,
+
+ // Identifiers
+ PpAtomIdentifier,
+
+ // preprocessor "keywords"
+
+ PpAtomDefine,
+ PpAtomUndef,
+
+ PpAtomIf,
+ PpAtomIfdef,
+ PpAtomIfndef,
+ PpAtomElse,
+ PpAtomElif,
+ PpAtomEndif,
+
+ PpAtomLine,
+ PpAtomPragma,
+ PpAtomError,
+
+ // #version ...
+ PpAtomVersion,
+ PpAtomCore,
+ PpAtomCompatibility,
+ PpAtomEs,
+
+ // #extension
+ PpAtomExtension,
+
+ // __LINE__, __FILE__, __VERSION__
+
+ PpAtomLineMacro,
+ PpAtomFileMacro,
+ PpAtomVersionMacro,
+
+ // #include
+ PpAtomInclude,
+
+ PpAtomLast,
+};
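+
+// For example, scanning "+" yields the token value '+' (43), below
+// PpAtomMaxSingle, while "+=" yields PPAtomAddAssign, an enumerator just
+// above it.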
+
+} // end namespace glslang
+
+#endif /* not PARSER_H */
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp
new file mode 100644
index 0000000..ae95688
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.cpp
@@ -0,0 +1,866 @@
+//
+// Copyright (C) 2015-2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// propagate the 'noContraction' qualifier.
+//
+
+#include "propagateNoContraction.h"
+
+#include <cstdlib>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "localintermediate.h"
+namespace {
+
+// Use a string to hold the access chain information, as in most cases the
+// access chain is short and may contain only one element, which is the symbol
+// ID.
+// Example: struct {float a; float b;} s;
+// Object s.a will be represented with: <symbol ID of s>/0
+// Object s.b will be represented with: <symbol ID of s>/1
+// Object s will be represented with: <symbol ID of s>
+// Members of vectors, matrices and arrays are represented with the same
+// symbol ID as their container objects, because their preciseness is always
+// the same as that of their containers.
+typedef std::string ObjectAccessChain;
+
+// The delimiter used in the ObjectAccessChain string to separate symbol ID and
+// different level of struct indices.
+const char ObjectAccesschainDelimiter = '/';
+
+// Mapping from symbol IDs of symbol nodes to their defining operation
+// nodes.
+typedef std::unordered_multimap<ObjectAccessChain, glslang::TIntermOperator*> NodeMapping;
+// Mapping from object nodes to their access chain info string.
+typedef std::unordered_map<glslang::TIntermTyped*, ObjectAccessChain> AccessChainMapping;
+
+// Set of object IDs.
+typedef std::unordered_set<ObjectAccessChain> ObjectAccesschainSet;
+// Set of return branch nodes.
+typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
+
+// A helper function to tell whether a node is 'noContraction'. Returns true if
+// the node has 'noContraction' qualifier, otherwise false.
+bool isPreciseObjectNode(glslang::TIntermTyped* node)
+{
+ return node->getType().getQualifier().noContraction;
+}
+
+// Returns true if the opcode is a dereferencing one.
+bool isDereferenceOperation(glslang::TOperator op)
+{
+ switch (op) {
+ case glslang::EOpIndexDirect:
+ case glslang::EOpIndexDirectStruct:
+ case glslang::EOpIndexIndirect:
+ case glslang::EOpVectorSwizzle:
+ case glslang::EOpMatrixSwizzle:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Returns true if the opcode leads to an assignment operation.
+bool isAssignOperation(glslang::TOperator op)
+{
+ switch (op) {
+ case glslang::EOpAssign:
+ case glslang::EOpAddAssign:
+ case glslang::EOpSubAssign:
+ case glslang::EOpMulAssign:
+ case glslang::EOpVectorTimesMatrixAssign:
+ case glslang::EOpVectorTimesScalarAssign:
+ case glslang::EOpMatrixTimesScalarAssign:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ case glslang::EOpDivAssign:
+ case glslang::EOpModAssign:
+ case glslang::EOpAndAssign:
+ case glslang::EOpLeftShiftAssign:
+ case glslang::EOpRightShiftAssign:
+ case glslang::EOpInclusiveOrAssign:
+ case glslang::EOpExclusiveOrAssign:
+
+ case glslang::EOpPostIncrement:
+ case glslang::EOpPostDecrement:
+ case glslang::EOpPreIncrement:
+ case glslang::EOpPreDecrement:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// A helper function to get the unsigned int from a given constant union node.
+// Note the node should only hold a uint scalar.
+unsigned getStructIndexFromConstantUnion(glslang::TIntermTyped* node)
+{
+ assert(node->getAsConstantUnion() && node->getAsConstantUnion()->isScalar());
+ unsigned struct_dereference_index = node->getAsConstantUnion()->getConstArray()[0].getUConst();
+ return struct_dereference_index;
+}
+
+// A helper function to generate a symbol label: the symbol ID followed by
+// the symbol name in parentheses.
+ObjectAccessChain generateSymbolLabel(glslang::TIntermSymbol* node)
+{
+ ObjectAccessChain symbol_id =
+ std::to_string(node->getId()) + "(" + node->getName().c_str() + ")";
+ return symbol_id;
+}
+
+// Returns true if the operation is an arithmetic operation and valid for
+// the 'NoContraction' decoration.
+bool isArithmeticOperation(glslang::TOperator op)
+{
+ switch (op) {
+ case glslang::EOpAddAssign:
+ case glslang::EOpSubAssign:
+ case glslang::EOpMulAssign:
+ case glslang::EOpVectorTimesMatrixAssign:
+ case glslang::EOpVectorTimesScalarAssign:
+ case glslang::EOpMatrixTimesScalarAssign:
+ case glslang::EOpMatrixTimesMatrixAssign:
+ case glslang::EOpDivAssign:
+ case glslang::EOpModAssign:
+
+ case glslang::EOpNegative:
+
+ case glslang::EOpAdd:
+ case glslang::EOpSub:
+ case glslang::EOpMul:
+ case glslang::EOpDiv:
+ case glslang::EOpMod:
+
+ case glslang::EOpVectorTimesScalar:
+ case glslang::EOpVectorTimesMatrix:
+ case glslang::EOpMatrixTimesVector:
+ case glslang::EOpMatrixTimesScalar:
+ case glslang::EOpMatrixTimesMatrix:
+
+ case glslang::EOpDot:
+
+ case glslang::EOpPostIncrement:
+ case glslang::EOpPostDecrement:
+ case glslang::EOpPreIncrement:
+ case glslang::EOpPreDecrement:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// A helper class to set a piece of traversal state and automatically restore
+// its previous value when the guard goes out of scope.
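+// A typical (illustrative) use is to construct a guard on a state variable,
+// optionally overwrite the value for the current scope, and rely on the
+// destructor to restore the previous value on scope exit:
+//
+//   {
+//       StateSettingGuard<int> guard(&state); // remembers the current value
+//       guard.setState(42);                   // temporary value for this scope
+//       ...                                   // state is 42 here
+//   }                                         // previous value restored here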
+template <typename T> class StateSettingGuard {
+public:
+ StateSettingGuard(T* state_ptr, T new_state_value)
+ : state_ptr_(state_ptr), previous_state_(*state_ptr)
+ {
+ *state_ptr = new_state_value;
+ }
+ StateSettingGuard(T* state_ptr) : state_ptr_(state_ptr), previous_state_(*state_ptr) {}
+ void setState(T new_state_value) { *state_ptr_ = new_state_value; }
+ ~StateSettingGuard() { *state_ptr_ = previous_state_; }
+
+private:
+ T* state_ptr_;
+ T previous_state_;
+};
+
+// A helper function to get the front element from a given ObjectAccessChain
+ObjectAccessChain getFrontElement(const ObjectAccessChain& chain)
+{
+ size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
+ return pos_delimiter == std::string::npos ? chain : chain.substr(0, pos_delimiter);
+}
+
+// A helper function to get the access chain starting from the second element.
+ObjectAccessChain subAccessChainFromSecondElement(const ObjectAccessChain& chain)
+{
+ size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
+ return pos_delimiter == std::string::npos ? "" : chain.substr(pos_delimiter + 1);
+}
+
+// A helper function to get the access chain after removing a given prefix.
+ObjectAccessChain getSubAccessChainAfterPrefix(const ObjectAccessChain& chain,
+ const ObjectAccessChain& prefix)
+{
+ size_t pos = chain.find(prefix);
+ if (pos != 0)
+ return chain;
+ return chain.substr(prefix.length() + sizeof(ObjectAccesschainDelimiter));
+}
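+
+// To illustrate the three helpers above with a hypothetical access chain
+// "1(s)/0/2":
+//   getFrontElement returns "1(s)";
+//   subAccessChainFromSecondElement returns "0/2";
+//   getSubAccessChainAfterPrefix with prefix "1(s)/0" returns "2".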
+
+//
+// A traverser which traverses the whole AST and populates:
+// 1) A mapping from symbol nodes' IDs to their defining operation nodes.
+// 2) A set of access chains of the initial precise object nodes.
+//
+class TSymbolDefinitionCollectingTraverser : public glslang::TIntermTraverser {
+public:
+ TSymbolDefinitionCollectingTraverser(NodeMapping* symbol_definition_mapping,
+ AccessChainMapping* accesschain_mapping,
+ ObjectAccesschainSet* precise_objects,
+ ReturnBranchNodeSet* precise_return_nodes);
+
+ bool visitUnary(glslang::TVisit, glslang::TIntermUnary*) override;
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary*) override;
+ void visitSymbol(glslang::TIntermSymbol*) override;
+ bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*) override;
+ bool visitBranch(glslang::TVisit, glslang::TIntermBranch*) override;
+
+protected:
+ TSymbolDefinitionCollectingTraverser& operator=(const TSymbolDefinitionCollectingTraverser&);
+
+    // The mapping from symbol node IDs to their defining nodes. This is
+    // populated while traversing the AST.
+ NodeMapping& symbol_definition_mapping_;
+ // The set of symbol node IDs for precise symbol nodes, the ones marked as
+ // 'noContraction'.
+ ObjectAccesschainSet& precise_objects_;
+ // The set of precise return nodes.
+ ReturnBranchNodeSet& precise_return_nodes_;
+    // A temporary cache of the access chain being built for the object
+    // currently visited while traversing the AST.
+ ObjectAccessChain current_object_;
+ // A map from object node to its access chain. This traverser stores
+ // the built access chains into this map for each object node it has
+ // visited.
+ AccessChainMapping& accesschain_mapping_;
+ // The pointer to the Function Definition node, so we can get the
+ // preciseness of the return expression from it when we traverse the
+ // return branch node.
+ glslang::TIntermAggregate* current_function_definition_node_;
+};
+
+TSymbolDefinitionCollectingTraverser::TSymbolDefinitionCollectingTraverser(
+ NodeMapping* symbol_definition_mapping, AccessChainMapping* accesschain_mapping,
+ ObjectAccesschainSet* precise_objects,
+ std::unordered_set<glslang::TIntermBranch*>* precise_return_nodes)
+ : TIntermTraverser(true, false, false), symbol_definition_mapping_(*symbol_definition_mapping),
+ precise_objects_(*precise_objects), precise_return_nodes_(*precise_return_nodes),
+ current_object_(), accesschain_mapping_(*accesschain_mapping),
+ current_function_definition_node_(nullptr) {}
+
+// Visits a symbol node: sets current_object_ to the symbol label of the
+// current node, and records a mapping from this node to current_object_,
+// the just-obtained symbol ID.
+void TSymbolDefinitionCollectingTraverser::visitSymbol(glslang::TIntermSymbol* node)
+{
+ current_object_ = generateSymbolLabel(node);
+ accesschain_mapping_[node] = current_object_;
+}
+
+// Visits an aggregate node, traverses all of its children.
+bool TSymbolDefinitionCollectingTraverser::visitAggregate(glslang::TVisit,
+ glslang::TIntermAggregate* node)
+{
+ // This aggregate node might be a function definition node, in which case we need to
+ // cache this node, so we can get the preciseness information of the return value
+ // of this function later.
+ StateSettingGuard<glslang::TIntermAggregate*> current_function_definition_node_setting_guard(
+ &current_function_definition_node_);
+ if (node->getOp() == glslang::EOpFunction) {
+        // This is a function definition node; we need to cache it so that we
+        // can get the preciseness of the return value later.
+ current_function_definition_node_setting_guard.setState(node);
+ }
+ // Traverse the items in the sequence.
+ glslang::TIntermSequence& seq = node->getSequence();
+ for (int i = 0; i < (int)seq.size(); ++i) {
+ current_object_.clear();
+ seq[i]->traverse(this);
+ }
+ return false;
+}
+
+bool TSymbolDefinitionCollectingTraverser::visitBranch(glslang::TVisit,
+ glslang::TIntermBranch* node)
+{
+ if (node->getFlowOp() == glslang::EOpReturn && node->getExpression() &&
+ current_function_definition_node_ &&
+ current_function_definition_node_->getType().getQualifier().noContraction) {
+ // This node is a return node with an expression, and its function has a
+ // precise return value. We need to find the involved objects in its
+ // expression and add them to the set of initial precise objects.
+ precise_return_nodes_.insert(node);
+ node->getExpression()->traverse(this);
+ }
+ return false;
+}
+
+// Visits a unary node. This might be an implicit assignment like i++, i--, etc.
+bool TSymbolDefinitionCollectingTraverser::visitUnary(glslang::TVisit /* visit */,
+ glslang::TIntermUnary* node)
+{
+ current_object_.clear();
+ node->getOperand()->traverse(this);
+ if (isAssignOperation(node->getOp())) {
+        // We should always be able to get an access chain for the operand node.
+ assert(!current_object_.empty());
+
+ // If the operand node object is 'precise', we collect its access chain
+ // for the initial set of 'precise' objects.
+ if (isPreciseObjectNode(node->getOperand())) {
+            // The operand node is a 'precise' object node; add its
+            // access chain to the set of 'precise' objects. This is to collect
+            // the initial set of 'precise' objects.
+ precise_objects_.insert(current_object_);
+ }
+ // Gets the symbol ID from the object's access chain.
+ ObjectAccessChain id_symbol = getFrontElement(current_object_);
+ // Add a mapping from the symbol ID to this assignment operation node.
+ symbol_definition_mapping_.insert(std::make_pair(id_symbol, node));
+ }
+ // A unary node is not a dereference node, so we clear the access chain which
+ // is under construction.
+ current_object_.clear();
+ return false;
+}
+
+// Visits a binary node and updates the mapping from symbol IDs to the definition
+// nodes. Also collects the access chains for the initial precise objects.
+bool TSymbolDefinitionCollectingTraverser::visitBinary(glslang::TVisit /* visit */,
+ glslang::TIntermBinary* node)
+{
+ // Traverses the left node to build the access chain info for the object.
+ current_object_.clear();
+ node->getLeft()->traverse(this);
+
+ if (isAssignOperation(node->getOp())) {
+ // We should always be able to get an access chain for the left node.
+ assert(!current_object_.empty());
+
+        // If the left node object is 'precise', it is an initial precise object
+        // specified in the shader source. Add it to the initial work list to
+        // process later.
+        if (isPreciseObjectNode(node->getLeft())) {
+            // The left node is a 'precise' object node; add its access chain to
+            // the set of 'precise' objects. This is to collect the initial set
+            // of 'precise' objects.
+ precise_objects_.insert(current_object_);
+ }
+ // Gets the symbol ID from the object access chain, which should be the
+ // first element recorded in the access chain.
+ ObjectAccessChain id_symbol = getFrontElement(current_object_);
+ // Adds a mapping from the symbol ID to this assignment operation node.
+ symbol_definition_mapping_.insert(std::make_pair(id_symbol, node));
+
+        // Traverses the right node; there may be other assignment
+        // operations on the right.
+ current_object_.clear();
+ node->getRight()->traverse(this);
+
+ } else if (isDereferenceOperation(node->getOp())) {
+        // The left node (parent node) is a struct type object. We need to
+        // record the access chain information of the current node under its
+        // object ID.
+ if (node->getOp() == glslang::EOpIndexDirectStruct) {
+ unsigned struct_dereference_index = getStructIndexFromConstantUnion(node->getRight());
+ current_object_.push_back(ObjectAccesschainDelimiter);
+ current_object_.append(std::to_string(struct_dereference_index));
+ }
+ accesschain_mapping_[node] = current_object_;
+
+ // For a dereference node, there is no need to traverse the right child
+ // node as the right node should always be an integer type object.
+
+ } else {
+ // For other binary nodes, still traverse the right node.
+ current_object_.clear();
+ node->getRight()->traverse(this);
+ }
+ return false;
+}
+
+// Traverses the AST and returns a tuple of four members:
+// 1) a mapping from symbol IDs to the definition nodes (i.e. assignment nodes) of these symbols.
+// 2) a mapping from object nodes in the AST to the access chains of these objects.
+// 3) a set of access chains of precise objects.
+// 4) a set of return nodes with precise expressions.
+std::tuple<NodeMapping, AccessChainMapping, ObjectAccesschainSet, ReturnBranchNodeSet>
+getSymbolToDefinitionMappingAndPreciseSymbolIDs(const glslang::TIntermediate& intermediate)
+{
+ auto result_tuple = std::make_tuple(NodeMapping(), AccessChainMapping(), ObjectAccesschainSet(),
+ ReturnBranchNodeSet());
+
+ TIntermNode* root = intermediate.getTreeRoot();
+    if (root == nullptr)
+ return result_tuple;
+
+ NodeMapping& symbol_definition_mapping = std::get<0>(result_tuple);
+ AccessChainMapping& accesschain_mapping = std::get<1>(result_tuple);
+ ObjectAccesschainSet& precise_objects = std::get<2>(result_tuple);
+ ReturnBranchNodeSet& precise_return_nodes = std::get<3>(result_tuple);
+
+    // Traverses the AST and populates the results.
+ TSymbolDefinitionCollectingTraverser collector(&symbol_definition_mapping, &accesschain_mapping,
+ &precise_objects, &precise_return_nodes);
+ root->traverse(&collector);
+
+ return result_tuple;
+}
+
+//
+// A traverser that determines whether the left node (or the operand node of
+// a unary node) of an assignment node is 'precise', contains 'precise'
+// objects, or is not 'precise', according to the access chain of a given
+// precise object which shares the same symbol as the left node.
+//
+// Post-order traverses the left-node subtree of a binary assignment node and:
+//
+// 1) Propagates 'precise' from nesting object nodes to their nested object nodes.
+//
+// 2) Builds the object access chain along the traversal, and compares it with
+// the access chain of the given 'precise' object to tell whether the node to
+// be defined is 'precise' or not.
+//
+class TNoContractionAssigneeCheckingTraverser : public glslang::TIntermTraverser {
+
+ enum DecisionStatus {
+        // The object node to be assigned to may contain both 'precise' and
+        // non-'precise' objects.
+        Mixed = 0,
+        // The object node to be assigned to is either a 'precise' object or a
+        // struct object whose members are all 'precise'.
+        Precise = 1,
+        // The object node to be assigned to is not a 'precise' object.
+        NotPrecise = 2,
+ };
+
+public:
+ TNoContractionAssigneeCheckingTraverser(const AccessChainMapping& accesschain_mapping)
+ : TIntermTraverser(true, false, false), accesschain_mapping_(accesschain_mapping),
+ precise_object_(nullptr) {}
+
+ // Checks the preciseness of a given assignment node with a precise object
+ // represented as access chain. The precise object shares the same symbol
+    // with the assignee of the given assignment node. Returns a tuple of two:
+ //
+ // 1) The preciseness of the assignee node of this assignment node. True
+ // if the assignee contains 'precise' objects or is 'precise', false if
+ // the assignee is not 'precise' according to the access chain of the given
+ // precise object.
+ //
+ // 2) The incremental access chain from the assignee node to its nested
+ // 'precise' object, according to the access chain of the given precise
+ // object. This incremental access chain can be empty, which means the
+ // assignee is 'precise'. Otherwise it shows the path to the nested
+ // precise object.
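+    //
+    // To illustrate with hypothetical access chains: given the precise
+    // object "1(s)/0", an assignee "1(s)" contains the 'precise' member,
+    // so the result is (true, "0"); an assignee "1(s)/0/1" is itself
+    // 'precise' since the precise object is a prefix of it, giving
+    // (true, ""); and an assignee "1(s)/1" does not match, giving
+    // (false, "").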
+ std::tuple<bool, ObjectAccessChain>
+ getPrecisenessAndRemainedAccessChain(glslang::TIntermOperator* node,
+ const ObjectAccessChain& precise_object)
+ {
+ assert(isAssignOperation(node->getOp()));
+ precise_object_ = &precise_object;
+ ObjectAccessChain assignee_object;
+ if (glslang::TIntermBinary* BN = node->getAsBinaryNode()) {
+ // This is a binary assignment node, we need to check the
+ // preciseness of the left node.
+ assert(accesschain_mapping_.count(BN->getLeft()));
+            // The left node (assignee node) is an object node; traverse the
+            // node to let the 'precise' of nesting objects be transferred to
+            // nested objects.
+ BN->getLeft()->traverse(this);
+ // After traversing the left node, if the left node is 'precise',
+ // we can conclude this assignment should propagate 'precise'.
+ if (isPreciseObjectNode(BN->getLeft())) {
+ return make_tuple(true, ObjectAccessChain());
+ }
+            // If the preciseness of the left node (assignee node) cannot
+            // be determined yet, we need to compare the access chain string
+            // of the assignee object with that of the given precise object.
+ assignee_object = accesschain_mapping_.at(BN->getLeft());
+
+ } else if (glslang::TIntermUnary* UN = node->getAsUnaryNode()) {
+            // This is a unary assignment node; we need to check the
+            // preciseness of the operand node. For a unary assignment node,
+            // the operand node should always be an object node.
+ assert(accesschain_mapping_.count(UN->getOperand()));
+            // Traverse the operand node to let the 'precise' be propagated
+            // from lower nodes to upper nodes.
+ UN->getOperand()->traverse(this);
+ // After traversing the operand node, if the operand node is
+ // 'precise', this assignment should propagate 'precise'.
+ if (isPreciseObjectNode(UN->getOperand())) {
+ return make_tuple(true, ObjectAccessChain());
+ }
+            // If the preciseness of the operand node (assignee node) cannot
+            // be determined yet, we need to compare the access chain string
+            // of the assignee object with that of the given precise object.
+ assignee_object = accesschain_mapping_.at(UN->getOperand());
+ } else {
+ // Not a binary or unary node, should not happen.
+ assert(false);
+ }
+
+ // Compare the access chain string of the assignee node with the given
+ // precise object to determine if this assignment should propagate
+ // 'precise'.
+ if (assignee_object.find(precise_object) == 0) {
+ // The access chain string of the given precise object is a prefix
+ // of assignee's access chain string. The assignee should be
+ // 'precise'.
+ return make_tuple(true, ObjectAccessChain());
+ } else if (precise_object.find(assignee_object) == 0) {
+            // The assignee's access chain string is a prefix of the given
+            // precise object's, so the assignee object contains a 'precise'
+            // object, and we need to pass the remaining access chain to the
+            // object nodes on the right.
+ return make_tuple(true, getSubAccessChainAfterPrefix(precise_object, assignee_object));
+ } else {
+            // The access chain strings do not match; the assignee object
+            // cannot be labeled as 'precise' according to the given precise
+            // object.
+ return make_tuple(false, ObjectAccessChain());
+ }
+ }
+
+protected:
+ TNoContractionAssigneeCheckingTraverser& operator=(const TNoContractionAssigneeCheckingTraverser&);
+
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override;
+ void visitSymbol(glslang::TIntermSymbol* node) override;
+
+ // A map from object nodes to their access chain string (used as object ID).
+ const AccessChainMapping& accesschain_mapping_;
+    // A given precise object, represented by its access chain string. This
+    // precise object is compared with the assignee node to tell whether the
+    // assignee node is 'precise', contains a 'precise' object, or is not
+    // 'precise'.
+ const ObjectAccessChain* precise_object_;
+};
+
+// Visits a binary node. If the node is an object node, it must be a dereference
+// node. In such cases, if the left node is 'precise', this node should also be
+// 'precise'.
+bool TNoContractionAssigneeCheckingTraverser::visitBinary(glslang::TVisit,
+ glslang::TIntermBinary* node)
+{
+ // Traverses the left so that we transfer the 'precise' from nesting object
+ // to its nested object.
+ node->getLeft()->traverse(this);
+ // If this binary node is an object node, we should have it in the
+ // accesschain_mapping_.
+ if (accesschain_mapping_.count(node)) {
+ // A binary object node must be a dereference node.
+ assert(isDereferenceOperation(node->getOp()));
+        // If the left node is 'precise', this node should also be 'precise';
+        // otherwise, compare with the given precise_object_. If the
+        // access chain of this node matches the given precise_object_,
+        // this node should be marked as 'precise'.
+ if (isPreciseObjectNode(node->getLeft())) {
+ node->getWritableType().getQualifier().noContraction = true;
+ } else if (accesschain_mapping_.at(node) == *precise_object_) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+ }
+ return false;
+}
+
+// Visits a symbol node; if the symbol node ID (its access chain string)
+// matches the given precise object, this node should be 'precise'.
+void TNoContractionAssigneeCheckingTraverser::visitSymbol(glslang::TIntermSymbol* node)
+{
+ // A symbol node should always be an object node, and should have been added
+ // to the map from object nodes to their access chain strings.
+ assert(accesschain_mapping_.count(node));
+ if (accesschain_mapping_.at(node) == *precise_object_) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+}
+
+//
+// A traverser that only traverses the right side of binary assignment nodes
+// and the operand node of unary assignment nodes.
+//
+// 1) Marks arithmetic operations as 'NoContraction'.
+//
+// 2) Finds the objects which should be marked as 'precise' on the right and
+// updates the 'precise' object work list.
+//
+class TNoContractionPropagator : public glslang::TIntermTraverser {
+public:
+ TNoContractionPropagator(ObjectAccesschainSet* precise_objects,
+ const AccessChainMapping& accesschain_mapping)
+ : TIntermTraverser(true, false, false),
+ precise_objects_(*precise_objects), added_precise_object_ids_(),
+ remained_accesschain_(), accesschain_mapping_(accesschain_mapping) {}
+
+    // Propagates 'precise' in the right nodes of a given assignment node,
+    // given the access chain record from the assignee node to a 'precise'
+    // object it contains.
+ void
+ propagateNoContractionInOneExpression(glslang::TIntermTyped* defining_node,
+ const ObjectAccessChain& assignee_remained_accesschain)
+ {
+ remained_accesschain_ = assignee_remained_accesschain;
+ if (glslang::TIntermBinary* BN = defining_node->getAsBinaryNode()) {
+ assert(isAssignOperation(BN->getOp()));
+ BN->getRight()->traverse(this);
+ if (isArithmeticOperation(BN->getOp())) {
+ BN->getWritableType().getQualifier().noContraction = true;
+ }
+ } else if (glslang::TIntermUnary* UN = defining_node->getAsUnaryNode()) {
+ assert(isAssignOperation(UN->getOp()));
+ UN->getOperand()->traverse(this);
+ if (isArithmeticOperation(UN->getOp())) {
+ UN->getWritableType().getQualifier().noContraction = true;
+ }
+ }
+ }
+
+ // Propagates 'precise' in a given precise return node.
+ void propagateNoContractionInReturnNode(glslang::TIntermBranch* return_node)
+ {
+ remained_accesschain_ = "";
+ assert(return_node->getFlowOp() == glslang::EOpReturn && return_node->getExpression());
+ return_node->getExpression()->traverse(this);
+ }
+
+protected:
+ TNoContractionPropagator& operator=(const TNoContractionPropagator&);
+
+    // Visits an aggregate node. The node can be an initializer list, in which
+    // case we need to find the 'precise' or 'precise'-containing object node
+    // with the access chain record. In other cases, we just need to traverse
+    // all the children nodes.
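+    //
+    // For example (hypothetically), with the remaining access chain "1/0"
+    // and a struct initializer S(a, b), the front element "1" selects the
+    // second item 'b' of the initializer sequence, which is then visited
+    // with the remaining chain "0".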
+ bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override
+ {
+ if (!remained_accesschain_.empty() && node->getOp() == glslang::EOpConstructStruct) {
+            // This is a struct initializer node, and the remaining
+            // access chain is not empty; we need to refer to
+            // remained_accesschain_ to find the nested
+            // 'precise' object, and we don't need to visit the other nodes in
+            // this aggregate node.
+
+            // Gets the struct dereference index that leads to the 'precise' object.
+ ObjectAccessChain precise_accesschain_index_str =
+ getFrontElement(remained_accesschain_);
+ unsigned precise_accesschain_index = (unsigned)strtoul(precise_accesschain_index_str.c_str(), nullptr, 10);
+ // Gets the node pointed by the access chain index extracted before.
+ glslang::TIntermTyped* potential_precise_node =
+ node->getSequence()[precise_accesschain_index]->getAsTyped();
+ assert(potential_precise_node);
+ // Pop the front access chain index from the path, and visit the nested node.
+ {
+ ObjectAccessChain next_level_accesschain =
+ subAccessChainFromSecondElement(remained_accesschain_);
+ StateSettingGuard<ObjectAccessChain> setup_remained_accesschain_for_next_level(
+ &remained_accesschain_, next_level_accesschain);
+ potential_precise_node->traverse(this);
+ }
+ return false;
+ }
+ return true;
+ }
+
+    // Visits a binary node. A binary node can be an object node, e.g. a dereference node.
+    // As only the top object nodes on the right side of an assignment need to be visited
+    // and added to the 'precise' work list, this traverser won't visit the children nodes of
+    // an object node. If the binary node does not represent an object node, it should
+    // go on to traverse its children nodes, and if it is an arithmetic operation node, this
+    // operation should be marked as 'noContraction'.
+ bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override
+ {
+ if (isDereferenceOperation(node->getOp())) {
+            // This binary node is an object node. We need to update the
+            // precise object set with the access chain of this node plus the
+            // remaining access chain.
+ ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node);
+ if (remained_accesschain_.empty()) {
+ node->getWritableType().getQualifier().noContraction = true;
+ } else {
+ new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_;
+ }
+ // Cache the access chain as added precise object, so we won't add the
+ // same object to the work list again.
+ if (!added_precise_object_ids_.count(new_precise_accesschain)) {
+ precise_objects_.insert(new_precise_accesschain);
+ added_precise_object_ids_.insert(new_precise_accesschain);
+ }
+ // Only the upper-most object nodes should be visited, so do not
+ // visit children of this object node.
+ return false;
+ }
+ // If this is an arithmetic operation, marks this node as 'noContraction'.
+ if (isArithmeticOperation(node->getOp()) && node->getBasicType() != glslang::EbtInt) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+ // As this node is not an object node, need to traverse the children nodes.
+ return true;
+ }
+
+    // Visits a unary node. A unary node cannot be an object node. If the
+    // operation is an arithmetic operation, we need to mark this node as
+    // 'noContraction'.
+ bool visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node) override
+ {
+        // If this is an arithmetic operation, mark it as 'noContraction'.
+ if (isArithmeticOperation(node->getOp())) {
+ node->getWritableType().getQualifier().noContraction = true;
+ }
+ return true;
+ }
+
+    // Visits a symbol node. A symbol node is always an object node, so we
+    // should always be able to find it in our collected mapping from object
+    // nodes to access chains. As an object node, a symbol node can be either
+    // 'precise' or contain 'precise' objects, according to the remaining
+    // access chain information we have when we visit this node.
+ void visitSymbol(glslang::TIntermSymbol* node) override
+ {
+        // Symbol nodes are object nodes and should always have an
+        // access chain collected for them before this visit.
+ assert(accesschain_mapping_.count(node));
+ ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node);
+        // If the remaining access chain is empty, this symbol node should be
+        // marked as 'precise'. Otherwise, the remaining access chain is
+        // appended to the symbol ID to build a new access chain which points
+        // to the nested 'precise' object in this symbol object.
+ if (remained_accesschain_.empty()) {
+ node->getWritableType().getQualifier().noContraction = true;
+ } else {
+ new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_;
+ }
+ // Add the new 'precise' access chain to the work list and make sure we
+ // don't visit it again.
+ if (!added_precise_object_ids_.count(new_precise_accesschain)) {
+ precise_objects_.insert(new_precise_accesschain);
+ added_precise_object_ids_.insert(new_precise_accesschain);
+ }
+ }
+
+ // A set of precise objects, represented as access chains.
+ ObjectAccesschainSet& precise_objects_;
+ // Visited symbol nodes, should not revisit these nodes.
+ ObjectAccesschainSet added_precise_object_ids_;
+    // The left node of an assignment operation might be a parent of 'precise'
+    // objects. This means the left node might not itself be a 'precise' object
+    // node, but it may contain objects with the 'precise' qualifier which
+    // should be propagated to the corresponding child nodes on the right. So
+    // we need the path from the left node to its nested 'precise' node to
+    // tell us how to find the corresponding 'precise' node on the right.
+ ObjectAccessChain remained_accesschain_;
+ // A map from node pointers to their access chains.
+ const AccessChainMapping& accesschain_mapping_;
+};
+}
+
+namespace glslang {
+
+void PropagateNoContraction(const glslang::TIntermediate& intermediate)
+{
+    // First, traverses the AST, records symbols with their defining operations,
+    // and collects the initial set of precise symbols (symbol nodes marked as
+    // 'noContraction') and precise return nodes.
+ auto mappings_and_precise_objects =
+ getSymbolToDefinitionMappingAndPreciseSymbolIDs(intermediate);
+
+ // The mapping of symbol node IDs to their defining nodes. This enables us
+ // to get the defining node directly from a given symbol ID without
+ // traversing the tree again.
+ NodeMapping& symbol_definition_mapping = std::get<0>(mappings_and_precise_objects);
+
+ // The mapping of object nodes to their access chains recorded.
+ AccessChainMapping& accesschain_mapping = std::get<1>(mappings_and_precise_objects);
+
+    // The initial set of 'precise' objects, represented as the
+    // access chains toward them.
+ ObjectAccesschainSet& precise_object_accesschains = std::get<2>(mappings_and_precise_objects);
+
+ // The set of 'precise' return nodes.
+ ReturnBranchNodeSet& precise_return_nodes = std::get<3>(mappings_and_precise_objects);
+
+    // Second, uses the initial set of precise objects as a work list: pops an
+    // access chain and extracts the symbol ID from it. Then:
+    // 1) Check the assignee object to see whether it is a 'precise' object
+    // node or contains 'precise' objects. Obtain the incremental access chain
+    // from the assignee node to its nested 'precise' node (if any).
+    // 2) If the assignee object node is 'precise' or contains 'precise'
+    // objects, traverse the right side of the assignment operation
+    // expression to mark arithmetic operations as 'noContraction' and update
+    // the 'precise' access chain work list with newly found object nodes.
+    // Repeat the above steps until the work list is empty.
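+    //
+    // For example (hypothetical shader code):
+    //   precise float p;
+    //   t = a * b + c;
+    //   p = t + d;
+    // Processing the work-list entry for 'p' marks the addition in 't + d'
+    // as 'noContraction' and adds 't' and 'd' to the work list; processing
+    // 't' then marks the operations in 'a * b + c' as 'noContraction' too.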
+ TNoContractionAssigneeCheckingTraverser checker(accesschain_mapping);
+ TNoContractionPropagator propagator(&precise_object_accesschains, accesschain_mapping);
+
+    // We have two initial precise work lists to handle:
+    // 1) precise return nodes
+    // 2) precise object access chains
+    // We process the precise return nodes first, so that the objects involved
+    // in the return expressions are added to the precise object access chain
+    // set.
+ while (!precise_return_nodes.empty()) {
+ glslang::TIntermBranch* precise_return_node = *precise_return_nodes.begin();
+ propagator.propagateNoContractionInReturnNode(precise_return_node);
+ precise_return_nodes.erase(precise_return_node);
+ }
+
+ while (!precise_object_accesschains.empty()) {
+ // Get the access chain of a precise object from the work list.
+ ObjectAccessChain precise_object_accesschain = *precise_object_accesschains.begin();
+ // Get the symbol id from the access chain.
+ ObjectAccessChain symbol_id = getFrontElement(precise_object_accesschain);
+ // Get all the defining nodes of that symbol ID.
+ std::pair<NodeMapping::iterator, NodeMapping::iterator> range =
+ symbol_definition_mapping.equal_range(symbol_id);
+        // Visit all the assignment nodes of that symbol ID and:
+        // 1) Check whether the assignee node is 'precise' or contains
+        // 'precise' objects.
+        // 2) Propagate the 'precise' to the top-layer object nodes
+        // on the right side of the assignment operation, update the 'precise'
+        // work list with new access chains representing the new 'precise'
+        // objects, and mark arithmetic operations as 'noContraction'.
+ for (NodeMapping::iterator defining_node_iter = range.first;
+ defining_node_iter != range.second; defining_node_iter++) {
+ TIntermOperator* defining_node = defining_node_iter->second;
+ // Check the assignee node.
+ auto checker_result = checker.getPrecisenessAndRemainedAccessChain(
+ defining_node, precise_object_accesschain);
+ bool& contain_precise = std::get<0>(checker_result);
+ ObjectAccessChain& remained_accesschain = std::get<1>(checker_result);
+ // If the assignee node is 'precise' or contains 'precise', propagate the
+ // 'precise' to the right. Otherwise just skip this assignment node.
+ if (contain_precise) {
+ propagator.propagateNoContractionInOneExpression(defining_node,
+ remained_accesschain);
+ }
+ }
+        // Remove the processed 'precise' object from the work list.
+ precise_object_accesschains.erase(precise_object_accesschain);
+ }
+}
+};
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h b/src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h
new file mode 100644
index 0000000..8521ad7
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/propagateNoContraction.h
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2015-2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// propagate the 'noContraction' qualifier.
+//
+
+#pragma once
+
+#include "../Include/intermediate.h"
+
+namespace glslang {
+
+// Propagates the 'precise' qualifier for objects (objects marked with the
+// 'noContraction' qualifier) from the 'precise' variables specified in the
+// shader source to all the involved objects, and adds the 'noContraction'
+// qualifier to the involved arithmetic operations.
+// Note that the same qualifier: 'noContraction' is used in both object nodes
+// and arithmetic operation nodes, but has different meaning. For object nodes,
+// 'noContraction' means the object is 'precise'; and for arithmetic operation
+// nodes, it means the operation should not be contracted.
+void PropagateNoContraction(const glslang::TIntermediate& intermediate);
+};
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/reflection.cpp b/src/3rdparty/glslang/glslang/MachineIndependent/reflection.cpp
new file mode 100644
index 0000000..2b28403
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/reflection.cpp
@@ -0,0 +1,1256 @@
+//
+// Copyright (C) 2013-2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../Include/Common.h"
+#include "reflection.h"
+#include "LiveTraverser.h"
+#include "localintermediate.h"
+
+#include "gl_types.h"
+
+//
+// Grow the reflection database through a friend traverser class of TReflection and a
+// collection of functions to do a liveness traversal that notes which uniforms are used
+// in semantically non-dead code.
+//
+// Can be used multiple times, once per stage, to grow a program reflection.
+//
+// High-level algorithm for one stage:
+//
+// 1. Put the entry point on the list of live functions.
+//
+// 2. Traverse any live function, while skipping if-tests with a compile-time constant
+// condition of false, and while adding any encountered function calls to the live
+// function list.
+//
+// Repeat until the live function list is empty.
+//
+// 3. Add any encountered uniform variables and blocks to the reflection database.
+//
+// Can be attempted with a failed link, but will return false if recursion has been detected, or
+// there wasn't exactly one entry point.
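+//
+// For example (hypothetical shader): given
+//   uniform U { vec4 v[4]; } u;
+// where live code reads u.v, the traversal records the block "U" and an
+// entry for its member v with type, offset, and array size information,
+// while a uniform referenced only from statically dead code is never
+// reached and so is not added to the database.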
+//
+
+namespace glslang {
+
+//
+// The traverser: mostly pass through, except
+// - processing binary nodes to see if they are dereferences of aggregates to track
+// - processing symbol nodes to see if they are non-aggregate objects to track
+//
+// This ignores semantically dead code by using TLiveTraverser.
+//
+// This is in the glslang namespace directly so it can be a friend of TReflection.
+//
+
+class TReflectionTraverser : public TLiveTraverser {
+public:
+ TReflectionTraverser(const TIntermediate& i, TReflection& r) :
+ TLiveTraverser(i), reflection(r) { }
+
+ virtual bool visitBinary(TVisit, TIntermBinary* node);
+ virtual void visitSymbol(TIntermSymbol* base);
+
+ // Add a simple reference to a uniform variable to the uniform database, no dereference involved.
+ // However, no dereference doesn't mean simple... it could be a complex aggregate.
+ void addUniform(const TIntermSymbol& base)
+ {
+ if (processedDerefs.find(&base) == processedDerefs.end()) {
+ processedDerefs.insert(&base);
+
+            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+            // the dereference chain expected by blowUpActiveAggregate.
+ TList<TIntermBinary*> derefs;
+ blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
+ base.getQualifier().storage, true);
+ }
+ }
+
+ void addPipeIOVariable(const TIntermSymbol& base)
+ {
+ if (processedDerefs.find(&base) == processedDerefs.end()) {
+ processedDerefs.insert(&base);
+
+ const TString &name = base.getName();
+ const TType &type = base.getType();
+ const bool input = base.getQualifier().isPipeInput();
+
+ TReflection::TMapIndexToReflection &ioItems =
+ input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+ if (reflection.options & EShReflectionUnwrapIOBlocks) {
+ bool anonymous = IsAnonymous(name);
+
+ TString baseName;
+ if (type.getBasicType() == EbtBlock) {
+ baseName = anonymous ? TString() : type.getTypeName();
+ } else {
+ baseName = anonymous ? TString() : name;
+ }
+
+ // by convention if this is an arrayed block we ignore the array in the reflection
+ if (type.isArray() && type.getBasicType() == EbtBlock) {
+ blowUpIOAggregate(input, baseName, TType(type, 0));
+ } else {
+ blowUpIOAggregate(input, baseName, type);
+ }
+ } else {
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+ if (it == reflection.nameToIndex.end()) {
+ reflection.nameToIndex[name.c_str()] = (int)ioItems.size();
+ ioItems.push_back(
+ TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+
+ EShLanguageMask& stages = ioItems.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ } else {
+ EShLanguageMask& stages = ioItems[it->second].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
+ }
+ }
+
+    // shared calculation used by getOffset and getOffsets
+ void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
+ {
+ int dummyStride;
+
+ // modify just the children's view of matrix layout, if there is one for this member
+ TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
+ int memberAlignment = intermediate.getMemberAlignment(memberType, memberSize, dummyStride,
+ parentType.getQualifier().layoutPacking,
+ subMatrixLayout != ElmNone
+ ? subMatrixLayout == ElmRowMajor
+ : parentType.getQualifier().layoutMatrix == ElmRowMajor);
+ RoundToPow2(offset, memberAlignment);
+ }
+
+ // Lookup or calculate the offset of a block member, using the recursively
+ // defined block offset rules.
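+    //
+    // For example (hypothetical std140 block): for
+    //   uniform B { float a; vec3 b; };
+    // the calculated offset of 'b' is 16, because a vec3 member must be
+    // aligned to 16 bytes under std140 even though 'a' only occupies
+    // bytes 0..3.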
+ int getOffset(const TType& type, int index)
+ {
+ const TTypeList& memberList = *type.getStruct();
+
+        // Don't calculate the offset if one is present; it could be user-supplied
+        // and different from what would be calculated. That is, this is faster,
+        // but it is not just an optimization.
+ if (memberList[index].type->getQualifier().hasOffset())
+ return memberList[index].type->getQualifier().layoutOffset;
+
+ int memberSize = 0;
+ int offset = 0;
+ for (int m = 0; m <= index; ++m) {
+ updateOffset(type, *memberList[m].type, offset, memberSize);
+
+ if (m < index)
+ offset += memberSize;
+ }
+
+ return offset;
+ }
+
+ // Lookup or calculate the offset of all block members at once, using the recursively
+ // defined block offset rules.
+ void getOffsets(const TType& type, TVector<int>& offsets)
+ {
+ const TTypeList& memberList = *type.getStruct();
+
+ int memberSize = 0;
+ int offset = 0;
+ for (size_t m = 0; m < offsets.size(); ++m) {
+ // if the user supplied an offset, snap to it now
+ if (memberList[m].type->getQualifier().hasOffset())
+ offset = memberList[m].type->getQualifier().layoutOffset;
+
+ // calculate the offset of the next member and align the current offset to this member
+ updateOffset(type, *memberList[m].type, offset, memberSize);
+
+ // save the offset of this member
+ offsets[m] = offset;
+
+ // update for the next member
+ offset += memberSize;
+ }
+ }
+
+ // Calculate the stride of an array type
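+    //
+    // For example (hypothetical member): a 'float a[4]' member of an std140
+    // block gets an array stride of 16, as std140 rounds array strides up to
+    // vec4 alignment, while std430 would give a stride of 4.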
+ int getArrayStride(const TType& baseType, const TType& type)
+ {
+ int dummySize;
+ int stride;
+
+ // consider blocks to have 0 stride, so that all offsets are relative to the start of their block
+ if (type.getBasicType() == EbtBlock)
+ return 0;
+
+ TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix;
+ intermediate.getMemberAlignment(type, dummySize, stride,
+ baseType.getQualifier().layoutPacking,
+ subMatrixLayout != ElmNone
+ ? subMatrixLayout == ElmRowMajor
+ : baseType.getQualifier().layoutMatrix == ElmRowMajor);
+
+ return stride;
+ }
+
+ // Calculate the block data size.
+    // Block arrayness is not taken into account; each element is backed by a separate buffer.
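+    //
+    // For example (hypothetically): if the last member starts at offset 48
+    // and occupies 16 bytes, the reported block size is 48 + 16 = 64.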
+ int getBlockSize(const TType& blockType)
+ {
+ const TTypeList& memberList = *blockType.getStruct();
+ int lastIndex = (int)memberList.size() - 1;
+ int lastOffset = getOffset(blockType, lastIndex);
+
+ int lastMemberSize;
+ int dummyStride;
+ intermediate.getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
+ blockType.getQualifier().layoutPacking,
+ blockType.getQualifier().layoutMatrix == ElmRowMajor);
+
+ return lastOffset + lastMemberSize;
+ }
+
+    // Count the total number of leaf members reached by iterating through a block type.
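+    //
+    // For example (hypothetical types, default non-strict array rules):
+    //   struct Inner { float x; float y; };
+    //   struct Outer { Inner inner[3]; vec2 w; };
+    // an Outer counts 3 * 2 + 1 = 7 leaves: the sized array of structs is
+    // expanded, while 'w' counts as a single leaf.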
+ int countAggregateMembers(const TType& parentType)
+ {
+ if (! parentType.isStruct())
+ return 1;
+
+ const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+
+ bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
+
+ const TTypeList &memberList = *parentType.getStruct();
+
+ int ret = 0;
+
+ for (size_t i = 0; i < memberList.size(); i++)
+ {
+ const TType &memberType = *memberList[i].type;
+ int numMembers = countAggregateMembers(memberType);
+ // for sized arrays of structs, apply logic to expand out the same as we would below in
+ // blowUpActiveAggregate
+ if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
+ if (! strictArraySuffix || ! blockParent)
+ numMembers *= memberType.getArraySizes()->getCumulativeSize();
+ }
+ ret += numMembers;
+ }
+
+ return ret;
+ }
+
+ // Traverse the provided deref chain, including the base, and
+ // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
+ // - recursively expand any variable array index in the middle of that traversal
+ // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
+ //
+    // arraySize tracks, just for the final dereference in the chain, whether there
+    // was a specific known size. A value of 0 for arraySize means to use the full array's size.
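+    //
+    // For example (hypothetically): a uniform 'S s[2];' where S holds a
+    // single 'vec4 v;' member expands into the reflection entries "s[0].v"
+    // and "s[1].v".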
+ void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
+ TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
+ int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
+ {
+ // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
+ // Broadly:
+ // * arrays-of-structs always have a [x] suffix.
+ // * with array-of-struct variables in the root of a buffer block, only ever return [0].
+ // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
+ const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+
+        // Is this variable inside a buffer block? This flag is set back to false after we iterate inside the first array element.
+ bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
+
+ // process the part of the dereference chain that was explicit in the shader
+ TString name = baseName;
+ const TType* terminalType = &baseType;
+ for (; deref != derefs.end(); ++deref) {
+ TIntermBinary* visitNode = *deref;
+ terminalType = &visitNode->getType();
+ int index;
+ switch (visitNode->getOp()) {
+ case EOpIndexIndirect: {
+ int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = stride;
+
+ // Visit all the indices of this array, and for each one add on the remaining dereferencing
+ for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
+ TString newBaseName = name;
+ if (strictArraySuffix && blockParent)
+ newBaseName.append(TString("[0]"));
+ else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
+ newBaseName.append(TString("[") + String(i) + "]");
+ TList<TIntermBinary*>::const_iterator nextDeref = deref;
+ ++nextDeref;
+ blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
+ topLevelArrayStride, baseStorage, active);
+
+ if (offset >= 0)
+ offset += stride;
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ }
+ case EOpIndexDirect: {
+ int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
+
+ index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (strictArraySuffix && blockParent) {
+ name.append(TString("[0]"));
+ } else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
+ name.append(TString("[") + String(index) + "]");
+
+ if (offset >= 0)
+ offset += stride * index;
+ }
+
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = stride;
+
+ blockParent = false;
+ break;
+ }
+ case EOpIndexDirectStruct:
+ index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
+ if (offset >= 0)
+ offset += getOffset(visitNode->getLeft()->getType(), index);
+ if (name.size() > 0)
+ name.append(".");
+ name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
+ break;
+ default:
+ break;
+ }
+ }
+
+ // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
+ if (! isReflectionGranularity(*terminalType)) {
+ // the base offset of this node, that children are relative to
+ int baseOffset = offset;
+
+ if (terminalType->isArray()) {
+ // Visit all the indices of this array, and for each one,
+ // fully explode the remaining aggregate to dereference
+
+ int stride = 0;
+ if (offset >= 0)
+ stride = getArrayStride(baseType, *terminalType);
+
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = stride;
+
+ int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
+
+ // for top-level arrays in blocks, only expand [0] to avoid explosion of items
+ if (strictArraySuffix && blockParent)
+ arrayIterateSize = 1;
+
+ for (int i = 0; i < arrayIterateSize; ++i) {
+ TString newBaseName = name;
+ newBaseName.append(TString("[") + String(i) + "]");
+ TType derefType(*terminalType, 0);
+ if (offset >= 0)
+ offset = baseOffset + stride * i;
+
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+ topLevelArrayStride, baseStorage, active);
+ }
+ } else {
+ // Visit all members of this aggregate, and for each one,
+ // fully explode the remaining aggregate to dereference
+ const TTypeList& typeList = *terminalType->getStruct();
+
+ TVector<int> memberOffsets;
+
+ if (baseOffset >= 0) {
+ memberOffsets.resize(typeList.size());
+ getOffsets(*terminalType, memberOffsets);
+ }
+
+ for (int i = 0; i < (int)typeList.size(); ++i) {
+ TString newBaseName = name;
+ if (newBaseName.size() > 0)
+ newBaseName.append(".");
+ newBaseName.append(typeList[i].type->getFieldName());
+ TType derefType(*terminalType, i);
+ if (offset >= 0)
+ offset = baseOffset + memberOffsets[i];
+
+ int arrayStride = topLevelArrayStride;
+ if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
+ derefType.isArray()) {
+ arrayStride = getArrayStride(baseType, derefType);
+ }
+
+ blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
+ arrayStride, baseStorage, active);
+ }
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ }
+
+ if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
+ name.append(TString("[0]"));
+ }
+
+ // Finally, add a full string to the reflection database, and update the array size if necessary.
+ // If the dereferenced entity to record is an array, compute the size and update the maximum size.
+
+        // there might not be a final array dereference; it could have been copied as an array object
+ if (arraySize == 0)
+ arraySize = mapToGlArraySize(*terminalType);
+
+ TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
+
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+ if (it == reflection.nameToIndex.end()) {
+ int uniformIndex = (int)variables.size();
+ reflection.nameToIndex[name.c_str()] = uniformIndex;
+ variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
+ arraySize, blockIndex));
+ if (terminalType->isArray()) {
+ variables.back().arrayStride = getArrayStride(baseType, *terminalType);
+ if (topLevelArrayStride == 0)
+ topLevelArrayStride = variables.back().arrayStride;
+ }
+
+ if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->getBasicType() == EbtAtomicUint)
+ reflection.atomicCounterUniformIndices.push_back(uniformIndex);
+
+ variables.back().topLevelArrayStride = topLevelArrayStride;
+
+ if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+ EShLanguageMask& stages = variables.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ } else {
+ if (arraySize > 1) {
+ int& reflectedArraySize = variables[it->second].size;
+ reflectedArraySize = std::max(arraySize, reflectedArraySize);
+ }
+
+ if ((reflection.options & EShReflectionAllBlockVariables) && active) {
+ EShLanguageMask& stages = variables[it->second].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
+ }
+
+ // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
+ void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
+ {
+ TString name = baseName;
+
+ // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
+ if (! isReflectionGranularity(type)) {
+ if (type.isArray()) {
+ // Visit all the indices of this array, and for each one,
+ // fully explode the remaining aggregate to dereference
+ for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
+ TString newBaseName = name;
+ newBaseName.append(TString("[") + String(i) + "]");
+ TType derefType(type, 0);
+
+ blowUpIOAggregate(input, newBaseName, derefType);
+ }
+ } else {
+ // Visit all members of this aggregate, and for each one,
+ // fully explode the remaining aggregate to dereference
+ const TTypeList& typeList = *type.getStruct();
+
+ for (int i = 0; i < (int)typeList.size(); ++i) {
+ TString newBaseName = name;
+ if (newBaseName.size() > 0)
+ newBaseName.append(".");
+ newBaseName.append(typeList[i].type->getFieldName());
+ TType derefType(type, i);
+
+ blowUpIOAggregate(input, newBaseName, derefType);
+ }
+ }
+
+ // it was all completed in the recursive calls above
+ return;
+ }
+
+ if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
+ name.append(TString("[0]"));
+ }
+
+ TReflection::TMapIndexToReflection &ioItems =
+ input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
+
+ std::string namespacedName = input ? "in " : "out ";
+ namespacedName += name.c_str();
+
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
+ if (it == reflection.nameToIndex.end()) {
+ reflection.nameToIndex[namespacedName] = (int)ioItems.size();
+ ioItems.push_back(
+ TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
+
+ EShLanguageMask& stages = ioItems.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ } else {
+ EShLanguageMask& stages = ioItems[it->second].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+ }
+
+ // Add a uniform dereference where blocks/struct/arrays are involved in the access.
+ // Handles the situation where the left node is at the correct or too coarse a
+ // granularity for reflection. (That is, further dereferences up the tree will be
+ // skipped.) Earlier dereferences, down the tree, will be handled
+ // at the same time, and logged to prevent reprocessing as the tree is traversed.
+ //
+ // Note: Other things like the following must be caught elsewhere:
+ // - a simple non-array, non-struct variable (no dereference even conceivable)
+    // - an aggregate consumed en masse, without a dereference
+ //
+ // So, this code is for cases like
+ // - a struct/block dereferencing a member (whether the member is array or not)
+ // - an array of struct
+ // - structs/arrays containing the above
+ //
+ void addDereferencedUniform(TIntermBinary* topNode)
+ {
+ // See if too fine-grained to process (wait to get further down the tree)
+ const TType& leftType = topNode->getLeft()->getType();
+ if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
+ return;
+
+ // We have an array or structure or block dereference, see if it's a uniform
+ // based dereference (if not, skip it).
+ TIntermSymbol* base = findBase(topNode);
+ if (! base || ! base->getQualifier().isUniformOrBuffer())
+ return;
+
+ // See if we've already processed this (e.g., in the middle of something
+ // we did earlier), and if so skip it
+ if (processedDerefs.find(topNode) != processedDerefs.end())
+ return;
+
+ // Process this uniform dereference
+
+ int offset = -1;
+ int blockIndex = -1;
+ bool anonymous = false;
+
+ // See if we need to record the block itself
+ bool block = base->getBasicType() == EbtBlock;
+ if (block) {
+ offset = 0;
+ anonymous = IsAnonymous(base->getName());
+
+ const TString& blockName = base->getType().getTypeName();
+ TString baseName;
+
+ if (! anonymous)
+ baseName = blockName;
+
+ if (base->getType().isArray()) {
+ TType derefType(base->getType(), 0);
+
+ assert(! anonymous);
+ for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
+ blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
+ getBlockSize(base->getType()));
+ baseName.append(TString("[0]"));
+ } else
+ blockIndex = addBlockName(blockName, base->getType(), getBlockSize(base->getType()));
+
+ if (reflection.options & EShReflectionAllBlockVariables) {
+            // Use a degenerate (empty) set of dereferences to immediately put us at the end of
+            // the dereference chain expected by blowUpActiveAggregate.
+ TList<TIntermBinary*> derefs;
+
+ // because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
+ // member in the struct definition. This will lose any information about whether the parent was a buffer
+ // block. So if we're using strict array rules which don't expand the first child of a buffer block we
+ // instead iterate over the children here.
+ const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
+ bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
+
+ if (strictArraySuffix && blockParent) {
+ const TTypeList& typeList = *base->getType().getStruct();
+
+ TVector<int> memberOffsets;
+
+ memberOffsets.resize(typeList.size());
+ getOffsets(base->getType(), memberOffsets);
+
+ for (int i = 0; i < (int)typeList.size(); ++i) {
+ TType derefType(base->getType(), i);
+ TString name = baseName;
+ if (name.size() > 0)
+ name.append(".");
+ name.append(typeList[i].type->getFieldName());
+
+ // if this member is an array, store the top-level array stride but start the explosion from
+ // the inner struct type.
+ if (derefType.isArray() && derefType.isStruct()) {
+ name.append("[0]");
+ blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
+ blockIndex, 0, getArrayStride(base->getType(), derefType),
+ base->getQualifier().storage, false);
+ } else {
+ blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
+ 0, 0, base->getQualifier().storage, false);
+ }
+ }
+ } else {
+ // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
+ // expanding root arrays anyway, just start the iteration from the base block type.
+ blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
+ base->getQualifier().storage, false);
+ }
+ }
+ }
+
+ // Process the dereference chain, backward, accumulating the pieces for later forward traversal.
+ // If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
+ TList<TIntermBinary*> derefs;
+ for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
+ if (isReflectionGranularity(visitNode->getLeft()->getType()))
+ continue;
+
+ derefs.push_front(visitNode);
+ processedDerefs.insert(visitNode);
+ }
+ processedDerefs.insert(base);
+
+ // See if we have a specific array size to stick to while enumerating the explosion of the aggregate
+ int arraySize = 0;
+ if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
+ if (topNode->getOp() == EOpIndexDirect)
+ arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
+ }
+
+ // Put the dereference chain together, forward
+ TString baseName;
+ if (! anonymous) {
+ if (block)
+ baseName = base->getType().getTypeName();
+ else
+ baseName = base->getName();
+ }
+ blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
+ base->getQualifier().storage, true);
+ }
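+
+    // Editorial sketch (hypothetical names): given "uniform Block { S s[3]; } b;"
+    // and an access "b.s[1].x", visitBinary() fires on the index nodes, findBase()
+    // chases down to the symbol for "b", the chain is rebuilt front-to-back above,
+    // and blowUpActiveAggregate() then emits member entries such as "s[0].x"; with
+    // EShReflectionAllBlockVariables every member of every element is reflected.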
+
+ int addBlockName(const TString& name, const TType& type, int size)
+ {
+ TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
+
+ int blockIndex;
+ TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
+        if (it == reflection.nameToIndex.end()) {
+ blockIndex = (int)blocks.size();
+ reflection.nameToIndex[name.c_str()] = blockIndex;
+ blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
+
+ blocks.back().numMembers = countAggregateMembers(type);
+
+ EShLanguageMask& stages = blocks.back().stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ } else {
+ blockIndex = it->second;
+
+ EShLanguageMask& stages = blocks[blockIndex].stages;
+ stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
+ }
+
+ return blockIndex;
+ }
+
+ // Are we at a level in a dereference chain at which individual active uniform queries are made?
+ bool isReflectionGranularity(const TType& type)
+ {
+ return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
+ }
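+
+    // Editorial examples: a vec4, a float[4], and a mat3 are all at reflection
+    // granularity (one query describes each), while a struct, a block, or a
+    // float[2][3] (an array of arrays) still needs further dereferencing.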
+
+ // For a binary operation indexing into an aggregate, chase down the base of the aggregate.
+ // Return 0 if the topology does not fit this situation.
+ TIntermSymbol* findBase(const TIntermBinary* node)
+ {
+ TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
+ if (base)
+ return base;
+ TIntermBinary* left = node->getLeft()->getAsBinaryNode();
+ if (! left)
+ return nullptr;
+
+ return findBase(left);
+ }
+
+ //
+ // Translate a glslang sampler type into the GL API #define number.
+ //
+ int mapSamplerToGlType(TSampler sampler)
+ {
+ if (! sampler.image) {
+ // a sampler...
+ switch (sampler.type) {
+ case EbtFloat:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
+ case true: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
+ }
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
+ case true: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
+ }
+ case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_SAMPLER_3D;
+ case EsdCube:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
+ case true: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
+ }
+ case EsdRect:
+ return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
+ case EsdBuffer:
+ return GL_SAMPLER_BUFFER;
+ }
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD;
+ }
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD;
+ }
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD;
+ }
+ case Esd3D:
+ return GL_FLOAT16_SAMPLER_3D_AMD;
+ case EsdCube:
+ switch ((int)sampler.shadow) {
+ case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD;
+ }
+ case EsdRect:
+ return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD;
+ case EsdBuffer:
+ return GL_FLOAT16_SAMPLER_BUFFER_AMD;
+ }
+#endif
+ case EbtInt:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
+ case true: return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
+ : GL_INT_SAMPLER_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_INT_SAMPLER_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
+ case EsdRect:
+ return GL_INT_SAMPLER_2D_RECT;
+ case EsdBuffer:
+ return GL_INT_SAMPLER_BUFFER;
+ }
+ case EbtUint:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
+ case true: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
+ : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_UNSIGNED_INT_SAMPLER_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
+ case EsdRect:
+ return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
+ case EsdBuffer:
+ return GL_UNSIGNED_INT_SAMPLER_BUFFER;
+ }
+ default:
+ return 0;
+ }
+ } else {
+ // an image...
+ switch (sampler.type) {
+ case EbtFloat:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
+ case true: return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_IMAGE_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
+ case EsdRect:
+ return GL_IMAGE_2D_RECT;
+ case EsdBuffer:
+ return GL_IMAGE_BUFFER;
+ }
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD;
+ case true: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD;
+ }
+ case Esd3D:
+ return GL_FLOAT16_IMAGE_3D_AMD;
+ case EsdCube:
+ return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD;
+ case EsdRect:
+ return GL_FLOAT16_IMAGE_2D_RECT_AMD;
+ case EsdBuffer:
+ return GL_FLOAT16_IMAGE_BUFFER_AMD;
+ }
+#endif
+ case EbtInt:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
+ case true: return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_INT_IMAGE_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
+ case EsdRect:
+ return GL_INT_IMAGE_2D_RECT;
+ case EsdBuffer:
+ return GL_INT_IMAGE_BUFFER;
+ }
+ case EbtUint:
+ switch ((int)sampler.dim) {
+ case Esd1D:
+ return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
+ case Esd2D:
+ switch ((int)sampler.ms) {
+ case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
+ case true: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
+ : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
+ }
+ case Esd3D:
+ return GL_UNSIGNED_INT_IMAGE_3D;
+ case EsdCube:
+ return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
+ case EsdRect:
+ return GL_UNSIGNED_INT_IMAGE_2D_RECT;
+ case EsdBuffer:
+ return GL_UNSIGNED_INT_IMAGE_BUFFER;
+ }
+ default:
+ return 0;
+ }
+ }
+ }
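+
+    // Editorial examples of the mapping above (GL enum names as in gl_types.h):
+    //     sampler2D               -> GL_SAMPLER_2D
+    //     sampler2DArrayShadow    -> GL_SAMPLER_2D_ARRAY_SHADOW
+    //     usamplerBuffer          -> GL_UNSIGNED_INT_SAMPLER_BUFFER
+    //     image2DMSArray (float)  -> GL_IMAGE_2D_MULTISAMPLE_ARRAY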
+
+ //
+ // Translate a glslang type into the GL API #define number.
+ // Ignores arrayness.
+ //
+ int mapToGlType(const TType& type)
+ {
+ switch (type.getBasicType()) {
+ case EbtSampler:
+ return mapSamplerToGlType(type.getSampler());
+ case EbtStruct:
+ case EbtBlock:
+ case EbtVoid:
+ return 0;
+ default:
+ break;
+ }
+
+ if (type.isVector()) {
+ int offset = type.getVectorSize() - 2;
+ switch (type.getBasicType()) {
+ case EbtFloat: return GL_FLOAT_VEC2 + offset;
+ case EbtDouble: return GL_DOUBLE_VEC2 + offset;
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
+#endif
+ case EbtInt: return GL_INT_VEC2 + offset;
+ case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
+ case EbtInt64: return GL_INT64_ARB + offset;
+ case EbtUint64: return GL_UNSIGNED_INT64_ARB + offset;
+ case EbtBool: return GL_BOOL_VEC2 + offset;
+ case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
+ default: return 0;
+ }
+ }
+ if (type.isMatrix()) {
+ switch (type.getBasicType()) {
+ case EbtFloat:
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT_MAT2;
+ case 3: return GL_FLOAT_MAT2x3;
+ case 4: return GL_FLOAT_MAT2x4;
+ default: return 0;
+ }
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT_MAT3x2;
+ case 3: return GL_FLOAT_MAT3;
+ case 4: return GL_FLOAT_MAT3x4;
+ default: return 0;
+ }
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT_MAT4x2;
+ case 3: return GL_FLOAT_MAT4x3;
+ case 4: return GL_FLOAT_MAT4;
+ default: return 0;
+ }
+ }
+ case EbtDouble:
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_DOUBLE_MAT2;
+ case 3: return GL_DOUBLE_MAT2x3;
+ case 4: return GL_DOUBLE_MAT2x4;
+ default: return 0;
+ }
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_DOUBLE_MAT3x2;
+ case 3: return GL_DOUBLE_MAT3;
+ case 4: return GL_DOUBLE_MAT3x4;
+ default: return 0;
+ }
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_DOUBLE_MAT4x2;
+ case 3: return GL_DOUBLE_MAT4x3;
+ case 4: return GL_DOUBLE_MAT4;
+ default: return 0;
+ }
+ }
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16:
+ switch (type.getMatrixCols()) {
+ case 2:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT16_MAT2_AMD;
+ case 3: return GL_FLOAT16_MAT2x3_AMD;
+ case 4: return GL_FLOAT16_MAT2x4_AMD;
+ default: return 0;
+ }
+ case 3:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT16_MAT3x2_AMD;
+ case 3: return GL_FLOAT16_MAT3_AMD;
+ case 4: return GL_FLOAT16_MAT3x4_AMD;
+ default: return 0;
+ }
+ case 4:
+ switch (type.getMatrixRows()) {
+ case 2: return GL_FLOAT16_MAT4x2_AMD;
+ case 3: return GL_FLOAT16_MAT4x3_AMD;
+ case 4: return GL_FLOAT16_MAT4_AMD;
+ default: return 0;
+ }
+ }
+#endif
+ default:
+ return 0;
+ }
+ }
+ if (type.getVectorSize() == 1) {
+ switch (type.getBasicType()) {
+ case EbtFloat: return GL_FLOAT;
+ case EbtDouble: return GL_DOUBLE;
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: return GL_FLOAT16_NV;
+#endif
+ case EbtInt: return GL_INT;
+ case EbtUint: return GL_UNSIGNED_INT;
+ case EbtInt64: return GL_INT64_ARB;
+ case EbtUint64: return GL_UNSIGNED_INT64_ARB;
+ case EbtBool: return GL_BOOL;
+ case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
+ default: return 0;
+ }
+ }
+
+ return 0;
+ }
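+
+    // Editorial examples: the vector cases are offsets from the VEC2 constants,
+    // so vec3 maps to GL_FLOAT_VEC2 + 1 == GL_FLOAT_VEC3 and ivec4 maps to
+    // GL_INT_VEC2 + 2 == GL_INT_VEC4; mat4x3 (4 columns, 3 rows) maps to
+    // GL_FLOAT_MAT4x3, and a scalar bool maps to GL_BOOL.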
+
+ int mapToGlArraySize(const TType& type)
+ {
+ return type.isArray() ? type.getOuterArraySize() : 1;
+ }
+
+ TReflection& reflection;
+ std::set<const TIntermNode*> processedDerefs;
+
+protected:
+ TReflectionTraverser(TReflectionTraverser&);
+ TReflectionTraverser& operator=(TReflectionTraverser&);
+};
+
+//
+// Implement the traversal functions of interest.
+//
+
+// To catch dereferenced aggregates that must be reflected.
+// This catches them at the highest level possible in the tree.
+bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
+{
+ switch (node->getOp()) {
+ case EOpIndexDirect:
+ case EOpIndexIndirect:
+ case EOpIndexDirectStruct:
+ addDereferencedUniform(node);
+ break;
+ default:
+ break;
+ }
+
+ // still need to visit everything below, which could contain sub-expressions
+ // containing different uniforms
+ return true;
+}
+
+// To reflect non-dereferenced objects.
+void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
+{
+ if (base->getQualifier().storage == EvqUniform)
+ addUniform(*base);
+
+ if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
+ (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
+ addPipeIOVariable(*base);
+}
+
+//
+// Implement TObjectReflection methods.
+//
+
+TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
+ int pSize, int pIndex)
+ : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
+ numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
+{
+}
+
+int TObjectReflection::getBinding() const
+{
+ if (type == nullptr || !type->getQualifier().hasBinding())
+ return -1;
+ return type->getQualifier().layoutBinding;
+}
+
+void TObjectReflection::dump() const
+{
+ printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
+ index, getBinding(), stages);
+
+ if (counterIndex != -1)
+ printf(", counter %d", counterIndex);
+
+ if (numMembers != -1)
+ printf(", numMembers %d", numMembers);
+
+ if (arrayStride != 0)
+ printf(", arrayStride %d", arrayStride);
+
+ if (topLevelArrayStride != 0)
+ printf(", topLevelArrayStride %d", topLevelArrayStride);
+
+ printf("\n");
+}
+
+//
+// Implement TReflection methods.
+//
+
+// Track any required attribute reflection, such as compute shader numthreads.
+//
+void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
+{
+ if (stage == EShLangCompute) {
+ // Remember thread dimensions
+ for (int dim=0; dim<3; ++dim)
+ localSize[dim] = intermediate.getLocalSize(dim);
+ }
+}
+
+// build counter block index associations for buffers
+void TReflection::buildCounterIndices(const TIntermediate& intermediate)
+{
+ // search for ones that have counters
+ for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
+ const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
+ const int index = getIndex(counterName);
+
+ if (index >= 0)
+ indexToUniformBlock[i].counterIndex = index;
+ }
+}
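+
+// Editorial note: addCounterBufferName() derives the counter block's name from the
+// buffer's own name by appending a fixed suffix ("@count" in this version, per
+// TIntermediate), so a block reflected as "myBuf" gets its counterIndex pointed at
+// the entry named "myBuf@count" when such an entry exists. Names are hypothetical.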
+
+// build the shader stage mask for all uniforms
+void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
+{
+ if (options & EShReflectionAllBlockVariables)
+ return;
+
+ for (int i = 0; i < int(indexToUniform.size()); ++i) {
+ indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
+ }
+
+ for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
+ indexToBufferVariable[i].stages =
+ static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
+ }
+}
+
+// Merge live symbols from 'intermediate' into the existing reflection database.
+//
+// Returns false if the input is too malformed to do this.
+bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
+{
+ if (intermediate.getTreeRoot() == nullptr ||
+ intermediate.getNumEntryPoints() != 1 ||
+ intermediate.isRecursive())
+ return false;
+
+ buildAttributeReflection(stage, intermediate);
+
+ TReflectionTraverser it(intermediate, *this);
+
+ // put the entry point on the list of functions to process
+ it.pushFunction(intermediate.getEntryPointMangledName().c_str());
+
+ // process all the functions
+ while (! it.functions.empty()) {
+ TIntermNode* function = it.functions.back();
+ it.functions.pop_back();
+ function->traverse(&it);
+ }
+
+ buildCounterIndices(intermediate);
+ buildUniformStageMask(intermediate);
+
+ return true;
+}
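+
+// Editorial usage sketch (hypothetical driver code, not part of glslang): the
+// database is grown one stage at a time and then queried:
+//
+//     glslang::TReflection refl(EShReflectionDefault, EShLangVertex, EShLangFragment);
+//     refl.addStage(EShLangVertex, *vertexIntermediate);     // TIntermediate objects
+//     refl.addStage(EShLangFragment, *fragmentIntermediate); //   from linked shaders
+//     for (int i = 0; i < refl.getNumUniforms(); ++i)
+//         refl.getUniform(i).dump();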
+
+void TReflection::dump()
+{
+ printf("Uniform reflection:\n");
+ for (size_t i = 0; i < indexToUniform.size(); ++i)
+ indexToUniform[i].dump();
+ printf("\n");
+
+ printf("Uniform block reflection:\n");
+ for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
+ indexToUniformBlock[i].dump();
+ printf("\n");
+
+ printf("Buffer variable reflection:\n");
+ for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
+ indexToBufferVariable[i].dump();
+ printf("\n");
+
+ printf("Buffer block reflection:\n");
+ for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
+ indexToBufferBlock[i].dump();
+ printf("\n");
+
+ printf("Pipeline input reflection:\n");
+ for (size_t i = 0; i < indexToPipeInput.size(); ++i)
+ indexToPipeInput[i].dump();
+ printf("\n");
+
+ printf("Pipeline output reflection:\n");
+ for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
+ indexToPipeOutput[i].dump();
+ printf("\n");
+
+ if (getLocalSize(0) > 1) {
+ static const char* axis[] = { "X", "Y", "Z" };
+
+ for (int dim=0; dim<3; ++dim)
+ if (getLocalSize(dim) > 1)
+ printf("Local size %s: %d\n", axis[dim], getLocalSize(dim));
+
+ printf("\n");
+ }
+
+ // printf("Live names\n");
+ // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
+ // printf("%s: %d\n", it->first.c_str(), it->second);
+ // printf("\n");
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/MachineIndependent/reflection.h b/src/3rdparty/glslang/glslang/MachineIndependent/reflection.h
new file mode 100644
index 0000000..44b17a0
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/MachineIndependent/reflection.h
@@ -0,0 +1,203 @@
+//
+// Copyright (C) 2013-2016 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _REFLECTION_INCLUDED
+#define _REFLECTION_INCLUDED
+
+#include "../Public/ShaderLang.h"
+#include "../Include/Types.h"
+
+#include <list>
+#include <set>
+
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
+//
+
+namespace glslang {
+
+class TIntermediate;
+class TIntermAggregate;
+class TReflectionTraverser;
+
+// The full reflection database
+class TReflection {
+public:
+ TReflection(EShReflectionOptions opts, EShLanguage first, EShLanguage last)
+ : options(opts), firstStage(first), lastStage(last), badReflection(TObjectReflection::badReflection())
+ {
+ for (int dim=0; dim<3; ++dim)
+ localSize[dim] = 0;
+ }
+
+ virtual ~TReflection() {}
+
+ // grow the reflection stage by stage
+ bool addStage(EShLanguage, const TIntermediate&);
+
+ // for mapping a uniform index to a uniform object's description
+    int getNumUniforms() const { return (int)indexToUniform.size(); }
+ const TObjectReflection& getUniform(int i) const
+ {
+ if (i >= 0 && i < (int)indexToUniform.size())
+ return indexToUniform[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping a block index to the block's description
+ int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); }
+ const TObjectReflection& getUniformBlock(int i) const
+ {
+ if (i >= 0 && i < (int)indexToUniformBlock.size())
+ return indexToUniformBlock[i];
+ else
+ return badReflection;
+ }
+
+    // for mapping a pipeline input index to the input's description
+    int getNumPipeInputs() const { return (int)indexToPipeInput.size(); }
+ const TObjectReflection& getPipeInput(int i) const
+ {
+ if (i >= 0 && i < (int)indexToPipeInput.size())
+ return indexToPipeInput[i];
+ else
+ return badReflection;
+ }
+
+    // for mapping a pipeline output index to the output's description
+    int getNumPipeOutputs() const { return (int)indexToPipeOutput.size(); }
+ const TObjectReflection& getPipeOutput(int i) const
+ {
+ if (i >= 0 && i < (int)indexToPipeOutput.size())
+ return indexToPipeOutput[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping from an atomic counter to the uniform index
+ int getNumAtomicCounters() const { return (int)atomicCounterUniformIndices.size(); }
+ const TObjectReflection& getAtomicCounter(int i) const
+ {
+ if (i >= 0 && i < (int)atomicCounterUniformIndices.size())
+ return getUniform(atomicCounterUniformIndices[i]);
+ else
+ return badReflection;
+ }
+
+ // for mapping a buffer variable index to a buffer variable object's description
+    int getNumBufferVariables() const { return (int)indexToBufferVariable.size(); }
+ const TObjectReflection& getBufferVariable(int i) const
+ {
+ if (i >= 0 && i < (int)indexToBufferVariable.size())
+ return indexToBufferVariable[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping a storage block index to the storage block's description
+ int getNumStorageBuffers() const { return (int)indexToBufferBlock.size(); }
+ const TObjectReflection& getStorageBufferBlock(int i) const
+ {
+ if (i >= 0 && i < (int)indexToBufferBlock.size())
+ return indexToBufferBlock[i];
+ else
+ return badReflection;
+ }
+
+ // for mapping any name to its index (block names, uniform names and input/output names)
+ int getIndex(const char* name) const
+ {
+ TNameToIndex::const_iterator it = nameToIndex.find(name);
+ if (it == nameToIndex.end())
+ return -1;
+ else
+ return it->second;
+ }
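+
+    // Editorial example: pipe I/O entries are keyed with an "in "/"out " prefix,
+    // so a vertex input named "pos" is found via getIndex("in pos"), while
+    // uniforms and blocks are keyed by their plain reflected names.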
+
+ // see getIndex(const char*)
+ int getIndex(const TString& name) const { return getIndex(name.c_str()); }
+
+ // Thread local size
+ unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
+
+ void dump();
+
+protected:
+ friend class glslang::TReflectionTraverser;
+
+ void buildCounterIndices(const TIntermediate&);
+ void buildUniformStageMask(const TIntermediate& intermediate);
+ void buildAttributeReflection(EShLanguage, const TIntermediate&);
+
+ // Need a TString hash: typedef std::unordered_map<TString, int> TNameToIndex;
+ typedef std::map<std::string, int> TNameToIndex;
+ typedef std::vector<TObjectReflection> TMapIndexToReflection;
+ typedef std::vector<int> TIndices;
+
+ TMapIndexToReflection& GetBlockMapForStorage(TStorageQualifier storage)
+ {
+ if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
+ return indexToBufferBlock;
+ return indexToUniformBlock;
+ }
+ TMapIndexToReflection& GetVariableMapForStorage(TStorageQualifier storage)
+ {
+ if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
+ return indexToBufferVariable;
+ return indexToUniform;
+ }
+
+ EShReflectionOptions options;
+
+ EShLanguage firstStage;
+ EShLanguage lastStage;
+
+    TObjectReflection badReflection; // returned for queries of -1 or otherwise out of range; holds the expected default descriptions for that case
+    TNameToIndex nameToIndex; // maps names to indices; holds all kinds of names: uniform/buffer names and the function names already processed
+ TMapIndexToReflection indexToUniform;
+ TMapIndexToReflection indexToUniformBlock;
+ TMapIndexToReflection indexToBufferVariable;
+ TMapIndexToReflection indexToBufferBlock;
+ TMapIndexToReflection indexToPipeInput;
+ TMapIndexToReflection indexToPipeOutput;
+ TIndices atomicCounterUniformIndices;
+
+ unsigned int localSize[3];
+};
+
+} // end namespace glslang
+
+#endif // _REFLECTION_INCLUDED
diff --git a/src/3rdparty/glslang/glslang/OSDependent/Unix/ossource.cpp b/src/3rdparty/glslang/glslang/OSDependent/Unix/ossource.cpp
new file mode 100644
index 0000000..3f029f0
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/OSDependent/Unix/ossource.cpp
@@ -0,0 +1,207 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// This file contains the Linux-specific functions
+//
+#include "../osinclude.h"
+#include "../../../OGLCompilersDLL/InitializeDll.h"
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <cstdio>
+#include <sys/time.h>
+
+#if !defined(__Fuchsia__)
+#include <sys/resource.h>
+#endif
+
+namespace glslang {
+
+//
+// Thread cleanup
+//
+
+//
+// Wrapper for Linux call to DetachThread. This is required as pthread_cleanup_push() expects
+// the cleanup routine to return void.
+//
+static void DetachThreadLinux(void *)
+{
+ DetachThread();
+}
+
+//
+// Registers cleanup handler, sets cancel type and state, and executes the thread specific
+// cleanup handler. This function will be called in the Standalone.cpp for regression
+// testing. When OpenGL applications are run with the driver code, Linux OS does the
+// thread cleanup.
+//
+void OS_CleanupThreadData(void)
+{
+#if defined(__ANDROID__) || defined(__Fuchsia__)
+ DetachThreadLinux(NULL);
+#else
+ int old_cancel_state, old_cancel_type;
+ void *cleanupArg = NULL;
+
+ //
+ // Set thread cancel state and push cleanup handler.
+ //
+ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancel_state);
+ pthread_cleanup_push(DetachThreadLinux, (void *) cleanupArg);
+
+ //
+ // Put the thread in deferred cancellation mode.
+ //
+ pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_cancel_type);
+
+ //
+ // Pop cleanup handler and execute it prior to unregistering the cleanup handler.
+ //
+ pthread_cleanup_pop(1);
+
+ //
+ // Restore the thread's previous cancellation mode.
+ //
+    pthread_setcanceltype(old_cancel_type, NULL);
+#endif
+}
+
+//
+// Thread Local Storage Operations
+//
+inline OS_TLSIndex PthreadKeyToTLSIndex(pthread_key_t key)
+{
+ return (OS_TLSIndex)((uintptr_t)key + 1);
+}
+
+inline pthread_key_t TLSIndexToPthreadKey(OS_TLSIndex nIndex)
+{
+ return (pthread_key_t)((uintptr_t)nIndex - 1);
+}
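+
+// Editorial note: the +1/-1 shifting exists because OS_INVALID_TLS_INDEX is the
+// null pointer while pthread_key_create() may legitimately hand out key 0; the
+// offset keeps every valid key distinct from the invalid sentinel. For example,
+// pthread key 0 becomes OS_TLSIndex 1, and only a failure yields OS_TLSIndex 0.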
+
+OS_TLSIndex OS_AllocTLSIndex()
+{
+ pthread_key_t pPoolIndex;
+
+ //
+ // Create global pool key.
+ //
+ if ((pthread_key_create(&pPoolIndex, NULL)) != 0) {
+ assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
+ return OS_INVALID_TLS_INDEX;
+ }
+ else
+ return PthreadKeyToTLSIndex(pPoolIndex);
+}
+
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+ assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
+ return false;
+ }
+
+ if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0)
+ return true;
+ else
+ return false;
+}
+
+void* OS_GetTLSValue(OS_TLSIndex nIndex)
+{
+ //
+ // This function should return 0 if nIndex is invalid.
+ //
+ assert(nIndex != OS_INVALID_TLS_INDEX);
+ return pthread_getspecific(TLSIndexToPthreadKey(nIndex));
+}
+
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_FreeTLSIndex(): Invalid TLS Index");
+ return false;
+ }
+
+ //
+ // Delete the global pool key.
+ //
+ if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0)
+ return true;
+ else
+ return false;
+}
+
+namespace {
+ pthread_mutex_t gMutex;
+}
+
+void InitGlobalLock()
+{
+ pthread_mutexattr_t mutexattr;
+ pthread_mutexattr_init(&mutexattr);
+ pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(&gMutex, &mutexattr);
+}
+
+void GetGlobalLock()
+{
+ pthread_mutex_lock(&gMutex);
+}
+
+void ReleaseGlobalLock()
+{
+ pthread_mutex_unlock(&gMutex);
+}
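+
+// Editorial usage sketch (hypothetical caller): the mutex is created with
+// PTHREAD_MUTEX_RECURSIVE, so nested locking from one thread is safe:
+//
+//     glslang::InitGlobalLock();    // once per process
+//     glslang::GetGlobalLock();
+//     glslang::GetGlobalLock();     // re-entry from the same thread is OK
+//     glslang::ReleaseGlobalLock();
+//     glslang::ReleaseGlobalLock(); // one release per acquire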
+
+// #define DUMP_COUNTERS
+
+void OS_DumpMemoryCounters()
+{
+#ifdef DUMP_COUNTERS
+ struct rusage usage;
+
+ if (getrusage(RUSAGE_SELF, &usage) == 0)
+ printf("Working set size: %ld\n", usage.ru_maxrss * 1024);
+#else
+ printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
+#endif
+}
+
+} // end namespace glslang
diff --git a/src/3rdparty/glslang/glslang/OSDependent/Windows/main.cpp b/src/3rdparty/glslang/glslang/OSDependent/Windows/main.cpp
new file mode 100644
index 0000000..0bcde7b
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/OSDependent/Windows/main.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "InitializeDll.h"
+
+#define STRICT
+#define VC_EXTRALEAN 1
+#include <windows.h>
+#include <assert.h>
+
+BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
+{
+ switch (fdwReason)
+ {
+ case DLL_PROCESS_ATTACH:
+
+ if (! glslang::InitProcess())
+ return FALSE;
+ break;
+ case DLL_THREAD_ATTACH:
+
+ if (! glslang::InitThread())
+ return FALSE;
+ break;
+
+ case DLL_THREAD_DETACH:
+
+ if (! glslang::DetachThread())
+ return FALSE;
+ break;
+
+ case DLL_PROCESS_DETACH:
+
+ glslang::DetachProcess();
+ break;
+
+ default:
+ assert(0 && "DllMain(): Reason for calling DLL Main is unknown");
+ return FALSE;
+ }
+
+ return TRUE;
+}
diff --git a/src/3rdparty/glslang/glslang/OSDependent/Windows/ossource.cpp b/src/3rdparty/glslang/glslang/OSDependent/Windows/ossource.cpp
new file mode 100644
index 0000000..870840c
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/OSDependent/Windows/ossource.cpp
@@ -0,0 +1,147 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include "../osinclude.h"
+
+#define STRICT
+#define VC_EXTRALEAN 1
+#include <windows.h>
+#include <cassert>
+#include <process.h>
+#include <psapi.h>
+#include <cstdio>
+#include <cstdint>
+
+//
+// This file contains the Windows-specific functions
+//
+
+#if !(defined(_WIN32) || defined(_WIN64))
+#error Trying to build a windows specific file in a non windows build.
+#endif
+
+namespace glslang {
+
+inline OS_TLSIndex ToGenericTLSIndex (DWORD handle)
+{
+ return (OS_TLSIndex)((uintptr_t)handle + 1);
+}
+
+inline DWORD ToNativeTLSIndex (OS_TLSIndex nIndex)
+{
+ return (DWORD)((uintptr_t)nIndex - 1);
+}
+
+//
+// Thread Local Storage Operations
+//
+OS_TLSIndex OS_AllocTLSIndex()
+{
+ DWORD dwIndex = TlsAlloc();
+ if (dwIndex == TLS_OUT_OF_INDEXES) {
+ assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
+ return OS_INVALID_TLS_INDEX;
+ }
+
+ return ToGenericTLSIndex(dwIndex);
+}
+
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+ assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
+ return false;
+ }
+
+ if (TlsSetValue(ToNativeTLSIndex(nIndex), lpvValue))
+ return true;
+ else
+ return false;
+}
+
+void* OS_GetTLSValue(OS_TLSIndex nIndex)
+{
+ assert(nIndex != OS_INVALID_TLS_INDEX);
+ return TlsGetValue(ToNativeTLSIndex(nIndex));
+}
+
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
+{
+ if (nIndex == OS_INVALID_TLS_INDEX) {
+        assert(0 && "OS_FreeTLSIndex(): Invalid TLS Index");
+ return false;
+ }
+
+ if (TlsFree(ToNativeTLSIndex(nIndex)))
+ return true;
+ else
+ return false;
+}
+
+HANDLE GlobalLock;
+
+void InitGlobalLock()
+{
+ GlobalLock = CreateMutex(0, false, 0);
+}
+
+void GetGlobalLock()
+{
+ WaitForSingleObject(GlobalLock, INFINITE);
+}
+
+void ReleaseGlobalLock()
+{
+ ReleaseMutex(GlobalLock);
+}
+
+unsigned int __stdcall EnterGenericThread (void* entry)
+{
+ return ((TThreadEntrypoint)entry)(0);
+}
+
+//#define DUMP_COUNTERS
+
+void OS_DumpMemoryCounters()
+{
+#ifdef DUMP_COUNTERS
+ PROCESS_MEMORY_COUNTERS counters;
+ GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters));
+ printf("Working set size: %d\n", counters.WorkingSetSize);
+#else
+ printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
+#endif
+}
+
+} // namespace glslang
diff --git a/src/3rdparty/glslang/glslang/OSDependent/osinclude.h b/src/3rdparty/glslang/glslang/OSDependent/osinclude.h
new file mode 100644
index 0000000..218abe4
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/OSDependent/osinclude.h
@@ -0,0 +1,63 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef __OSINCLUDE_H
+#define __OSINCLUDE_H
+
+namespace glslang {
+
+//
+// Thread Local Storage Operations
+//
+typedef void* OS_TLSIndex;
+#define OS_INVALID_TLS_INDEX ((void*)0)
+
+OS_TLSIndex OS_AllocTLSIndex();
+bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue);
+bool OS_FreeTLSIndex(OS_TLSIndex nIndex);
+void* OS_GetTLSValue(OS_TLSIndex nIndex);
+
+void InitGlobalLock();
+void GetGlobalLock();
+void ReleaseGlobalLock();
+
+typedef unsigned int (*TThreadEntrypoint)(void*);
+
+void OS_CleanupThreadData(void);
+
+void OS_DumpMemoryCounters();
+
+} // end namespace glslang
+
+#endif // __OSINCLUDE_H
diff --git a/src/3rdparty/glslang/glslang/Public/ShaderLang.h b/src/3rdparty/glslang/glslang/Public/ShaderLang.h
new file mode 100644
index 0000000..0c25569
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Public/ShaderLang.h
@@ -0,0 +1,846 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013-2016 LunarG, Inc.
+// Copyright (C) 2015-2018 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#ifndef _COMPILER_INTERFACE_INCLUDED_
+#define _COMPILER_INTERFACE_INCLUDED_
+
+#include "../Include/ResourceLimits.h"
+#include "../MachineIndependent/Versions.h"
+
+#include <cstring>
+#include <vector>
+
+#ifdef _WIN32
+#define C_DECL __cdecl
+//#ifdef SH_EXPORTING
+// #define SH_IMPORT_EXPORT __declspec(dllexport)
+//#else
+// #define SH_IMPORT_EXPORT __declspec(dllimport)
+//#endif
+#define SH_IMPORT_EXPORT
+#else
+#define SH_IMPORT_EXPORT
+#define C_DECL
+#endif
+
+//
+// This is the platform independent interface between an OGL driver
+// and the shading language compiler/linker.
+//
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+// This should always increase, as some paths do not consume
+// a more major number.
+// It should increment by one when new functionality is added.
+#define GLSLANG_MINOR_VERSION 11
+
+//
+// Call before doing any other compiler/linker operations.
+//
+// (Call once per process, not once per thread.)
+//
+SH_IMPORT_EXPORT int ShInitialize();
+
+//
+// Call this at process shutdown to clean up memory.
+//
+SH_IMPORT_EXPORT int ShFinalize();
+
+//
+// Types of languages the compiler can consume.
+//
+typedef enum {
+ EShLangVertex,
+ EShLangTessControl,
+ EShLangTessEvaluation,
+ EShLangGeometry,
+ EShLangFragment,
+ EShLangCompute,
+ EShLangRayGenNV,
+ EShLangIntersectNV,
+ EShLangAnyHitNV,
+ EShLangClosestHitNV,
+ EShLangMissNV,
+ EShLangCallableNV,
+ EShLangTaskNV,
+ EShLangMeshNV,
+ EShLangCount,
+} EShLanguage; // would be better as stage, but this is ancient now
+
+typedef enum {
+ EShLangVertexMask = (1 << EShLangVertex),
+ EShLangTessControlMask = (1 << EShLangTessControl),
+ EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
+ EShLangGeometryMask = (1 << EShLangGeometry),
+ EShLangFragmentMask = (1 << EShLangFragment),
+ EShLangComputeMask = (1 << EShLangCompute),
+ EShLangRayGenNVMask = (1 << EShLangRayGenNV),
+ EShLangIntersectNVMask = (1 << EShLangIntersectNV),
+ EShLangAnyHitNVMask = (1 << EShLangAnyHitNV),
+ EShLangClosestHitNVMask = (1 << EShLangClosestHitNV),
+ EShLangMissNVMask = (1 << EShLangMissNV),
+ EShLangCallableNVMask = (1 << EShLangCallableNV),
+ EShLangTaskNVMask = (1 << EShLangTaskNV),
+ EShLangMeshNVMask = (1 << EShLangMeshNV),
+} EShLanguageMask;
+
+namespace glslang {
+
+class TType;
+
+typedef enum {
+ EShSourceNone,
+ EShSourceGlsl,
+ EShSourceHlsl,
+} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
+
+typedef enum {
+ EShClientNone,
+ EShClientVulkan,
+ EShClientOpenGL,
+} EShClient;
+
+typedef enum {
+ EShTargetNone,
+ EShTargetSpv, // preferred spelling
+ EshTargetSpv = EShTargetSpv, // legacy spelling
+} EShTargetLanguage;
+
+typedef enum {
+ EShTargetVulkan_1_0 = (1 << 22),
+ EShTargetVulkan_1_1 = (1 << 22) | (1 << 12),
+ EShTargetOpenGL_450 = 450,
+} EShTargetClientVersion;
+
+typedef EShTargetClientVersion EshTargetClientVersion;
+
+typedef enum {
+ EShTargetSpv_1_0 = (1 << 16),
+ EShTargetSpv_1_1 = (1 << 16) | (1 << 8),
+ EShTargetSpv_1_2 = (1 << 16) | (2 << 8),
+ EShTargetSpv_1_3 = (1 << 16) | (3 << 8),
+ EShTargetSpv_1_4 = (1 << 16) | (4 << 8),
+} EShTargetLanguageVersion;
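+
+// Editorial note: these values follow each client's own encoding. The Vulkan
+// entries use VK_MAKE_VERSION semantics, (major << 22) | (minor << 12) | patch,
+// so EShTargetVulkan_1_1 == 0x00401000; the SPIR-V entries mirror word 1 of a
+// SPIR-V module header, (major << 16) | (minor << 8), so EShTargetSpv_1_3 ==
+// 0x00010300. OpenGL simply uses the plain #version number, 450.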
+
+struct TInputLanguage {
+ EShSource languageFamily; // redundant information with other input, this one overrides when not EShSourceNone
+ EShLanguage stage; // redundant information with other input, this one overrides when not EShSourceNone
+ EShClient dialect;
+ int dialectVersion; // version of client's language definition, not the client (when not EShClientNone)
+};
+
+struct TClient {
+ EShClient client;
+ EShTargetClientVersion version; // version of client itself (not the client's input dialect)
+};
+
+struct TTarget {
+ EShTargetLanguage language;
+ EShTargetLanguageVersion version; // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header
+ bool hlslFunctionality1; // can target hlsl_functionality1 extension(s)
+};
+
+// All source/client/target versions and settings.
+// Can override previous methods of setting, when items are set here.
+// Expected to grow, as more are added, rather than growing parameter lists.
+struct TEnvironment {
+ TInputLanguage input; // definition of the input language
+ TClient client; // what client is the overall compilation being done for?
+ TTarget target; // what to generate
+};
+
+const char* StageName(EShLanguage);
+
+} // end namespace glslang
+
+//
+// Types of output the linker will create.
+//
+typedef enum {
+ EShExVertexFragment,
+ EShExFragment
+} EShExecutable;
+
+//
+// Optimization level for the compiler.
+//
+typedef enum {
+ EShOptNoGeneration,
+ EShOptNone,
+ EShOptSimple, // Optimizations that can be done quickly
+ EShOptFull, // Optimizations that will take more time
+} EShOptimizationLevel;
+
+//
+// Texture and Sampler transformation mode.
+//
+typedef enum {
+ EShTexSampTransKeep, // keep textures and samplers as is (default)
+    EShTexSampTransUpgradeTextureRemoveSampler, // change texture w/o embedded sampler into sampled texture and throw away all samplers
+} EShTextureSamplerTransformMode;
+
+//
+// Message choices for what errors and warnings are given.
+//
+enum EShMessages {
+ EShMsgDefault = 0, // default is to give all required errors and extra warnings
+ EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input
+ EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification
+ EShMsgAST = (1 << 2), // print the AST intermediate representation
+ EShMsgSpvRules = (1 << 3), // issue messages for SPIR-V generation
+ EShMsgVulkanRules = (1 << 4), // issue messages for Vulkan-requirements of GLSL for SPIR-V
+ EShMsgOnlyPreprocessor = (1 << 5), // only print out errors produced by the preprocessor
+ EShMsgReadHlsl = (1 << 6), // use HLSL parsing rules and semantics
+ EShMsgCascadingErrors = (1 << 7), // get cascading errors; risks error-recovery issues, instead of an early exit
+ EShMsgKeepUncalled = (1 << 8), // for testing, don't eliminate uncalled functions
+ EShMsgHlslOffsets = (1 << 9), // allow block offsets to follow HLSL rules instead of GLSL rules
+ EShMsgDebugInfo = (1 << 10), // save debug information
+ EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
+ EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
+ EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (right now only for samplers)
+};
+
+//
+// Options for building reflection
+//
+typedef enum {
+ EShReflectionDefault = 0, // default is original behaviour before options were added
+ EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
+ EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
+ EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
+ EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
+ EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
+ EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
+} EShReflectionOptions;
+
+//
+// Build a table for bindings. This can be used for locating
+// attributes, uniforms, globals, etc., as needed.
+//
+typedef struct {
+ const char* name;
+ int binding;
+} ShBinding;
+
+typedef struct {
+ int numBindings;
+ ShBinding* bindings; // array of bindings
+} ShBindingTable;
+
+//
+// ShHandle held by but opaque to the driver. It is allocated,
+// managed, and de-allocated by the compiler/linker. It's contents
+// are defined by and used by the compiler and linker. For example,
+// symbol table information and object code passed from the compiler
+// to the linker can be stored where ShHandle points.
+//
+// If handle creation fails, 0 will be returned.
+//
+typedef void* ShHandle;
+
+//
+// Driver calls these to create and destroy compiler/linker
+// objects.
+//
+SH_IMPORT_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int debugOptions); // one per shader
+SH_IMPORT_EXPORT ShHandle ShConstructLinker(const EShExecutable, int debugOptions); // one per shader pair
+SH_IMPORT_EXPORT ShHandle ShConstructUniformMap(); // one per uniform namespace (currently entire program object)
+SH_IMPORT_EXPORT void ShDestruct(ShHandle);
+
+//
+// The return value of ShCompile is boolean, non-zero indicating
+// success.
+//
+// The info-log should be written by ShCompile into
+// ShHandle, so it can answer future queries.
+//
+SH_IMPORT_EXPORT int ShCompile(
+ const ShHandle,
+ const char* const shaderStrings[],
+ const int numStrings,
+ const int* lengths,
+ const EShOptimizationLevel,
+ const TBuiltInResource *resources,
+ int debugOptions,
+ int defaultVersion = 110, // use 100 for ES environment, overridden by #version in shader
+ bool forwardCompatible = false, // give errors for use of deprecated features
+ EShMessages messages = EShMsgDefault // warnings and errors
+ );
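+
+//
+// Editorial usage sketch of this legacy interface (hypothetical caller; the
+// TBuiltInResource is assumed to be filled with real limits, e.g. from the
+// StandAlone defaults shipped with glslang):
+//
+//     ShInitialize();
+//     ShHandle compiler = ShConstructCompiler(EShLangFragment, 0 /* debugOptions */);
+//     const char* sources[] = { fragmentSourceText };  // hypothetical source string
+//     TBuiltInResource resources = {};                 // fill with real limits
+//     if (! ShCompile(compiler, sources, 1, nullptr, EShOptNone, &resources, 0))
+//         printf("%s\n", ShGetInfoLog(compiler));
+//     ShDestruct(compiler);
+//     ShFinalize();
+//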
+
+SH_IMPORT_EXPORT int ShLinkExt(
+ const ShHandle, // linker object
+ const ShHandle h[], // compiler objects to link together
+ const int numHandles);
+
+//
+// ShSetEncryptionMethod is a placeholder for specifying
+// how source code is encrypted.
+//
+SH_IMPORT_EXPORT void ShSetEncryptionMethod(ShHandle);
+
+//
+// All the following return 0 if the information is not
+// available in the object passed down, or the object is bad.
+//
+SH_IMPORT_EXPORT const char* ShGetInfoLog(const ShHandle);
+SH_IMPORT_EXPORT const void* ShGetExecutable(const ShHandle);
+SH_IMPORT_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*); // to detect user aliasing
+SH_IMPORT_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*); // to force any physical mappings
+//
+// Tell the linker to never assign a vertex attribute to this list of physical attributes
+//
+SH_IMPORT_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count);
+
+//
+// Returns the location ID of the named uniform.
+// Returns -1 if error.
+//
+SH_IMPORT_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name);
+
+#ifdef __cplusplus
+ } // end extern "C"
+#endif
+
+////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Deferred-Lowering C++ Interface
+// -----------------------------------
+//
+// Below is a new alternate C++ interface, which deprecates the above
+// opaque handle-based interface.
+//
+// The interface below is further designed to handle multiple compilation units per
+// stage, where the intermediate results, including the parse tree, are preserved
+// until link time, unlike the interface above, which lowers each compilation unit
+// at compile time. In the above model, linking occurs on the lowered results,
+// whereas in this model intra-stage linking can occur at the parse-tree level
+// (treeRoot in TIntermediate), and then a full stage can be lowered.
+//
+
+#include <list>
+#include <string>
+#include <utility>
+
+class TCompiler;
+class TInfoSink;
+
+namespace glslang {
+
+const char* GetEsslVersionString();
+const char* GetGlslVersionString();
+int GetKhronosToolId();
+
+class TIntermediate;
+class TProgram;
+class TPoolAllocator;
+
+// Call this exactly once per process before using anything else
+bool InitializeProcess();
+
+// Call once per process to tear down everything
+void FinalizeProcess();
+
+// Resource type for IO resolver
+enum TResourceType {
+ EResSampler,
+ EResTexture,
+ EResImage,
+ EResUbo,
+ EResSsbo,
+ EResUav,
+ EResCount
+};
+
+// Make one TShader per shader that you will link into a program. Then
+// - provide the shader through setStrings() or setStringsWithLengths()
+// - optionally call setEnv*(), see below for more detail
+// - optionally use setPreamble() to set a special shader string that will be
+// processed before all others but won't affect the validity of #version
+// - call parse(): source language and target environment must be selected
+// either by correct setting of EShMessages sent to parse(), or by
+// explicitly calling setEnv*()
+// - query the info logs
+//
+// N.B.: Does not yet support linking the same TShader instance into
+// multiple programs.
+//
+// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
+//
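+// A minimal usage sketch (illustrative only; "resources" is assumed to be a
+// valid TBuiltInResource table supplied by the caller):
+//
+//   glslang::InitializeProcess();
+//   glslang::TShader shader(EShLangVertex);
+//   const char* source = "#version 450\nvoid main() { gl_Position = vec4(0.0); }";
+//   shader.setStrings(&source, 1);
+//   if (!shader.parse(&resources, 450, false, EShMsgDefault))
+//       printf("%s\n", shader.getInfoLog());
+//   glslang::FinalizeProcess();
+//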
+class TShader {
+public:
+ explicit TShader(EShLanguage);
+ virtual ~TShader();
+ void setStrings(const char* const* s, int n);
+ void setStringsWithLengths(const char* const* s, const int* l, int n);
+ void setStringsWithLengthsAndNames(
+ const char* const* s, const int* l, const char* const* names, int n);
+ void setPreamble(const char* s) { preamble = s; }
+ void setEntryPoint(const char* entryPoint);
+ void setSourceEntryPoint(const char* sourceEntryPointName);
+ void addProcesses(const std::vector<std::string>&);
+
+ // IO resolver binding data: see comments in ShaderLang.cpp
+ void setShiftBinding(TResourceType res, unsigned int base);
+ void setShiftSamplerBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftTextureBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftImageBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftUboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftUavBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftCbufferBinding(unsigned int base); // synonym for setShiftUboBinding
+ void setShiftSsboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
+ void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set);
+ void setResourceSetBinding(const std::vector<std::string>& base);
+ void setAutoMapBindings(bool map);
+ void setAutoMapLocations(bool map);
+ void addUniformLocationOverride(const char* name, int loc);
+ void setUniformLocationBase(int base);
+ void setInvertY(bool invert);
+ void setHlslIoMapping(bool hlslIoMap);
+ void setFlattenUniformArrays(bool flatten);
+ void setNoStorageFormat(bool useUnknownFormat);
+ void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
+
+ // For setting up the environment (cleared to nothingness in the constructor).
+ // These must be called so that parsing is done for the right source language and
+ // target environment, either indirectly through TranslateEnvironment() based on
+// EShMessages et al., or directly by the user.
+ void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
+ {
+ environment.input.languageFamily = lang;
+ environment.input.stage = envStage;
+ environment.input.dialect = client;
+ environment.input.dialectVersion = version;
+ }
+ void setEnvClient(EShClient client, EShTargetClientVersion version)
+ {
+ environment.client.client = client;
+ environment.client.version = version;
+ }
+ void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version)
+ {
+ environment.target.language = lang;
+ environment.target.version = version;
+ }
+ void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
+ bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
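+
+ // One plausible environment setup (illustrative only; it assumes the
+ // EShTargetVulkan_1_1 and EShTargetSpv_1_3 values declared earlier in this
+ // header) for compiling GLSL to SPIR-V for Vulkan 1.1:
+ //   shader.setEnvInput(EShSourceGlsl, EShLangVertex, EShClientVulkan, 100);
+ //   shader.setEnvClient(EShClientVulkan, EShTargetVulkan_1_1);
+ //   shader.setEnvTarget(EShTargetSpv, EShTargetSpv_1_3);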
+
+ // Interface to #include handlers.
+ //
+ // To support #include, a client of Glslang does the following:
+ // 1. Call setStringsWithLengthsAndNames to set the source strings and associated
+ // names. For example, the names could be the names of the files
+ // containing the shader sources.
+ // 2. Call parse with an Includer.
+ //
+ // When the Glslang parser encounters an #include directive, it calls
+ // the Includer's include method with the requested include name
+ // together with the current string name. The returned IncludeResult
+ // contains the fully resolved name of the included source, together
+ // with the source text that should replace the #include directive
+ // in the source stream. After parsing that source, Glslang will
+ // release the IncludeResult object.
+ class Includer {
+ public:
+ // An IncludeResult contains the resolved name and content of a source
+ // inclusion.
+ struct IncludeResult {
+ IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) :
+ headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { }
+ // For a successful inclusion, the fully resolved name of the requested
+ // include. For example, in a file system-based includer, full resolution
+ // should convert a relative path name into an absolute path name.
+ // For a failed inclusion, this is an empty string.
+ const std::string headerName;
+ // The content and byte length of the requested inclusion. The
+ // Includer producing this IncludeResult retains ownership of the
+ // storage.
+ // For a failed inclusion, the headerData field points to a string
+ // containing error details.
+ const char* const headerData;
+ const size_t headerLength;
+ // Include resolver's context.
+ void* userData;
+ protected:
+ IncludeResult& operator=(const IncludeResult&);
+ IncludeResult();
+ };
+
+ // For both include methods below:
+ //
+ // Resolves an inclusion request by name, current source name,
+ // and include depth.
+ // On success, returns an IncludeResult containing the resolved name
+ // and content of the include.
+ // On failure, returns nullptr, or an IncludeResult
+ // with an empty string for the headerName and error details in the
+ // headerData field.
+ // The Includer retains ownership of the contents
+ // of the returned IncludeResult value, and those contents must
+ // remain valid until the releaseInclude method is called on that
+ // IncludeResult object.
+ //
+ // Note "local" vs. "system" is not an "either/or": "local" is an
+ // extra thing to do over "system". Both might get called, as per
+ // the C++ specification.
+
+ // For the "system" or <>-style includes: search the "system" paths.
+ virtual IncludeResult* includeSystem(const char* /*headerName*/,
+ const char* /*includerName*/,
+ size_t /*inclusionDepth*/) { return nullptr; }
+
+ // For the "local"-only aspect of a "" include. Should not search in the
+ // "system" paths, because on returning a failure, the parser will
+ // call includeSystem() to look in the "system" locations.
+ virtual IncludeResult* includeLocal(const char* /*headerName*/,
+ const char* /*includerName*/,
+ size_t /*inclusionDepth*/) { return nullptr; }
+
+ // Signals that the parser will no longer use the contents of the
+ // specified IncludeResult.
+ virtual void releaseInclude(IncludeResult*) = 0;
+ virtual ~Includer() {}
+ };
+
+ // Fail all Includer searches
+ class ForbidIncluder : public Includer {
+ public:
+ virtual void releaseInclude(IncludeResult*) override { }
+ };
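+
+ // A sketch of a custom Includer (illustrative only): a real implementation
+ // would resolve headerName against the including file's location, read the
+ // file, and keep the buffer alive until releaseInclude() is called;
+ // readFileSomehow is a hypothetical helper returning a heap-allocated
+ // std::string, or nullptr on failure:
+ //
+ //   class FileIncluder : public glslang::TShader::Includer {
+ //   public:
+ //       IncludeResult* includeLocal(const char* headerName, const char*,
+ //                                   size_t) override {
+ //           std::string* contents = readFileSomehow(headerName); // hypothetical helper
+ //           if (contents == nullptr)
+ //               return nullptr;
+ //           return new IncludeResult(headerName, contents->data(),
+ //                                    contents->size(), contents);
+ //       }
+ //       void releaseInclude(IncludeResult* result) override {
+ //           if (result != nullptr) {
+ //               delete static_cast<std::string*>(result->userData);
+ //               delete result;
+ //           }
+ //       }
+ //   };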
+
+ bool parse(const TBuiltInResource*, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages, Includer&);
+
+ bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages messages)
+ {
+ TShader::ForbidIncluder includer;
+ return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer);
+ }
+
+ // Equivalent to parse() without a default profile and without forcing defaults.
+ bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages)
+ {
+ return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages);
+ }
+
+ bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages,
+ Includer& includer)
+ {
+ return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
+ }
+
+ // NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
+ // is not an officially supported or fully working path.
+ bool preprocess(const TBuiltInResource* builtInResources,
+ int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
+ bool forwardCompatible, EShMessages message, std::string* outputString,
+ Includer& includer);
+
+ const char* getInfoLog();
+ const char* getInfoDebugLog();
+ EShLanguage getStage() const { return stage; }
+ TIntermediate* getIntermediate() const { return intermediate; }
+
+protected:
+ TPoolAllocator* pool;
+ EShLanguage stage;
+ TCompiler* compiler;
+ TIntermediate* intermediate;
+ TInfoSink* infoSink;
+ // strings and lengths follow the standard for glShaderSource:
+ // strings is an array of numStrings pointers to string data.
+ // lengths can be null, but if not it is an array of numStrings
+ // integers containing the length of the associated strings.
+ // if lengths is null or lengths[n] < 0 the associated strings[n] is
+ // assumed to be null-terminated.
+ // stringNames holds the optional names for the strings. If stringNames
+ // is null, then none of the strings has a name. If an element of
+ // stringNames is null, then the corresponding string does not have a name.
+ const char* const* strings;
+ const int* lengths;
+ const char* const* stringNames;
+ const char* preamble;
+ int numStrings;
+
+ // a function in the source string can be renamed FROM this TO the name given in setEntryPoint.
+ std::string sourceEntryPointName;
+
+ TEnvironment environment;
+
+ friend class TProgram;
+
+private:
+ TShader& operator=(TShader&);
+};
+
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
+//
+
+// Data needed for just a single object at the granularity exchanged by the reflection API
+class TObjectReflection {
+public:
+ TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex);
+
+ const TType* getType() const { return type; }
+ int getBinding() const;
+ void dump() const;
+ static TObjectReflection badReflection() { return TObjectReflection(); }
+
+ std::string name;
+ int offset;
+ int glDefineType;
+ int size; // data size in bytes for a block, array size for a (non-block) object that's an array
+ int index;
+ int counterIndex;
+ int numMembers;
+ int arrayStride; // stride of an array variable
+ int topLevelArrayStride; // stride of the top-level variable in a storage buffer member
+ EShLanguageMask stages;
+
+protected:
+ TObjectReflection()
+ : offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0),
+ topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr)
+ {
+ }
+
+ const TType* type;
+};
+
+class TReflection;
+class TIoMapper;
+
+// Allows customizing the binding layout after linking.
+// All used uniform variables will invoke at least validateBinding.
+// If validateBinding returns true, then resolveBinding, resolveSet,
+// and resolveUniformLocation are invoked to resolve the binding,
+// descriptor set index, and uniform location, respectively.
+//
+// Invocations happen in a particular order:
+// 1) all shader inputs
+// 2) all shader outputs
+// 3) all uniforms with binding and set already defined
+// 4) all uniforms with binding but no set defined
+// 5) all uniforms with set but no binding defined
+// 6) all uniforms with no binding and no set defined
+//
+// mapIO will use this resolver in two phases. The first
+// phase is a notification phase, calling the corresponding
+// notify callbacks; this phase ends with a call to endNotifications.
+// Phase two starts directly after the call to endNotifications
+// and calls all other callbacks to validate and to get the
+// bindings, sets, locations, and component and color indices.
+//
+// NOTE: Limit checks are still applied to bindings and sets,
+// and may result in an error.
+class TIoMapResolver
+{
+public:
+ virtual ~TIoMapResolver() {}
+
+ // Should return true if the resulting/current binding would be okay.
+ // Basic idea is to do aliasing binding checks with this.
+ virtual bool validateBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current binding should be overridden.
+ // Return -1 if the current binding (including no binding) should be kept.
+ virtual int resolveBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current set should be overridden.
+ // Return -1 if the current set (including no set) should be kept.
+ virtual int resolveSet(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current location should be overridden.
+ // Return -1 if the current location (including no location) should be kept.
+ virtual int resolveUniformLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return true if the resulting/current setup would be okay.
+ // Basic idea is to do aliasing checks and reject invalid semantic names.
+ virtual bool validateInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current location should be overridden.
+ // Return -1 if the current location (including no location) should be kept.
+ virtual int resolveInOutLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current component index should be overridden.
+ // Return -1 if the current component index (including no index) should be kept.
+ virtual int resolveInOutComponent(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Should return a value >= 0 if the current color index should be overridden.
+ // Return -1 if the current color index (including no index) should be kept.
+ virtual int resolveInOutIndex(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Notification of a uniform variable
+ virtual void notifyBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Notification of an in or out variable
+ virtual void notifyInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
+ // Called by mapIO when it has finished the notify pass
+ virtual void endNotifications(EShLanguage stage) = 0;
+ // Called by mapIO when it starts its notify pass for the given stage
+ virtual void beginNotifications(EShLanguage stage) = 0;
+ // Called by mapIO when it starts its resolve pass for the given stage
+ virtual void beginResolve(EShLanguage stage) = 0;
+ // Called by mapIO when it has finished the resolve pass
+ virtual void endResolve(EShLanguage stage) = 0;
+};
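+
+// A minimal resolver sketch (illustrative only): returning true from the
+// validate callbacks and -1 from every resolve callback keeps all existing
+// bindings, sets, and locations unchanged, e.g.:
+//
+//   struct KeepEverything : public glslang::TIoMapResolver {
+//       bool validateBinding(EShLanguage, const char*, const TType&, bool) override { return true; }
+//       int resolveBinding(EShLanguage, const char*, const TType&, bool) override { return -1; }
+//       // ... the remaining resolve* callbacks likewise return -1, validateInOut
+//       // returns true, and the notify/begin/end callbacks have empty bodies.
+//   };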
+
+// Make one TProgram per set of shaders that will get linked together. Add all
+// the shaders that are to be linked together. After calling shader.parse()
+// for all shaders, call link().
+//
+// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
+//
+class TProgram {
+public:
+ TProgram();
+ virtual ~TProgram();
+ void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
+
+ // Link Validation interface
+ bool link(EShMessages);
+ const char* getInfoLog();
+ const char* getInfoDebugLog();
+
+ TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
+
+ // Reflection Interface
+
+ // call first, to do liveness analysis, index mapping, etc.; returns false on failure
+ bool buildReflection(int opts = EShReflectionDefault);
+
+ unsigned getLocalSize(int dim) const; // return dim'th local size
+ int getReflectionIndex(const char *name) const;
+
+ int getNumUniformVariables() const;
+ const TObjectReflection& getUniform(int index) const;
+ int getNumUniformBlocks() const;
+ const TObjectReflection& getUniformBlock(int index) const;
+ int getNumPipeInputs() const;
+ const TObjectReflection& getPipeInput(int index) const;
+ int getNumPipeOutputs() const;
+ const TObjectReflection& getPipeOutput(int index) const;
+ int getNumBufferVariables() const;
+ const TObjectReflection& getBufferVariable(int index) const;
+ int getNumBufferBlocks() const;
+ const TObjectReflection& getBufferBlock(int index) const;
+ int getNumAtomicCounters() const;
+ const TObjectReflection& getAtomicCounter(int index) const;
+
+ // Legacy Reflection Interface - expressed in terms of above interface
+
+ // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
+ int getNumLiveUniformVariables() const { return getNumUniformVariables(); }
+
+ // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
+ int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }
+
+ // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
+ int getNumLiveAttributes() const { return getNumPipeInputs(); }
+
+ // can be used for glGetUniformIndices()
+ int getUniformIndex(const char *name) const { return getReflectionIndex(name); }
+
+ // can be used for "name" part of glGetActiveUniform()
+ const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }
+
+ // returns the binding number
+ int getUniformBinding(int index) const { return getUniform(index).getBinding(); }
+
+ // returns the shader stages where a uniform is present
+ EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
+ int getUniformBlockIndex(int index) const { return getUniform(index).index; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
+ int getUniformType(int index) const { return getUniform(index).glDefineType; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
+ int getUniformBufferOffset(int index) const { return getUniform(index).offset; }
+
+ // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
+ int getUniformArraySize(int index) const { return getUniform(index).size; }
+
+ // returns a TType*
+ const TType *getUniformTType(int index) const { return getUniform(index).getType(); }
+
+ // can be used for glGetActiveUniformBlockName()
+ const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }
+
+ // can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
+ int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }
+
+ // returns the block binding number
+ int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }
+
+ // returns the block index of the associated counter.
+ int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }
+
+ // returns a TType*
+ const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }
+
+ // can be used for glGetActiveAttrib()
+ const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }
+
+ // can be used for glGetActiveAttrib()
+ int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }
+
+ // returns a TType*
+ const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }
+
+ void dumpReflection();
+
+ // I/O mapping: apply base offsets and map live unbound variables.
+ // If a resolver is not provided, it uses the previous approach
+ // and respects auto assignment and offsets.
+ bool mapIO(TIoMapResolver* resolver = NULL);
+
+protected:
+ bool linkStage(EShLanguage, EShMessages);
+
+ TPoolAllocator* pool;
+ std::list<TShader*> stages[EShLangCount];
+ TIntermediate* intermediate[EShLangCount];
+ bool newedIntermediate[EShLangCount]; // track which intermediates were "new" versus reusing a singleton unit in a stage
+ TInfoSink* infoSink;
+ TReflection* reflection;
+ TIoMapper* ioMapper;
+ bool linked;
+
+private:
+ TProgram(TProgram&);
+ TProgram& operator=(TProgram&);
+};
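+
+// A minimal linking sketch (illustrative only), continuing from a parsed
+// TShader named "shader":
+//
+//   glslang::TProgram program;
+//   program.addShader(&shader);
+//   if (!program.link(EShMsgDefault))
+//       printf("%s\n", program.getInfoLog());
+//   else if (program.buildReflection())
+//       program.dumpReflection();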
+
+} // end namespace glslang
+
+#endif // _COMPILER_INTERFACE_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/updateGrammar b/src/3rdparty/glslang/glslang/updateGrammar
new file mode 100644
index 0000000..a546dd2
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/updateGrammar
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+bison --defines=MachineIndependent/glslang_tab.cpp.h -t MachineIndependent/glslang.y -o MachineIndependent/glslang_tab.cpp