Diffstat (limited to 'src/3rdparty/glslang/glslang/Include')
-rw-r--r--  src/3rdparty/glslang/glslang/Include/BaseTypes.h          |  545
-rw-r--r--  src/3rdparty/glslang/glslang/Include/Common.h             |  291
-rw-r--r--  src/3rdparty/glslang/glslang/Include/ConstantUnion.h      |  938
-rw-r--r--  src/3rdparty/glslang/glslang/Include/InfoSink.h           |  144
-rw-r--r--  src/3rdparty/glslang/glslang/Include/InitializeGlobals.h  |   44
-rw-r--r--  src/3rdparty/glslang/glslang/Include/PoolAlloc.h          |  317
-rw-r--r--  src/3rdparty/glslang/glslang/Include/ResourceLimits.h     |  149
-rw-r--r--  src/3rdparty/glslang/glslang/Include/ShHandle.h           |  176
-rw-r--r--  src/3rdparty/glslang/glslang/Include/Types.h              | 2276
-rw-r--r--  src/3rdparty/glslang/glslang/Include/arrays.h             |  341
-rw-r--r--  src/3rdparty/glslang/glslang/Include/intermediate.h       | 1730
-rw-r--r--  src/3rdparty/glslang/glslang/Include/revision.h           |    3
-rw-r--r--  src/3rdparty/glslang/glslang/Include/revision.template    |   13
13 files changed, 6967 insertions(+), 0 deletions(-)
diff --git a/src/3rdparty/glslang/glslang/Include/BaseTypes.h b/src/3rdparty/glslang/glslang/Include/BaseTypes.h
new file mode 100644
index 0000000..1827c49
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/BaseTypes.h
@@ -0,0 +1,545 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _BASICTYPES_INCLUDED_
+#define _BASICTYPES_INCLUDED_
+
+#include <cassert> // for the assert() in getTypeRank() below
+
+namespace glslang {
+
+//
+// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this.
+//
+enum TBasicType {
+ EbtVoid,
+ EbtFloat,
+ EbtDouble,
+ EbtFloat16,
+ EbtInt8,
+ EbtUint8,
+ EbtInt16,
+ EbtUint16,
+ EbtInt,
+ EbtUint,
+ EbtInt64,
+ EbtUint64,
+ EbtBool,
+ EbtAtomicUint,
+ EbtSampler,
+ EbtStruct,
+ EbtBlock,
+
+#ifdef NV_EXTENSIONS
+ EbtAccStructNV,
+#endif
+
+ EbtReference,
+
+ // HLSL types that live only temporarily.
+ EbtString,
+
+ EbtNumTypes
+};
+
+//
+// Storage qualifiers. Should align with the different kinds of storage,
+// resource, or GLSL storage qualifiers. Expanding this enum is deprecated.
+//
+// N.B.: You probably DON'T want to add anything here, but rather just add it
+// to the built-in variables. See the comment above TBuiltInVariable.
+//
+// A new built-in variable will normally use an existing qualifier, like 'in' or 'out';
+// DO NOT follow the design pattern of, say, EvqInstanceId.
+//
+enum TStorageQualifier {
+ EvqTemporary, // For temporaries (within a function), read/write
+ EvqGlobal, // For globals read/write
+ EvqConst, // User-defined constant values, will be semantically constant and constant folded
+ EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqUniform, // read only, shared with app
+ EvqBuffer, // read/write, shared with app
+ EvqShared, // compute shader's read/write 'shared' qualifier
+
+#ifdef NV_EXTENSIONS
+ EvqPayloadNV,
+ EvqPayloadInNV,
+ EvqHitAttrNV,
+ EvqCallableDataNV,
+ EvqCallableDataInNV,
+#endif
+
+ // parameters
+ EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
+ EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
+ EvqInOut,
+ EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
+
+ // built-ins read by vertex shader
+ EvqVertexId,
+ EvqInstanceId,
+
+ // built-ins written by vertex shader
+ EvqPosition,
+ EvqPointSize,
+ EvqClipVertex,
+
+ // built-ins read by fragment shader
+ EvqFace,
+ EvqFragCoord,
+ EvqPointCoord,
+
+ // built-ins written by fragment shader
+ EvqFragColor,
+ EvqFragDepth,
+
+ // end of list
+ EvqLast
+};
+
+//
+// Subcategories of TStorageQualifier, simply to give a direct mapping
+// between built-in variable names and a numerical value (the enum).
+//
+// For backward compatibility, there is some redundancy between
+// TStorageQualifier and these. Existing members should both be maintained accurately.
+// However, any new built-in variable (and any existing non-redundant one)
+// must follow the pattern that the specific built-in is here, and only its
+// general qualifier is in TStorageQualifier.
+//
+// Something like gl_Position, which is sometimes 'in' and sometimes 'out',
+// shows up as two different built-in variables in a single stage, but
+// only has a single enum in TBuiltInVariable, so both the
+// TStorageQualifier and the TBuiltInVariable are needed to distinguish
+// between them.
+//
+enum TBuiltInVariable {
+ EbvNone,
+ EbvNumWorkGroups,
+ EbvWorkGroupSize,
+ EbvWorkGroupId,
+ EbvLocalInvocationId,
+ EbvGlobalInvocationId,
+ EbvLocalInvocationIndex,
+ EbvNumSubgroups,
+ EbvSubgroupID,
+ EbvSubGroupSize,
+ EbvSubGroupInvocation,
+ EbvSubGroupEqMask,
+ EbvSubGroupGeMask,
+ EbvSubGroupGtMask,
+ EbvSubGroupLeMask,
+ EbvSubGroupLtMask,
+ EbvSubgroupSize2,
+ EbvSubgroupInvocation2,
+ EbvSubgroupEqMask2,
+ EbvSubgroupGeMask2,
+ EbvSubgroupGtMask2,
+ EbvSubgroupLeMask2,
+ EbvSubgroupLtMask2,
+ EbvVertexId,
+ EbvInstanceId,
+ EbvVertexIndex,
+ EbvInstanceIndex,
+ EbvBaseVertex,
+ EbvBaseInstance,
+ EbvDrawId,
+ EbvPosition,
+ EbvPointSize,
+ EbvClipVertex,
+ EbvClipDistance,
+ EbvCullDistance,
+ EbvNormal,
+ EbvVertex,
+ EbvMultiTexCoord0,
+ EbvMultiTexCoord1,
+ EbvMultiTexCoord2,
+ EbvMultiTexCoord3,
+ EbvMultiTexCoord4,
+ EbvMultiTexCoord5,
+ EbvMultiTexCoord6,
+ EbvMultiTexCoord7,
+ EbvFrontColor,
+ EbvBackColor,
+ EbvFrontSecondaryColor,
+ EbvBackSecondaryColor,
+ EbvTexCoord,
+ EbvFogFragCoord,
+ EbvInvocationId,
+ EbvPrimitiveId,
+ EbvLayer,
+ EbvViewportIndex,
+ EbvPatchVertices,
+ EbvTessLevelOuter,
+ EbvTessLevelInner,
+ EbvBoundingBox,
+ EbvTessCoord,
+ EbvColor,
+ EbvSecondaryColor,
+ EbvFace,
+ EbvFragCoord,
+ EbvPointCoord,
+ EbvFragColor,
+ EbvFragData,
+ EbvFragDepth,
+ EbvFragStencilRef,
+ EbvSampleId,
+ EbvSamplePosition,
+ EbvSampleMask,
+ EbvHelperInvocation,
+
+#ifdef AMD_EXTENSIONS
+ EbvBaryCoordNoPersp,
+ EbvBaryCoordNoPerspCentroid,
+ EbvBaryCoordNoPerspSample,
+ EbvBaryCoordSmooth,
+ EbvBaryCoordSmoothCentroid,
+ EbvBaryCoordSmoothSample,
+ EbvBaryCoordPullModel,
+#endif
+
+ EbvViewIndex,
+ EbvDeviceIndex,
+
+ EbvFragSizeEXT,
+ EbvFragInvocationCountEXT,
+
+#ifdef NV_EXTENSIONS
+ EbvViewportMaskNV,
+ EbvSecondaryPositionNV,
+ EbvSecondaryViewportMaskNV,
+ EbvPositionPerViewNV,
+ EbvViewportMaskPerViewNV,
+ EbvFragFullyCoveredNV,
+ EbvFragmentSizeNV,
+ EbvInvocationsPerPixelNV,
+ // raytracing
+ EbvLaunchIdNV,
+ EbvLaunchSizeNV,
+ EbvInstanceCustomIndexNV,
+ EbvWorldRayOriginNV,
+ EbvWorldRayDirectionNV,
+ EbvObjectRayOriginNV,
+ EbvObjectRayDirectionNV,
+ EbvRayTminNV,
+ EbvRayTmaxNV,
+ EbvHitTNV,
+ EbvHitKindNV,
+ EbvObjectToWorldNV,
+ EbvWorldToObjectNV,
+ EbvIncomingRayFlagsNV,
+ EbvBaryCoordNV,
+ EbvBaryCoordNoPerspNV,
+ EbvTaskCountNV,
+ EbvPrimitiveCountNV,
+ EbvPrimitiveIndicesNV,
+ EbvClipDistancePerViewNV,
+ EbvCullDistancePerViewNV,
+ EbvLayerPerViewNV,
+ EbvMeshViewCountNV,
+ EbvMeshViewIndicesNV,
+#endif
+
+ // HLSL built-ins that live only temporarily, until they get remapped
+ // to one of the above.
+ EbvFragDepthGreater,
+ EbvFragDepthLesser,
+ EbvGsOutputStream,
+ EbvOutputPatch,
+ EbvInputPatch,
+
+ // structbuffer types
+ EbvAppendConsume, // no need to differentiate append and consume
+ EbvRWStructuredBuffer,
+ EbvStructuredBuffer,
+ EbvByteAddressBuffer,
+ EbvRWByteAddressBuffer,
+
+ EbvLast
+};
+
+// These will show up in error messages
+__inline const char* GetStorageQualifierString(TStorageQualifier q)
+{
+ switch (q) {
+ case EvqTemporary: return "temp"; break;
+ case EvqGlobal: return "global"; break;
+ case EvqConst: return "const"; break;
+ case EvqConstReadOnly: return "const (read only)"; break;
+ case EvqVaryingIn: return "in"; break;
+ case EvqVaryingOut: return "out"; break;
+ case EvqUniform: return "uniform"; break;
+ case EvqBuffer: return "buffer"; break;
+ case EvqShared: return "shared"; break;
+ case EvqIn: return "in"; break;
+ case EvqOut: return "out"; break;
+ case EvqInOut: return "inout"; break;
+ case EvqVertexId: return "gl_VertexId"; break;
+ case EvqInstanceId: return "gl_InstanceId"; break;
+ case EvqPosition: return "gl_Position"; break;
+ case EvqPointSize: return "gl_PointSize"; break;
+ case EvqClipVertex: return "gl_ClipVertex"; break;
+ case EvqFace: return "gl_FrontFacing"; break;
+ case EvqFragCoord: return "gl_FragCoord"; break;
+ case EvqPointCoord: return "gl_PointCoord"; break;
+ case EvqFragColor: return "fragColor"; break;
+ case EvqFragDepth: return "gl_FragDepth"; break;
+#ifdef NV_EXTENSIONS
+ case EvqPayloadNV: return "rayPayloadNV"; break;
+ case EvqPayloadInNV: return "rayPayloadInNV"; break;
+ case EvqHitAttrNV: return "hitAttributeNV"; break;
+ case EvqCallableDataNV: return "callableDataNV"; break;
+ case EvqCallableDataInNV: return "callableDataInNV"; break;
+#endif
+ default: return "unknown qualifier";
+ }
+}
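+
+// Editorial usage sketch (not upstream code): diagnostics splice these
+// strings into their messages, e.g.
+//
+//     // hypothetical error path
+//     printf("cannot write to a variable qualified '%s'\n",
+//            GetStorageQualifierString(EvqUniform));  // prints: uniform
+//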
+
+__inline const char* GetBuiltInVariableString(TBuiltInVariable v)
+{
+ switch (v) {
+ case EbvNone: return "";
+ case EbvNumWorkGroups: return "NumWorkGroups";
+ case EbvWorkGroupSize: return "WorkGroupSize";
+ case EbvWorkGroupId: return "WorkGroupID";
+ case EbvLocalInvocationId: return "LocalInvocationID";
+ case EbvGlobalInvocationId: return "GlobalInvocationID";
+ case EbvLocalInvocationIndex: return "LocalInvocationIndex";
+ case EbvSubGroupSize: return "SubGroupSize";
+ case EbvSubGroupInvocation: return "SubGroupInvocation";
+ case EbvSubGroupEqMask: return "SubGroupEqMask";
+ case EbvSubGroupGeMask: return "SubGroupGeMask";
+ case EbvSubGroupGtMask: return "SubGroupGtMask";
+ case EbvSubGroupLeMask: return "SubGroupLeMask";
+ case EbvSubGroupLtMask: return "SubGroupLtMask";
+ case EbvVertexId: return "VertexId";
+ case EbvInstanceId: return "InstanceId";
+ case EbvVertexIndex: return "VertexIndex";
+ case EbvInstanceIndex: return "InstanceIndex";
+ case EbvBaseVertex: return "BaseVertex";
+ case EbvBaseInstance: return "BaseInstance";
+ case EbvDrawId: return "DrawId";
+ case EbvPosition: return "Position";
+ case EbvPointSize: return "PointSize";
+ case EbvClipVertex: return "ClipVertex";
+ case EbvClipDistance: return "ClipDistance";
+ case EbvCullDistance: return "CullDistance";
+ case EbvNormal: return "Normal";
+ case EbvVertex: return "Vertex";
+ case EbvMultiTexCoord0: return "MultiTexCoord0";
+ case EbvMultiTexCoord1: return "MultiTexCoord1";
+ case EbvMultiTexCoord2: return "MultiTexCoord2";
+ case EbvMultiTexCoord3: return "MultiTexCoord3";
+ case EbvMultiTexCoord4: return "MultiTexCoord4";
+ case EbvMultiTexCoord5: return "MultiTexCoord5";
+ case EbvMultiTexCoord6: return "MultiTexCoord6";
+ case EbvMultiTexCoord7: return "MultiTexCoord7";
+ case EbvFrontColor: return "FrontColor";
+ case EbvBackColor: return "BackColor";
+ case EbvFrontSecondaryColor: return "FrontSecondaryColor";
+ case EbvBackSecondaryColor: return "BackSecondaryColor";
+ case EbvTexCoord: return "TexCoord";
+ case EbvFogFragCoord: return "FogFragCoord";
+ case EbvInvocationId: return "InvocationID";
+ case EbvPrimitiveId: return "PrimitiveID";
+ case EbvLayer: return "Layer";
+ case EbvViewportIndex: return "ViewportIndex";
+ case EbvPatchVertices: return "PatchVertices";
+ case EbvTessLevelOuter: return "TessLevelOuter";
+ case EbvTessLevelInner: return "TessLevelInner";
+ case EbvBoundingBox: return "BoundingBox";
+ case EbvTessCoord: return "TessCoord";
+ case EbvColor: return "Color";
+ case EbvSecondaryColor: return "SecondaryColor";
+ case EbvFace: return "Face";
+ case EbvFragCoord: return "FragCoord";
+ case EbvPointCoord: return "PointCoord";
+ case EbvFragColor: return "FragColor";
+ case EbvFragData: return "FragData";
+ case EbvFragDepth: return "FragDepth";
+ case EbvFragStencilRef: return "FragStencilRef";
+ case EbvSampleId: return "SampleId";
+ case EbvSamplePosition: return "SamplePosition";
+ case EbvSampleMask: return "SampleMaskIn";
+ case EbvHelperInvocation: return "HelperInvocation";
+
+#ifdef AMD_EXTENSIONS
+ case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
+ case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
+ case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
+ case EbvBaryCoordSmooth: return "BaryCoordSmooth";
+ case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
+ case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
+ case EbvBaryCoordPullModel: return "BaryCoordPullModel";
+#endif
+
+ case EbvViewIndex: return "ViewIndex";
+ case EbvDeviceIndex: return "DeviceIndex";
+
+ case EbvFragSizeEXT: return "FragSizeEXT";
+ case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
+
+#ifdef NV_EXTENSIONS
+ case EbvViewportMaskNV: return "ViewportMaskNV";
+ case EbvSecondaryPositionNV: return "SecondaryPositionNV";
+ case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
+ case EbvPositionPerViewNV: return "PositionPerViewNV";
+ case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
+ case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
+ case EbvFragmentSizeNV: return "FragmentSizeNV";
+ case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV";
+ case EbvLaunchIdNV: return "LaunchIdNV";
+ case EbvLaunchSizeNV: return "LaunchSizeNV";
+ case EbvInstanceCustomIndexNV: return "InstanceCustomIndexNV";
+ case EbvWorldRayOriginNV: return "WorldRayOriginNV";
+ case EbvWorldRayDirectionNV: return "WorldRayDirectionNV";
+ case EbvObjectRayOriginNV: return "ObjectRayOriginNV";
+ case EbvObjectRayDirectionNV: return "ObjectRayDirectionNV";
+ case EbvRayTminNV: return "RayTminNV";
+ case EbvRayTmaxNV: return "RayTmaxNV";
+ case EbvHitTNV: return "HitTNV";
+ case EbvHitKindNV: return "HitKindNV";
+ case EbvIncomingRayFlagsNV: return "IncomingRayFlagsNV";
+ case EbvObjectToWorldNV: return "ObjectToWorldNV";
+ case EbvWorldToObjectNV: return "WorldToObjectNV";
+
+ case EbvBaryCoordNV: return "BaryCoordNV";
+ case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
+ case EbvTaskCountNV: return "TaskCountNV";
+ case EbvPrimitiveCountNV: return "PrimitiveCountNV";
+ case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV";
+ case EbvClipDistancePerViewNV: return "ClipDistancePerViewNV";
+ case EbvCullDistancePerViewNV: return "CullDistancePerViewNV";
+ case EbvLayerPerViewNV: return "LayerPerViewNV";
+ case EbvMeshViewCountNV: return "MeshViewCountNV";
+ case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
+#endif
+ default: return "unknown built-in variable";
+ }
+}
+
+// In this enum, order matters; users can assume higher precision is a bigger value
+// and EpqNone is 0.
+enum TPrecisionQualifier {
+ EpqNone = 0,
+ EpqLow,
+ EpqMedium,
+ EpqHigh
+};
+
+__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
+{
+ switch (p) {
+ case EpqNone: return ""; break;
+ case EpqLow: return "lowp"; break;
+ case EpqMedium: return "mediump"; break;
+ case EpqHigh: return "highp"; break;
+ default: return "unknown precision qualifier";
+ }
+}
+
+__inline bool isTypeSignedInt(TBasicType type)
+{
+ switch (type) {
+ case EbtInt8:
+ case EbtInt16:
+ case EbtInt:
+ case EbtInt64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+__inline bool isTypeUnsignedInt(TBasicType type)
+{
+ switch (type) {
+ case EbtUint8:
+ case EbtUint16:
+ case EbtUint:
+ case EbtUint64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+__inline bool isTypeInt(TBasicType type)
+{
+ return isTypeSignedInt(type) || isTypeUnsignedInt(type);
+}
+
+__inline bool isTypeFloat(TBasicType type)
+{
+ switch (type) {
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ return true;
+ default:
+ return false;
+ }
+}
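+
+// Editorial examples: these predicates partition the numeric basic types:
+//
+//     isTypeSignedInt(EbtInt16)    // true
+//     isTypeUnsignedInt(EbtInt16)  // false
+//     isTypeInt(EbtUint64)         // true
+//     isTypeFloat(EbtFloat16)      // true
+//     isTypeInt(EbtBool)           // false; bool is not an integer type here
+//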
+
+__inline int getTypeRank(TBasicType type)
+{
+ int res = -1;
+ switch (type) {
+ case EbtInt8:
+ case EbtUint8:
+ res = 0;
+ break;
+ case EbtInt16:
+ case EbtUint16:
+ res = 1;
+ break;
+ case EbtInt:
+ case EbtUint:
+ res = 2;
+ break;
+ case EbtInt64:
+ case EbtUint64:
+ res = 3;
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ return res;
+}
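+
+// Editorial example: the rank orders the integer types by width
+// (int8/uint8 = 0, int16/uint16 = 1, int/uint = 2, int64/uint64 = 3),
+// so a widening check can compare ranks:
+//
+//     bool widens = getTypeRank(EbtInt16) < getTypeRank(EbtInt); // true: 1 < 2
+//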
+
+} // end namespace glslang
+
+#endif // _BASICTYPES_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/Common.h b/src/3rdparty/glslang/glslang/Include/Common.h
new file mode 100644
index 0000000..98e5a1a
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/Common.h
@@ -0,0 +1,291 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _COMMON_INCLUDED_
+#define _COMMON_INCLUDED_
+
+
+#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
+#include <sstream>
+namespace std {
+template<typename T>
+std::string to_string(const T& val) {
+ std::ostringstream os;
+ os << val;
+ return os.str();
+}
+}
+#endif
+
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+ #include <basetsd.h>
+ #ifndef snprintf
+ #define snprintf sprintf_s
+ #endif
+ #define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
+#elif defined (solaris)
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <sys/int_types.h>
+ #define UINT_PTR uintptr_t
+#else
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <stdint.h>
+ #define UINT_PTR uintptr_t
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+ #include <stdlib.h>
+ inline long long int strtoll (const char* str, char** endptr, int base)
+ {
+ return _strtoi64(str, endptr, base);
+ }
+ inline unsigned long long int strtoull (const char* str, char** endptr, int base)
+ {
+ return _strtoui64(str, endptr, base);
+ }
+ inline long long int atoll (const char* str)
+ {
+ return strtoll(str, NULL, 10);
+ }
+#endif
+
+#if defined(_MSC_VER)
+#define strdup _strdup
+#endif
+
+/* Windows-only pragmas */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4786) // Don't warn about too long identifiers
+ #pragma warning(disable : 4514) // unused inline method
+ #pragma warning(disable : 4201) // nameless union
+#endif
+
+#include <set>
+#include <unordered_set>
+#include <vector>
+#include <map>
+#include <unordered_map>
+#include <list>
+#include <algorithm>
+#include <string>
+#include <cstdio>
+#include <cassert>
+
+#include "PoolAlloc.h"
+
+//
+// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
+//
+#define POOL_ALLOCATOR_NEW_DELETE(A) \
+ void* operator new(size_t s) { return (A).allocate(s); } \
+ void* operator new(size_t, void *_Where) { return (_Where); } \
+ void operator delete(void*) { } \
+ void operator delete(void *, void *) { } \
+ void* operator new[](size_t s) { return (A).allocate(s); } \
+ void* operator new[](size_t, void *_Where) { return (_Where); } \
+ void operator delete[](void*) { } \
+ void operator delete[](void *, void *) { }
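+
+// Editorial usage sketch (TFoo is hypothetical): a class opts into pool
+// allocation by expanding the macro with an allocator expression:
+//
+//     class TFoo {
+//     public:
+//         POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+//     };
+//     TFoo* f = new TFoo;  // comes from the thread's pool
+//     delete f;            // no-op; memory is reclaimed when the pool is popped
+//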
+
+namespace glslang {
+
+ //
+ // Pool version of string.
+ //
+ typedef pool_allocator<char> TStringAllocator;
+ typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
+
+} // end namespace glslang
+
+// Repackage std::hash for use by unordered map/set with a TString key:
+// a 32-bit FNV-1a hash over the string's bytes.
+namespace std {
+
+ template<> struct hash<glslang::TString> {
+ std::size_t operator()(const glslang::TString& s) const
+ {
+ const unsigned _FNV_offset_basis = 2166136261U;
+ const unsigned _FNV_prime = 16777619U;
+ unsigned _Val = _FNV_offset_basis;
+ size_t _Count = s.size();
+ const char* _First = s.c_str();
+ for (size_t _Next = 0; _Next < _Count; ++_Next)
+ {
+ _Val ^= (unsigned)_First[_Next];
+ _Val *= _FNV_prime;
+ }
+
+ return _Val;
+ }
+ };
+}
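+
+// Editorial usage sketch: with the specialization above, TString can key the
+// pool-allocated unordered containers defined below:
+//
+//     glslang::TUnorderedMap<glslang::TString, int> useCounts;
+//     useCounts[glslang::TString("gl_Position")]++;
+//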
+
+namespace glslang {
+
+inline TString* NewPoolTString(const char* s)
+{
+ void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
+ return new(memory) TString(s);
+}
+
+template<class T> inline T* NewPoolObject(T*)
+{
+ return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
+}
+
+template<class T> inline T* NewPoolObject(T, int instances)
+{
+ return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
+}
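+
+// Editorial usage sketch: pool-backed creation; objects are never
+// individually freed, the whole pool is released at once:
+//
+//     TString* s = NewPoolTString("gl_Position");
+//     // TNode is a hypothetical default-constructible type:
+//     TNode* nodes = NewPoolObject(TNode(), 8);  // array of 8
+//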
+
+//
+// Pool allocator versions of vectors, lists, and maps
+//
+template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
+ TVector() : std::vector<T, pool_allocator<T> >() {}
+ TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
+ TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
+ TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
+};
+
+template <class T> class TList : public std::list<T, pool_allocator<T> > {
+};
+
+template <class K, class D, class CMP = std::less<K> >
+class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
+};
+
+template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
+class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
+};
+
+//
+// Persistent string memory. Should only be used for strings that survive
+// across compiles/links.
+//
+typedef std::basic_string<char> TPersistString;
+
+//
+// templatized min and max functions.
+//
+template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
+template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
+
+//
+// Create a TString object from an integer.
+//
+#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
+inline const TString String(const int i, const int base = 10)
+{
+ char text[16]; // 32 bit ints are at most 10 digits in base 10
+ _itoa_s(i, text, sizeof(text), base);
+ return text;
+}
+#else
+inline const TString String(const int i, const int /*base*/ = 10)
+{
+ char text[16]; // 32 bit ints are at most 10 digits in base 10
+
+ // we assume base 10 for all cases
+ snprintf(text, sizeof(text), "%d", i);
+
+ return text;
+}
+#endif
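+
+// Editorial examples:
+//
+//     String(-42);      // "-42"
+//     String(255, 16);  // "ff" via _itoa_s on MSVC; the portable branch
+//                       // ignores the base and yields "255"
+//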
+
+struct TSourceLoc {
+ void init()
+ {
+ name = nullptr; string = 0; line = 0; column = 0;
+ }
+ void init(int stringNum) { init(); string = stringNum; }
+ // Returns the name if it exists. Otherwise, returns the string number.
+ std::string getStringNameOrNum(bool quoteStringName = true) const
+ {
+ if (name != nullptr) {
+ TString qstr = quoteStringName ? ("\"" + *name + "\"") : *name;
+ std::string ret_str(qstr.c_str());
+ return ret_str;
+ }
+ return std::to_string((long long)string);
+ }
+ const char* getFilename() const
+ {
+ if (name == nullptr)
+ return nullptr;
+ return name->c_str();
+ }
+ const char* getFilenameStr() const { return name == nullptr ? "" : name->c_str(); }
+ TString* name; // descriptive name for this string, when a textual name is available, otherwise nullptr
+ int string;
+ int line;
+ int column;
+};
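+
+// Editorial usage sketch: the scanner stamps each token with a TSourceLoc:
+//
+//     TSourceLoc loc;
+//     loc.init(2);                 // source-string number 2, no name
+//     loc.line = 10; loc.column = 5;
+//     loc.getStringNameOrNum();    // "2", since name == nullptr
+//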
+
+class TPragmaTable : public TMap<TString, TString> {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+};
+
+const int MaxTokenLength = 1024;
+
+template <class T> bool IsPow2(T powerOf2)
+{
+ if (powerOf2 <= 0)
+ return false;
+
+ return (powerOf2 & (powerOf2 - 1)) == 0;
+}
+
+// Round number up to a multiple of the given powerOf2, which is not an
+// exponent; it is the power-of-2 value itself (e.g. 8, 16), and must be one.
+template <class T> void RoundToPow2(T& number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
+}
+
+template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ return ! (number & (powerOf2 - 1));
+}
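+
+// Editorial worked examples:
+//
+//     IsPow2(8);                 // true
+//     IsPow2(12);                // false
+//     int n = 13;
+//     RoundToPow2(n, 8);         // n becomes 16: (13 + 7) & ~7
+//     IsMultipleOfPow2(48, 16);  // true: 48 & 15 == 0
+//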
+
+} // end namespace glslang
+
+#endif // _COMMON_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/ConstantUnion.h b/src/3rdparty/glslang/glslang/Include/ConstantUnion.h
new file mode 100644
index 0000000..3e93340
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/ConstantUnion.h
@@ -0,0 +1,938 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _CONSTANT_UNION_INCLUDED_
+#define _CONSTANT_UNION_INCLUDED_
+
+#include "../Include/Common.h"
+#include "../Include/BaseTypes.h"
+
+namespace glslang {
+
+class TConstUnion {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TConstUnion() : iConst(0), type(EbtInt) { }
+
+ void setI8Const(signed char i)
+ {
+ i8Const = i;
+ type = EbtInt8;
+ }
+
+ void setU8Const(unsigned char u)
+ {
+ u8Const = u;
+ type = EbtUint8;
+ }
+
+ void setI16Const(signed short i)
+ {
+ i16Const = i;
+ type = EbtInt16;
+ }
+
+ void setU16Const(unsigned short u)
+ {
+ u16Const = u;
+ type = EbtUint16;
+ }
+
+ void setIConst(int i)
+ {
+ iConst = i;
+ type = EbtInt;
+ }
+
+ void setUConst(unsigned int u)
+ {
+ uConst = u;
+ type = EbtUint;
+ }
+
+ void setI64Const(long long i64)
+ {
+ i64Const = i64;
+ type = EbtInt64;
+ }
+
+ void setU64Const(unsigned long long u64)
+ {
+ u64Const = u64;
+ type = EbtUint64;
+ }
+
+ void setDConst(double d)
+ {
+ dConst = d;
+ type = EbtDouble;
+ }
+
+ void setBConst(bool b)
+ {
+ bConst = b;
+ type = EbtBool;
+ }
+
+ void setSConst(const TString* s)
+ {
+ sConst = s;
+ type = EbtString;
+ }
+
+ signed char getI8Const() const { return i8Const; }
+ unsigned char getU8Const() const { return u8Const; }
+ signed short getI16Const() const { return i16Const; }
+ unsigned short getU16Const() const { return u16Const; }
+ int getIConst() const { return iConst; }
+ unsigned int getUConst() const { return uConst; }
+ long long getI64Const() const { return i64Const; }
+ unsigned long long getU64Const() const { return u64Const; }
+ double getDConst() const { return dConst; }
+ bool getBConst() const { return bConst; }
+ const TString* getSConst() const { return sConst; }
+
+ bool operator==(const signed char i) const
+ {
+ if (i == i8Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned char u) const
+ {
+ if (u == u8Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const signed short i) const
+ {
+ if (i == i16Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned short u) const
+ {
+ if (u == u16Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const int i) const
+ {
+ if (i == iConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned int u) const
+ {
+ if (u == uConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const long long i64) const
+ {
+ if (i64 == i64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned long long u64) const
+ {
+ if (u64 == u64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const double d) const
+ {
+ if (d == dConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const bool b) const
+ {
+ if (b == bConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const TConstUnion& constant) const
+ {
+ if (constant.type != type)
+ return false;
+
+ switch (type) {
+ case EbtInt16:
+ if (constant.i16Const == i16Const)
+ return true;
+
+ break;
+ case EbtUint16:
+ if (constant.u16Const == u16Const)
+ return true;
+
+ break;
+ case EbtInt8:
+ if (constant.i8Const == i8Const)
+ return true;
+
+ break;
+ case EbtUint8:
+ if (constant.u8Const == u8Const)
+ return true;
+
+ break;
+ case EbtInt:
+ if (constant.iConst == iConst)
+ return true;
+
+ break;
+ case EbtUint:
+ if (constant.uConst == uConst)
+ return true;
+
+ break;
+ case EbtInt64:
+ if (constant.i64Const == i64Const)
+ return true;
+
+ break;
+ case EbtUint64:
+ if (constant.u64Const == u64Const)
+ return true;
+
+ break;
+ case EbtDouble:
+ if (constant.dConst == dConst)
+ return true;
+
+ break;
+ case EbtBool:
+ if (constant.bConst == bConst)
+ return true;
+
+ break;
+ default:
+ assert(false && "Default missing");
+ }
+
+ return false;
+ }
+
+ bool operator!=(const signed char i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned char u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const signed short i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned short u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const int i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned int u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const long long i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned long long u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const float f) const
+ {
+ return !operator==(f);
+ }
+
+ bool operator!=(const bool b) const
+ {
+ return !operator==(b);
+ }
+
+ bool operator!=(const TConstUnion& constant) const
+ {
+ return !operator==(constant);
+ }
+
+ bool operator>(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8:
+ if (i8Const > constant.i8Const)
+ return true;
+
+ return false;
+ case EbtUint8:
+ if (u8Const > constant.u8Const)
+ return true;
+
+ return false;
+ case EbtInt16:
+ if (i16Const > constant.i16Const)
+ return true;
+
+ return false;
+ case EbtUint16:
+ if (u16Const > constant.u16Const)
+ return true;
+
+ return false;
+ case EbtInt:
+ if (iConst > constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst > constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const > constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const > constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst > constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ bool operator<(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8:
+ if (i8Const < constant.i8Const)
+ return true;
+
+ return false;
+ case EbtUint8:
+ if (u8Const < constant.u8Const)
+ return true;
+
+ return false;
+ case EbtInt16:
+ if (i16Const < constant.i16Const)
+ return true;
+
+ return false;
+ case EbtUint16:
+ if (u16Const < constant.u16Const)
+ return true;
+
+ return false;
+ case EbtInt:
+ if (iConst < constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst < constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const < constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const < constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst < constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ TConstUnion operator+(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const + constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator-(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator*(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator%(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const % constant.i16Const); break;
+ case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
+ case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator>>(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI8Const(i8Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI8Const(i8Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI8Const(i8Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI8Const(i8Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI8Const(i8Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI8Const(i8Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI8Const(i8Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU8Const(u8Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU8Const(u8Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU8Const(u8Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU8Const(u8Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU8Const(u8Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU8Const(u8Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU8Const(u8Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI16Const(i16Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI16Const(i16Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI16Const(i16Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI16Const(i16Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI16Const(i16Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI16Const(i16Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI16Const(i16Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU16Const(u16Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU16Const(u16Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU16Const(u16Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU16Const(u16Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU16Const(u16Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU16Const(u16Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU16Const(u16Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break;
+ case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break;
+ case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break;
+ case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
+ case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break;
+ case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break;
+ case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break;
+ case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break;
+ case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setI64Const(i64Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setI64Const(i64Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setI64Const(i64Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU64Const(u64Const >> constant.i8Const); break;
+ case EbtUint8: returnValue.setU64Const(u64Const >> constant.u8Const); break;
+ case EbtInt16: returnValue.setU64Const(u64Const >> constant.i16Const); break;
+ case EbtUint16: returnValue.setU64Const(u64Const >> constant.u16Const); break;
+ case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator<<(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI8Const(i8Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI8Const(i8Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI8Const(i8Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI8Const(i8Const << constant.iConst); break;
+ case EbtUint: returnValue.setI8Const(i8Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI8Const(i8Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI8Const(i8Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint8:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU8Const(u8Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU8Const(u8Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU8Const(u8Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU8Const(u8Const << constant.iConst); break;
+ case EbtUint: returnValue.setU8Const(u8Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU8Const(u8Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU8Const(u8Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI16Const(i16Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI16Const(i16Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI16Const(i16Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI16Const(i16Const << constant.iConst); break;
+ case EbtUint: returnValue.setI16Const(i16Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI16Const(i16Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI16Const(i16Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint16:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU16Const(u16Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU16Const(u16Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU16Const(u16Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU16Const(u16Const << constant.iConst); break;
+ case EbtUint: returnValue.setU16Const(u16Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU16Const(u16Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU16Const(u16Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
+ case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
+ case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
+ case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
+ case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
+ case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
+ case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
+ case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
+ case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
+ case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
+ case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
+ case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
+ case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setI64Const(i64Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setI64Const(i64Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setI64Const(i64Const << constant.u16Const); break;
+ case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break;
+ case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint64:
+ switch (constant.type) {
+ case EbtInt8: returnValue.setU64Const(u64Const << constant.i8Const); break;
+ case EbtUint8: returnValue.setU64Const(u64Const << constant.u8Const); break;
+ case EbtInt16: returnValue.setU64Const(u64Const << constant.i16Const); break;
+ case EbtUint16: returnValue.setU64Const(u64Const << constant.u16Const); break;
+ case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break;
+ case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break;
+ case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator&(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator|(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator^(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break;
+ case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break;
+ case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break;
+ case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break;
+ case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator~() const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt8: returnValue.setI8Const(~i8Const); break;
+ case EbtUint8: returnValue.setU8Const(~u8Const); break;
+ case EbtInt16: returnValue.setI16Const(~i16Const); break;
+ case EbtUint16: returnValue.setU16Const(~u16Const); break;
+ case EbtInt: returnValue.setIConst(~iConst); break;
+ case EbtUint: returnValue.setUConst(~uConst); break;
+ case EbtInt64: returnValue.setI64Const(~i64Const); break;
+ case EbtUint64: returnValue.setU64Const(~u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator&&(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtBool: returnValue.setBConst(bConst && constant.bConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator||(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtBool: returnValue.setBConst(bConst || constant.bConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TBasicType getType() const { return type; }
+
+private:
+ union {
+ signed char i8Const; // used for i8vec, scalar int8s
+ unsigned char u8Const; // used for u8vec, scalar uint8s
+ signed short i16Const; // used for i16vec, scalar int16s
+ unsigned short u16Const; // used for u16vec, scalar uint16s
+ int iConst; // used for ivec, scalar ints
+ unsigned int uConst; // used for uvec, scalar uints
+ long long i64Const; // used for i64vec, scalar int64s
+ unsigned long long u64Const; // used for u64vec, scalar uint64s
+ bool bConst; // used for bvec, scalar bools
+ double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles
+ const TString* sConst; // string constant
+ };
+
+ TBasicType type;
+};
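+
+// Editorial usage sketch: constant folding builds TConstUnion values and
+// combines them with the overloaded operators:
+//
+//     TConstUnion a, b;
+//     a.setIConst(6);
+//     b.setIConst(7);
+//     TConstUnion c = a * b;   // c.getIConst() == 42, c.getType() == EbtInt
+//     bool same = (c == 42);   // true, via operator==(int)
+//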
+
+// Encapsulate having a pointer to an array of TConstUnion,
+// which only needs to be allocated if its size is going to be
+// bigger than 0.
+//
+// One convenience is being able to use [] to index into the array, instead
+// of C++ treating it as an array of pointers to vectors.
+//
+// General usage is that the size is known up front, and it is
+// created once with the proper size.
+//
+class TConstUnionArray {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TConstUnionArray() : unionArray(nullptr) { }
+ virtual ~TConstUnionArray() { }
+
+ explicit TConstUnionArray(int size)
+ {
+ if (size == 0)
+ unionArray = nullptr;
+ else
+ unionArray = new TConstUnionVector(size);
+ }
+ TConstUnionArray(const TConstUnionArray& a) : unionArray(a.unionArray) { }
+ TConstUnionArray(const TConstUnionArray& a, int start, int size)
+ {
+ unionArray = new TConstUnionVector(size);
+ for (int i = 0; i < size; ++i)
+ (*unionArray)[i] = a[start + i];
+ }
+
+ // Use this constructor for a smear operation
+ TConstUnionArray(int size, const TConstUnion& val)
+ {
+ unionArray = new TConstUnionVector(size, val);
+ }
+
+ int size() const { return unionArray ? (int)unionArray->size() : 0; }
+ TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
+ const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
+ bool operator==(const TConstUnionArray& rhs) const
+ {
+ // this includes the case that both are unallocated
+ if (unionArray == rhs.unionArray)
+ return true;
+
+ if (! unionArray || ! rhs.unionArray)
+ return false;
+
+ return *unionArray == *rhs.unionArray;
+ }
+ bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
+
+ double dot(const TConstUnionArray& rhs)
+ {
+ assert(rhs.unionArray->size() == unionArray->size());
+ double sum = 0.0;
+
+ for (size_t comp = 0; comp < unionArray->size(); ++comp)
+ sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
+
+ return sum;
+ }
+
+ bool empty() const { return unionArray == nullptr; }
+
+protected:
+ typedef TVector<TConstUnion> TConstUnionVector;
+ TConstUnionVector* unionArray;
+};
+
+} // end namespace glslang
+
+#endif // _CONSTANT_UNION_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/InfoSink.h b/src/3rdparty/glslang/glslang/Include/InfoSink.h
new file mode 100644
index 0000000..dceb603
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/InfoSink.h
@@ -0,0 +1,144 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _INFOSINK_INCLUDED_
+#define _INFOSINK_INCLUDED_
+
+#include "../Include/Common.h"
+#include <cmath>
+
+namespace glslang {
+
+//
+// TPrefixType is used to centralize how info log messages start.
+// See below.
+//
+enum TPrefixType {
+ EPrefixNone,
+ EPrefixWarning,
+ EPrefixError,
+ EPrefixInternalError,
+ EPrefixUnimplemented,
+ EPrefixNote
+};
+
+enum TOutputStream {
+ ENull = 0,
+ EDebugger = 0x01,
+ EStdOut = 0x02,
+ EString = 0x04,
+};
+//
+// Encapsulate info logs for all objects that have them.
+//
+// The methods are a general set of tools for getting a variety of
+// messages and types inserted into the log.
+//
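+// For example, a sketch of typical message traffic (TInfoSink, at the
+// end of this header, bundles two of these):
+//
+//     TInfoSinkBase sink;
+//     sink.message(EPrefixWarning, "unused variable");
+//     // sink.c_str() now holds "WARNING: unused variable\n"
+//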
+class TInfoSinkBase {
+public:
+ TInfoSinkBase() : outputStream(4) {} // defaults to EString (0x04)
+ void erase() { sink.erase(); }
+ TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; }
+ TInfoSinkBase& operator<<(char c) { append(1, c); return *this; }
+ TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
+ TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
+ TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
+ TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size];
+ snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
+ append(buf);
+ return *this; }
+ TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
+ TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
+ TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; }
+ TInfoSinkBase& operator+(const char* s) { append(s); return *this; }
+ const char* c_str() const { return sink.c_str(); }
+ void prefix(TPrefixType message) {
+ switch(message) {
+ case EPrefixNone: break;
+ case EPrefixWarning: append("WARNING: "); break;
+ case EPrefixError: append("ERROR: "); break;
+ case EPrefixInternalError: append("INTERNAL ERROR: "); break;
+ case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break;
+ case EPrefixNote: append("NOTE: "); break;
+ default: append("UNKNOWN ERROR: "); break;
+ }
+ }
+ void location(const TSourceLoc& loc) {
+ const int maxSize = 24;
+ char locText[maxSize];
+ snprintf(locText, maxSize, ":%d", loc.line);
+ append(loc.getStringNameOrNum(false).c_str());
+ append(locText);
+ append(": ");
+ }
+ void message(TPrefixType message, const char* s) {
+ prefix(message);
+ append(s);
+ append("\n");
+ }
+ void message(TPrefixType message, const char* s, const TSourceLoc& loc) {
+ prefix(message);
+ location(loc);
+ append(s);
+ append("\n");
+ }
+
+ void setOutputStream(int output = 4)
+ {
+ outputStream = output;
+ }
+
+protected:
+ void append(const char* s);
+
+ void append(int count, char c);
+ void append(const TPersistString& t);
+ void append(const TString& t);
+
+ void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2)
+ sink.reserve(sink.capacity() + sink.capacity() / 2); }
+ void appendToStream(const char* s);
+ TPersistString sink;
+ int outputStream;
+};
+
+} // end namespace glslang
+
+class TInfoSink {
+public:
+ glslang::TInfoSinkBase info;
+ glslang::TInfoSinkBase debug;
+};
+
+#endif // _INFOSINK_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/InitializeGlobals.h b/src/3rdparty/glslang/glslang/Include/InitializeGlobals.h
new file mode 100644
index 0000000..95d0a40
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/InitializeGlobals.h
@@ -0,0 +1,44 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef __INITIALIZE_GLOBALS_INCLUDED_
+#define __INITIALIZE_GLOBALS_INCLUDED_
+
+namespace glslang {
+
+bool InitializePoolIndex();
+
+} // end namespace glslang
+
+#endif // __INITIALIZE_GLOBALS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/PoolAlloc.h b/src/3rdparty/glslang/glslang/Include/PoolAlloc.h
new file mode 100644
index 0000000..0e237a6
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/PoolAlloc.h
@@ -0,0 +1,317 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _POOLALLOC_INCLUDED_
+#define _POOLALLOC_INCLUDED_
+
+#ifdef _DEBUG
+# define GUARD_BLOCKS // define to enable guard block sanity checking
+#endif
+
+//
+// This header defines an allocator that can be used to efficiently
+// allocate a large number of small requests for heap memory, with the
+// intention that they are not individually deallocated, but rather
+// collectively deallocated at one time.
+//
+// This simultaneously
+//
+// * Makes each individual allocation much more efficient; the
+// typical allocation is trivial.
+// * Completely avoids the cost of doing individual deallocation.
+// * Saves the trouble of tracking down and plugging a large class of leaks.
+//
+// Individual classes can use this allocator by supplying their own
+// new and delete methods.
+//
+// STL containers can use this allocator by using the pool_allocator
+// class as the allocator (second) template argument.
+//
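+// As a sketch of the first pattern (the class name is illustrative,
+// not part of this header), a class can route its allocations to the
+// active pool like so:
+//
+//     class TNode {
+//     public:
+//         void* operator new(size_t bytes)
+//             { return GetThreadPoolAllocator().allocate(bytes); }
+//         void operator delete(void*) { }  // reclaimed in bulk by pop()
+//     };
+//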
+
+#include <cstddef>
+#include <cstring>
+#include <vector>
+
+namespace glslang {
+
+// If we are using guard blocks, we must track each individual
+// allocation. If we aren't using guard blocks, these
+// never get instantiated, so they won't have any impact.
+//
+
+class TAllocation {
+public:
+ TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
+ size(size), mem(mem), prevAlloc(prev) {
+ // Allocations are bracketed:
+ // [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
+ // This would be cleaner with if (guardBlockSize)..., but that
+ // makes the compiler print warnings about 0 length memsets,
+ // even with the if() protecting them.
+# ifdef GUARD_BLOCKS
+ memset(preGuard(), guardBlockBeginVal, guardBlockSize);
+ memset(data(), userDataFill, size);
+ memset(postGuard(), guardBlockEndVal, guardBlockSize);
+# endif
+ }
+
+ void check() const {
+ checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
+ checkGuardBlock(postGuard(), guardBlockEndVal, "after");
+ }
+
+ void checkAllocList() const;
+
+ // Return total size needed to accommodate user buffer of 'size',
+ // plus our tracking data.
+ inline static size_t allocationSize(size_t size) {
+ return size + 2 * guardBlockSize + headerSize();
+ }
+
+ // Offset from surrounding buffer to get to user data buffer.
+ inline static unsigned char* offsetAllocation(unsigned char* m) {
+ return m + guardBlockSize + headerSize();
+ }
+
+private:
+ void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
+
+ // Find offsets to pre and post guard blocks, and user data buffer
+ unsigned char* preGuard() const { return mem + headerSize(); }
+ unsigned char* data() const { return preGuard() + guardBlockSize; }
+ unsigned char* postGuard() const { return data() + size; }
+
+ size_t size; // size of the user data area
+ unsigned char* mem; // beginning of our allocation (pts to header)
+ TAllocation* prevAlloc; // prior allocation in the chain
+
+ const static unsigned char guardBlockBeginVal;
+ const static unsigned char guardBlockEndVal;
+ const static unsigned char userDataFill;
+
+ const static size_t guardBlockSize;
+# ifdef GUARD_BLOCKS
+ inline static size_t headerSize() { return sizeof(TAllocation); }
+# else
+ inline static size_t headerSize() { return 0; }
+# endif
+};
+
+//
+// There are several stacks. One is to track the user's pushing and
+// popping, and is not yet implemented. The others are simply
+// repositories of free pages or used pages.
+//
+// Page stacks are linked together with a simple header at the beginning
+// of each allocation obtained from the underlying OS. Multi-page allocations
+// are returned to the OS. Individual page allocations are kept for future
+// re-use.
+//
+// The "page size" used is not, nor must it match, the underlying OS
+// page size. But, having it be about that size or equal to a set of
+// pages is likely most optimal.
+//
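+// A sketch of the push/pop model described above:
+//
+//     TPoolAllocator pool;
+//     pool.push();                  // mark a point to pop back to
+//     void* p = pool.allocate(64);  // aligned; never freed individually
+//     pool.pop();                   // reclaims everything since push()
+//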
+class TPoolAllocator {
+public:
+ TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
+
+ //
+ // Don't call the destructor just to free up the memory, call pop()
+ //
+ ~TPoolAllocator();
+
+ //
+ // Call push() to establish a new place to pop memory to. This does not
+ // have to be called to get things started.
+ //
+ void push();
+
+ //
+ // Call pop() to free all memory allocated since the last call to push(),
+ // or, if push() was never called, all memory since the first allocation.
+ //
+ void pop();
+
+ //
+ // Call popAll() to free all memory allocated.
+ //
+ void popAll();
+
+ //
+ // Call allocate() to actually acquire memory. Returns 0 if no memory is
+ // available, otherwise a properly aligned pointer to 'numBytes' of memory.
+ //
+ void* allocate(size_t numBytes);
+
+ //
+ // There is no deallocate. The point of this class is that
+ // deallocation can be skipped by the user of it, as the model
+ // of use is to simultaneously deallocate everything at once
+ // by calling pop(), and to not have to solve memory leak problems.
+ //
+
+protected:
+ friend struct tHeader;
+
+ struct tHeader {
+ tHeader(tHeader* nextPage, size_t pageCount) :
+#ifdef GUARD_BLOCKS
+ lastAllocation(0),
+#endif
+ nextPage(nextPage), pageCount(pageCount) { }
+
+ ~tHeader() {
+#ifdef GUARD_BLOCKS
+ if (lastAllocation)
+ lastAllocation->checkAllocList();
+#endif
+ }
+
+#ifdef GUARD_BLOCKS
+ TAllocation* lastAllocation;
+#endif
+ tHeader* nextPage;
+ size_t pageCount;
+ };
+
+ struct tAllocState {
+ size_t offset;
+ tHeader* page;
+ };
+ typedef std::vector<tAllocState> tAllocStack;
+
+ // Track allocations if and only if we're using guard blocks
+#ifndef GUARD_BLOCKS
+ void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
+#else
+ void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
+ new(memory) TAllocation(numBytes, memory, block->lastAllocation);
+ block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
+#endif
+
+ // This is optimized entirely away if GUARD_BLOCKS is not defined.
+ return TAllocation::offsetAllocation(memory);
+ }
+
+ size_t pageSize; // granularity of allocation from the OS
+ size_t alignment; // all returned allocations will be aligned at
+ // this granularity, which will be a power of 2
+ size_t alignmentMask;
+ size_t headerSkip; // amount of memory to skip to make room for the
+ // header (basically, size of header, rounded
+ // up to make it aligned)
+ size_t currentPageOffset; // next offset in top of inUseList to allocate from
+ tHeader* freeList; // list of popped memory
+ tHeader* inUseList; // list of all memory currently being used
+ tAllocStack stack; // stack of where to allocate from, to partition pool
+
+ int numCalls; // just an interesting statistic
+ size_t totalBytes; // just an interesting statistic
+private:
+ TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator
+ TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor
+};
+
+//
+// There could potentially be many pools with pops happening at
+// different times. But a simple use is to have a global pop
+// with everyone using the same global allocator.
+//
+extern TPoolAllocator& GetThreadPoolAllocator();
+void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
+
+//
+// This STL compatible allocator is intended to be used as the allocator
+// parameter to templatized STL containers, like vector and map.
+//
+// It will use the pools for allocation, and not
+// do any deallocation, but will still do destruction.
+//
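+// For instance, a sketch of a pool-backed container:
+//
+//     std::vector<int, pool_allocator<int> > v;
+//     v.push_back(1);  // element storage comes from the thread's pool
+//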
+template<class T>
+class pool_allocator {
+public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef T *pointer;
+ typedef const T *const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T value_type;
+ template<class Other>
+ struct rebind {
+ typedef pool_allocator<Other> other;
+ };
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+
+ pool_allocator() : allocator(GetThreadPoolAllocator()) { }
+ pool_allocator(TPoolAllocator& a) : allocator(a) { }
+ pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
+
+ template<class Other>
+ pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
+
+ pointer allocate(size_type n) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+ pointer allocate(size_type n, const void*) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+
+ void deallocate(void*, size_type) { }
+ void deallocate(pointer, size_type) { }
+
+ pointer _Charalloc(size_t n) {
+ return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
+
+ void construct(pointer p, const T& val) { new ((void *)p) T(val); }
+ void destroy(pointer p) { p->T::~T(); }
+
+ bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
+ bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
+
+ size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
+ size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
+
+ void setAllocator(TPoolAllocator* a) { allocator = *a; }
+ TPoolAllocator& getAllocator() const { return allocator; }
+
+protected:
+ pool_allocator& operator=(const pool_allocator&) { return *this; }
+ TPoolAllocator& allocator;
+};
+
+} // end namespace glslang
+
+#endif // _POOLALLOC_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/ResourceLimits.h b/src/3rdparty/glslang/glslang/Include/ResourceLimits.h
new file mode 100644
index 0000000..106b21d
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/ResourceLimits.h
@@ -0,0 +1,149 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _RESOURCE_LIMITS_INCLUDED_
+#define _RESOURCE_LIMITS_INCLUDED_
+
+struct TLimits {
+ bool nonInductiveForLoops;
+ bool whileLoops;
+ bool doWhileLoops;
+ bool generalUniformIndexing;
+ bool generalAttributeMatrixVectorIndexing;
+ bool generalVaryingIndexing;
+ bool generalSamplerIndexing;
+ bool generalVariableIndexing;
+ bool generalConstantMatrixVectorIndexing;
+};
+
+struct TBuiltInResource {
+ int maxLights;
+ int maxClipPlanes;
+ int maxTextureUnits;
+ int maxTextureCoords;
+ int maxVertexAttribs;
+ int maxVertexUniformComponents;
+ int maxVaryingFloats;
+ int maxVertexTextureImageUnits;
+ int maxCombinedTextureImageUnits;
+ int maxTextureImageUnits;
+ int maxFragmentUniformComponents;
+ int maxDrawBuffers;
+ int maxVertexUniformVectors;
+ int maxVaryingVectors;
+ int maxFragmentUniformVectors;
+ int maxVertexOutputVectors;
+ int maxFragmentInputVectors;
+ int minProgramTexelOffset;
+ int maxProgramTexelOffset;
+ int maxClipDistances;
+ int maxComputeWorkGroupCountX;
+ int maxComputeWorkGroupCountY;
+ int maxComputeWorkGroupCountZ;
+ int maxComputeWorkGroupSizeX;
+ int maxComputeWorkGroupSizeY;
+ int maxComputeWorkGroupSizeZ;
+ int maxComputeUniformComponents;
+ int maxComputeTextureImageUnits;
+ int maxComputeImageUniforms;
+ int maxComputeAtomicCounters;
+ int maxComputeAtomicCounterBuffers;
+ int maxVaryingComponents;
+ int maxVertexOutputComponents;
+ int maxGeometryInputComponents;
+ int maxGeometryOutputComponents;
+ int maxFragmentInputComponents;
+ int maxImageUnits;
+ int maxCombinedImageUnitsAndFragmentOutputs;
+ int maxCombinedShaderOutputResources;
+ int maxImageSamples;
+ int maxVertexImageUniforms;
+ int maxTessControlImageUniforms;
+ int maxTessEvaluationImageUniforms;
+ int maxGeometryImageUniforms;
+ int maxFragmentImageUniforms;
+ int maxCombinedImageUniforms;
+ int maxGeometryTextureImageUnits;
+ int maxGeometryOutputVertices;
+ int maxGeometryTotalOutputComponents;
+ int maxGeometryUniformComponents;
+ int maxGeometryVaryingComponents;
+ int maxTessControlInputComponents;
+ int maxTessControlOutputComponents;
+ int maxTessControlTextureImageUnits;
+ int maxTessControlUniformComponents;
+ int maxTessControlTotalOutputComponents;
+ int maxTessEvaluationInputComponents;
+ int maxTessEvaluationOutputComponents;
+ int maxTessEvaluationTextureImageUnits;
+ int maxTessEvaluationUniformComponents;
+ int maxTessPatchComponents;
+ int maxPatchVertices;
+ int maxTessGenLevel;
+ int maxViewports;
+ int maxVertexAtomicCounters;
+ int maxTessControlAtomicCounters;
+ int maxTessEvaluationAtomicCounters;
+ int maxGeometryAtomicCounters;
+ int maxFragmentAtomicCounters;
+ int maxCombinedAtomicCounters;
+ int maxAtomicCounterBindings;
+ int maxVertexAtomicCounterBuffers;
+ int maxTessControlAtomicCounterBuffers;
+ int maxTessEvaluationAtomicCounterBuffers;
+ int maxGeometryAtomicCounterBuffers;
+ int maxFragmentAtomicCounterBuffers;
+ int maxCombinedAtomicCounterBuffers;
+ int maxAtomicCounterBufferSize;
+ int maxTransformFeedbackBuffers;
+ int maxTransformFeedbackInterleavedComponents;
+ int maxCullDistances;
+ int maxCombinedClipAndCullDistances;
+ int maxSamples;
+ int maxMeshOutputVerticesNV;
+ int maxMeshOutputPrimitivesNV;
+ int maxMeshWorkGroupSizeX_NV;
+ int maxMeshWorkGroupSizeY_NV;
+ int maxMeshWorkGroupSizeZ_NV;
+ int maxTaskWorkGroupSizeX_NV;
+ int maxTaskWorkGroupSizeY_NV;
+ int maxTaskWorkGroupSizeZ_NV;
+ int maxMeshViewCountNV;
+
+ TLimits limits;
+};
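+
+// A sketch of how a client might populate this (the values shown are
+// illustrative, not mandated defaults):
+//
+//     TBuiltInResource resources = {};
+//     resources.maxDrawBuffers = 8;
+//     resources.limits.whileLoops = true;
+//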
+
+#endif // _RESOURCE_LIMITS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/ShHandle.h b/src/3rdparty/glslang/glslang/Include/ShHandle.h
new file mode 100644
index 0000000..df07bd8
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/ShHandle.h
@@ -0,0 +1,176 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _SHHANDLE_INCLUDED_
+#define _SHHANDLE_INCLUDED_
+
+//
+// Machine-independent part of the compiler's private objects
+// sent as ShHandle to the driver.
+//
+// This should not be included by driver code.
+//
+
+#define SH_EXPORTING
+#include "../Public/ShaderLang.h"
+#include "../MachineIndependent/Versions.h"
+#include "InfoSink.h"
+
+class TCompiler;
+class TLinker;
+class TUniformMap;
+
+//
+// The base class used to back handles returned to the driver.
+//
+class TShHandleBase {
+public:
+ TShHandleBase() { pool = new glslang::TPoolAllocator; }
+ virtual ~TShHandleBase() { delete pool; }
+ virtual TCompiler* getAsCompiler() { return 0; }
+ virtual TLinker* getAsLinker() { return 0; }
+ virtual TUniformMap* getAsUniformMap() { return 0; }
+ virtual glslang::TPoolAllocator* getPool() const { return pool; }
+private:
+ glslang::TPoolAllocator* pool;
+};
+
+//
+// The base class for the machine dependent linker to derive from
+// for managing where uniforms live.
+//
+class TUniformMap : public TShHandleBase {
+public:
+ TUniformMap() { }
+ virtual ~TUniformMap() { }
+ virtual TUniformMap* getAsUniformMap() { return this; }
+ virtual int getLocation(const char* name) = 0;
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink infoSink;
+};
+
+class TIntermNode;
+
+//
+// The base class for the machine dependent compiler to derive from
+// for managing object code from the compile.
+//
+class TCompiler : public TShHandleBase {
+public:
+ TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { }
+ virtual ~TCompiler() { }
+ EShLanguage getLanguage() { return language; }
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+
+ virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
+
+ virtual TCompiler* getAsCompiler() { return this; }
+ virtual bool linkable() { return haveValidObjectCode; }
+
+ TInfoSink& infoSink;
+protected:
+ TCompiler& operator=(TCompiler&);
+
+ EShLanguage language;
+ bool haveValidObjectCode;
+};
+
+//
+// Link operations are based on a list of compile results...
+//
+typedef glslang::TVector<TCompiler*> TCompilerList;
+typedef glslang::TVector<TShHandleBase*> THandleList;
+
+//
+// The base class for the machine dependent linker to derive from
+// to manage the resulting executable.
+//
+
+class TLinker : public TShHandleBase {
+public:
+ TLinker(EShExecutable e, TInfoSink& iSink) :
+ infoSink(iSink),
+ executable(e),
+ haveReturnableObjectCode(false),
+ appAttributeBindings(0),
+ fixedAttributeBindings(0),
+ excludedAttributes(0),
+ excludedCount(0),
+ uniformBindings(0) { }
+ virtual TLinker* getAsLinker() { return this; }
+ virtual ~TLinker() { }
+ virtual bool link(TCompilerList&, TUniformMap*) = 0;
+ virtual bool link(THandleList&) { return false; }
+ virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
+ virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
+ virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
+ virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
+ virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
+ virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink& infoSink;
+protected:
+ TLinker& operator=(TLinker&);
+ EShExecutable executable;
+ bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
+
+ const ShBindingTable* appAttributeBindings;
+ const ShBindingTable* fixedAttributeBindings;
+ const int* excludedAttributes;
+ int excludedCount;
+ ShBindingTable* uniformBindings; // created by the linker
+};
+
+//
+// This is the interface between the machine independent code
+// and the machine dependent code.
+//
+// The machine dependent code should derive from the classes
+// above. Then Construct*() and Delete*() will create and
+// destroy the machine dependent objects, which contain the
+// above machine independent information.
+//
+TCompiler* ConstructCompiler(EShLanguage, int);
+
+TShHandleBase* ConstructLinker(EShExecutable, int);
+TShHandleBase* ConstructBindings();
+void DeleteLinker(TShHandleBase*);
+void DeleteBindingList(TShHandleBase* bindingList);
+
+TUniformMap* ConstructUniformMap();
+void DeleteCompiler(TCompiler*);
+
+void DeleteUniformMap(TUniformMap*);
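+
+// A sketch of the intended lifecycle from the driver's side (0 here
+// merely stands in for the int argument):
+//
+//     TCompiler* compiler = ConstructCompiler(EShLangVertex, 0);
+//     // ... build a tree, call compiler->compile(root) ...
+//     DeleteCompiler(compiler);
+//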
+
+#endif // _SHHANDLE_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/Types.h b/src/3rdparty/glslang/glslang/Include/Types.h
new file mode 100644
index 0000000..d0d9b60
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/Types.h
@@ -0,0 +1,2276 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2015-2016 Google, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _TYPES_INCLUDED
+#define _TYPES_INCLUDED
+
+#include "../Include/Common.h"
+#include "../Include/BaseTypes.h"
+#include "../Public/ShaderLang.h"
+#include "arrays.h"
+
+#include <algorithm>
+
+namespace glslang {
+
+const int GlslangMaxTypeLength = 200; // TODO: need to print block/struct one member per line, so this can stay bounded
+
+const char* const AnonymousPrefix = "anon@"; // for something like a block whose members can be directly accessed
+inline bool IsAnonymous(const TString& name)
+{
+ return name.compare(0, 5, AnonymousPrefix) == 0;
+}
+
+//
+// Details within a sampler type
+//
+enum TSamplerDim {
+ EsdNone,
+ Esd1D,
+ Esd2D,
+ Esd3D,
+ EsdCube,
+ EsdRect,
+ EsdBuffer,
+ EsdSubpass, // goes only with non-sampled image (image is true)
+ EsdNumDims
+};
+
+struct TSampler { // misnomer now; includes images, textures without sampler, and textures with sampler
+ TBasicType type : 8; // type returned by sampler
+ TSamplerDim dim : 8;
+ bool arrayed : 1;
+ bool shadow : 1;
+ bool ms : 1;
+ bool image : 1; // image, combined should be false
+ bool combined : 1; // true means texture is combined with a sampler, false means texture with no sampler
+ bool sampler : 1; // true means a pure sampler, other fields should be clear()
+ bool external : 1; // GL_OES_EGL_image_external
+ bool yuv : 1; // GL_EXT_YUV_target
+ unsigned int vectorSize : 3; // vector return type size.
+
+ // Some languages support structures as sample results. Storing the whole structure in the
+ // TSampler would be too large, so instead there is an index into a separate table.
+ static const unsigned structReturnIndexBits = 4; // number of index bits to use.
+ static const unsigned structReturnSlots = (1<<structReturnIndexBits)-1; // number of valid values
+ static const unsigned noReturnStruct = structReturnSlots; // value if no return struct type.
+
+ // Index into a language specific table of texture return structures.
+ unsigned int structReturnIndex : structReturnIndexBits;
+
+ // Encapsulate getting members' vector sizes packed into the vectorSize bitfield.
+ unsigned int getVectorSize() const { return vectorSize; }
+
+ bool isImage() const { return image && dim != EsdSubpass; }
+ bool isSubpass() const { return dim == EsdSubpass; }
+ bool isCombined() const { return combined; }
+ bool isPureSampler() const { return sampler; }
+ bool isTexture() const { return !sampler && !image; }
+ bool isShadow() const { return shadow; }
+ bool isArrayed() const { return arrayed; }
+ bool isMultiSample() const { return ms; }
+ bool hasReturnStruct() const { return structReturnIndex != noReturnStruct; }
+
+ void clear()
+ {
+ type = EbtVoid;
+ dim = EsdNone;
+ arrayed = false;
+ shadow = false;
+ ms = false;
+ image = false;
+ combined = false;
+ sampler = false;
+ external = false;
+ yuv = false;
+ structReturnIndex = noReturnStruct;
+
+ // by default, returns a single vec4;
+ vectorSize = 4;
+ }
+
+ // make a combined sampler and texture
+ void set(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ combined = true;
+ }
+
+ // make an image
+ void setImage(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ image = true;
+ }
+
+ // make a texture with no sampler
+ void setTexture(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ }
+
+ // make a subpass input attachment
+ void setSubpass(TBasicType t, bool m = false)
+ {
+ clear();
+ type = t;
+ image = true;
+ dim = EsdSubpass;
+ ms = m;
+ }
+
+ // make a pure sampler, no texture, no image, nothing combined, the 'sampler' keyword
+ void setPureSampler(bool s)
+ {
+ clear();
+ sampler = true;
+ shadow = s;
+ }
+
+ bool operator==(const TSampler& right) const
+ {
+ return type == right.type &&
+ dim == right.dim &&
+ arrayed == right.arrayed &&
+ shadow == right.shadow &&
+ ms == right.ms &&
+ image == right.image &&
+ combined == right.combined &&
+ sampler == right.sampler &&
+ external == right.external &&
+ yuv == right.yuv &&
+ vectorSize == right.vectorSize &&
+ structReturnIndex == right.structReturnIndex;
+ }
+
+ bool operator!=(const TSampler& right) const
+ {
+ return ! operator==(right);
+ }
+
+ TString getString() const
+ {
+ TString s;
+
+ if (sampler) {
+ s.append("sampler");
+ return s;
+ }
+
+ switch (type) {
+ case EbtFloat: break;
+#ifdef AMD_EXTENSIONS
+ case EbtFloat16: s.append("f16"); break;
+#endif
+ case EbtInt8: s.append("i8"); break;
+ case EbtUint8: s.append("u8"); break;
+ case EbtInt16: s.append("i16"); break;
+ case EbtUint16: s.append("u16"); break;
+ case EbtInt: s.append("i"); break;
+ case EbtUint: s.append("u"); break;
+ case EbtInt64: s.append("i64"); break;
+ case EbtUint64: s.append("u64"); break;
+ default: break; // some compilers want this
+ }
+ if (image) {
+ if (dim == EsdSubpass)
+ s.append("subpass");
+ else
+ s.append("image");
+ } else if (combined) {
+ s.append("sampler");
+ } else {
+ s.append("texture");
+ }
+ if (external) {
+ s.append("ExternalOES");
+ return s;
+ }
+ if (yuv) {
+ return "__" + s + "External2DY2YEXT";
+ }
+ switch (dim) {
+ case Esd1D: s.append("1D"); break;
+ case Esd2D: s.append("2D"); break;
+ case Esd3D: s.append("3D"); break;
+ case EsdCube: s.append("Cube"); break;
+ case EsdRect: s.append("2DRect"); break;
+ case EsdBuffer: s.append("Buffer"); break;
+ case EsdSubpass: s.append("Input"); break;
+ default: break; // some compilers want this
+ }
+ if (ms)
+ s.append("MS");
+ if (arrayed)
+ s.append("Array");
+ if (shadow)
+ s.append("Shadow");
+
+ return s;
+ }
+};
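+
+// For example, a sketch of building the type behind GLSL's
+// 'usampler2DArray' and recovering its name:
+//
+//     TSampler s;
+//     s.set(EbtUint, Esd2D, /*arrayed*/ true);
+//     TString name = s.getString();  // "usampler2DArray"
+//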
+
+//
+// Need to associate line numbers with types in a list, for building structs.
+//
+class TType;
+struct TTypeLoc {
+ TType* type;
+ TSourceLoc loc;
+};
+typedef TVector<TTypeLoc> TTypeList;
+
+typedef TVector<TString*> TIdentifierList;
+
+//
+// Following are a series of helper enums for managing layouts and qualifiers,
+// used for TPublicType, TType, others.
+//
+
+enum TLayoutPacking {
+ ElpNone,
+ ElpShared, // default, but different than saying nothing
+ ElpStd140,
+ ElpStd430,
+ ElpPacked,
+ ElpScalar,
+ ElpCount // If expanding, see bitfield width below
+};
+
+enum TLayoutMatrix {
+ ElmNone,
+ ElmRowMajor,
+ ElmColumnMajor, // default, but different than saying nothing
+ ElmCount // If expanding, see bitfield width below
+};
+
+// Union of geometry shader and tessellation shader geometry types.
+// They don't go into TType, but rather have current state per shader or
+// active parser type (TPublicType).
+enum TLayoutGeometry {
+ ElgNone,
+ ElgPoints,
+ ElgLines,
+ ElgLinesAdjacency,
+ ElgLineStrip,
+ ElgTriangles,
+ ElgTrianglesAdjacency,
+ ElgTriangleStrip,
+ ElgQuads,
+ ElgIsolines,
+};
+
+enum TVertexSpacing {
+ EvsNone,
+ EvsEqual,
+ EvsFractionalEven,
+ EvsFractionalOdd
+};
+
+enum TVertexOrder {
+ EvoNone,
+ EvoCw,
+ EvoCcw
+};
+
+// Note: order matters, as the category of a format is determined by comparing against the guard values below.
+enum TLayoutFormat {
+ ElfNone,
+
+ // Float image
+ ElfRgba32f,
+ ElfRgba16f,
+ ElfR32f,
+ ElfRgba8,
+ ElfRgba8Snorm,
+
+ ElfEsFloatGuard, // to help with comparisons
+
+ ElfRg32f,
+ ElfRg16f,
+ ElfR11fG11fB10f,
+ ElfR16f,
+ ElfRgba16,
+ ElfRgb10A2,
+ ElfRg16,
+ ElfRg8,
+ ElfR16,
+ ElfR8,
+ ElfRgba16Snorm,
+ ElfRg16Snorm,
+ ElfRg8Snorm,
+ ElfR16Snorm,
+ ElfR8Snorm,
+
+ ElfFloatGuard, // to help with comparisons
+
+ // Int image
+ ElfRgba32i,
+ ElfRgba16i,
+ ElfRgba8i,
+ ElfR32i,
+
+ ElfEsIntGuard, // to help with comparisons
+
+ ElfRg32i,
+ ElfRg16i,
+ ElfRg8i,
+ ElfR16i,
+ ElfR8i,
+
+ ElfIntGuard, // to help with comparisons
+
+ // Uint image
+ ElfRgba32ui,
+ ElfRgba16ui,
+ ElfRgba8ui,
+ ElfR32ui,
+
+ ElfEsUintGuard, // to help with comparisons
+
+ ElfRg32ui,
+ ElfRg16ui,
+ ElfRgb10a2ui,
+ ElfRg8ui,
+ ElfR16ui,
+ ElfR8ui,
+
+ ElfCount
+};
+
+enum TLayoutDepth {
+ EldNone,
+ EldAny,
+ EldGreater,
+ EldLess,
+ EldUnchanged,
+
+ EldCount
+};
+
+enum TBlendEquationShift {
+ // No 'EBlendNone':
+ // These are used as bit-shift amounts. A mask of such shifts will have type 'int',
+ // and in that space, 0 means no bits set. In this enum, 0 means (1 << 0): a bit is set.
+ EBlendMultiply,
+ EBlendScreen,
+ EBlendOverlay,
+ EBlendDarken,
+ EBlendLighten,
+ EBlendColordodge,
+ EBlendColorburn,
+ EBlendHardlight,
+ EBlendSoftlight,
+ EBlendDifference,
+ EBlendExclusion,
+ EBlendHslHue,
+ EBlendHslSaturation,
+ EBlendHslColor,
+ EBlendHslLuminosity,
+ EBlendAllEquations,
+
+ EBlendCount
+};
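+
+// So a shader declaring support for several equations is summarized by
+// shifting into a mask, as in this sketch:
+//
+//     int mask = (1 << EBlendMultiply) | (1 << EBlendScreen);
+//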
+
+class TQualifier {
+public:
+ static const int layoutNotSet = -1;
+
+ void clear()
+ {
+ precision = EpqNone;
+ invariant = false;
+ noContraction = false;
+ makeTemporary();
+ declaredBuiltIn = EbvNone;
+ }
+
+ // drop qualifiers that don't belong in a temporary variable
+ void makeTemporary()
+ {
+ semanticName = nullptr;
+ storage = EvqTemporary;
+ builtIn = EbvNone;
+ clearInterstage();
+ clearMemory();
+ specConstant = false;
+ nonUniform = false;
+ clearLayout();
+ }
+
+ void clearInterstage()
+ {
+ clearInterpolation();
+ patch = false;
+ sample = false;
+ }
+
+ void clearInterpolation()
+ {
+ centroid = false;
+ smooth = false;
+ flat = false;
+ nopersp = false;
+#ifdef AMD_EXTENSIONS
+ explicitInterp = false;
+#endif
+#ifdef NV_EXTENSIONS
+ pervertexNV = false;
+ perPrimitiveNV = false;
+ perViewNV = false;
+ perTaskNV = false;
+#endif
+ }
+
+ void clearMemory()
+ {
+ coherent = false;
+ devicecoherent = false;
+ queuefamilycoherent = false;
+ workgroupcoherent = false;
+ subgroupcoherent = false;
+ nonprivate = false;
+ volatil = false;
+ restrict = false;
+ readonly = false;
+ writeonly = false;
+ }
+
+ // Drop just the storage qualification, which perhaps should
+ // never be done, as it is fundamentally inconsistent, but we need to
+ // explore what downstream consumers need.
+ // E.g., in a dereference, it is an inconsistency between:
+ // A) a partially dereferenced resource is still in the storage class it started in
+ // B) a partially dereferenced resource is a new temporary object
+ // If A, then nothing should change; if B, then everything should change; but this is halfway.
+ void makePartialTemporary()
+ {
+ storage = EvqTemporary;
+ specConstant = false;
+ nonUniform = false;
+ }
+
+ const char* semanticName;
+ TStorageQualifier storage : 6;
+ TBuiltInVariable builtIn : 8;
+ TBuiltInVariable declaredBuiltIn : 8;
+ TPrecisionQualifier precision : 3;
+ bool invariant : 1; // require canonical treatment for cross-shader invariance
+ bool noContraction: 1; // prevent contraction and reassociation, e.g., for 'precise' keyword, and expressions it affects
+ bool centroid : 1;
+ bool smooth : 1;
+ bool flat : 1;
+ bool nopersp : 1;
+#ifdef AMD_EXTENSIONS
+ bool explicitInterp : 1;
+#endif
+#ifdef NV_EXTENSIONS
+ bool pervertexNV : 1;
+ bool perPrimitiveNV : 1;
+ bool perViewNV : 1;
+ bool perTaskNV : 1;
+#endif
+ bool patch : 1;
+ bool sample : 1;
+ bool coherent : 1;
+ bool devicecoherent : 1;
+ bool queuefamilycoherent : 1;
+ bool workgroupcoherent : 1;
+ bool subgroupcoherent : 1;
+ bool nonprivate : 1;
+ bool volatil : 1;
+ bool restrict : 1;
+ bool readonly : 1;
+ bool writeonly : 1;
+ bool specConstant : 1; // having a constant_id is not sufficient: expressions have no id, but are still specConstant
+ bool nonUniform : 1;
+
+ bool isMemory() const
+ {
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate;
+ }
+ bool isMemoryQualifierImageAndSSBOOnly() const
+ {
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly;
+ }
+ bool bufferReferenceNeedsVulkanMemoryModel() const
+ {
+ // include qualifiers that map to load/store availability/visibility/nonprivate memory access operands
+ return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || nonprivate;
+ }
+
+ bool isInterpolation() const
+ {
+#ifdef AMD_EXTENSIONS
+ return flat || smooth || nopersp || explicitInterp;
+#else
+ return flat || smooth || nopersp;
+#endif
+ }
+
+#ifdef AMD_EXTENSIONS
+ bool isExplicitInterpolation() const
+ {
+ return explicitInterp;
+ }
+#endif
+
+ bool isAuxiliary() const
+ {
+#ifdef NV_EXTENSIONS
+ return centroid || patch || sample || pervertexNV;
+#else
+ return centroid || patch || sample;
+#endif
+ }
+
+ bool isPipeInput() const
+ {
+ switch (storage) {
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPipeOutput() const
+ {
+ switch (storage) {
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamInput() const
+ {
+ switch (storage) {
+ case EvqIn:
+ case EvqInOut:
+ case EvqConstReadOnly:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamOutput() const
+ {
+ switch (storage) {
+ case EvqOut:
+ case EvqInOut:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isUniformOrBuffer() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPerPrimitive() const
+ {
+#ifdef NV_EXTENSIONS
+ return perPrimitiveNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isPerView() const
+ {
+#ifdef NV_EXTENSIONS
+ return perViewNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isTaskMemory() const
+ {
+#ifdef NV_EXTENSIONS
+ return perTaskNV;
+#else
+ return false;
+#endif
+ }
+
+ bool isIo() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // non-built-in symbols that might link between compilation units
+ bool isLinkable() const
+ {
+ switch (storage) {
+ case EvqGlobal:
+ case EvqVaryingIn:
+ case EvqVaryingOut:
+ case EvqUniform:
+ case EvqBuffer:
+ case EvqShared:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // True if this type of IO is supposed to be arrayed with an extra level for per-vertex data
+ bool isArrayedIo(EShLanguage language) const
+ {
+ switch (language) {
+ case EShLangGeometry:
+ return isPipeInput();
+ case EShLangTessControl:
+ return ! patch && (isPipeInput() || isPipeOutput());
+ case EShLangTessEvaluation:
+ return ! patch && isPipeInput();
+#ifdef NV_EXTENSIONS
+ case EShLangFragment:
+ return pervertexNV && isPipeInput();
+ case EShLangMeshNV:
+ return ! perTaskNV && isPipeOutput();
+#endif
+
+ default:
+ return false;
+ }
+ }
+
+ // Implementing an embedded layout-qualifier class here, since C++ can't have a real class bitfield
+ void clearLayout() // all layout
+ {
+ clearUniformLayout();
+
+ layoutPushConstant = false;
+ layoutBufferReference = false;
+#ifdef NV_EXTENSIONS
+ layoutPassthrough = false;
+ layoutViewportRelative = false;
+ // -2048 as the default value indicating layoutSecondaryViewportRelative is not set
+ layoutSecondaryViewportRelativeOffset = -2048;
+ layoutShaderRecordNV = false;
+#endif
+
+ layoutBufferReferenceAlign = layoutBufferReferenceAlignEnd;
+
+ clearInterstageLayout();
+
+ layoutSpecConstantId = layoutSpecConstantIdEnd;
+
+ layoutFormat = ElfNone;
+ }
+ void clearInterstageLayout()
+ {
+ layoutLocation = layoutLocationEnd;
+ layoutComponent = layoutComponentEnd;
+ layoutIndex = layoutIndexEnd;
+ clearStreamLayout();
+ clearXfbLayout();
+ }
+ void clearStreamLayout()
+ {
+ layoutStream = layoutStreamEnd;
+ }
+ void clearXfbLayout()
+ {
+ layoutXfbBuffer = layoutXfbBufferEnd;
+ layoutXfbStride = layoutXfbStrideEnd;
+ layoutXfbOffset = layoutXfbOffsetEnd;
+ }
+
+ bool hasNonXfbLayout() const
+ {
+ return hasUniformLayout() ||
+ hasAnyLocation() ||
+ hasStream() ||
+ hasFormat() ||
+#ifdef NV_EXTENSIONS
+ layoutShaderRecordNV ||
+#endif
+ layoutPushConstant ||
+ layoutBufferReference;
+ }
+ bool hasLayout() const
+ {
+ return hasNonXfbLayout() ||
+ hasXfb();
+ }
+ TLayoutMatrix layoutMatrix : 3;
+ TLayoutPacking layoutPacking : 4;
+ int layoutOffset;
+ int layoutAlign;
+
+ unsigned int layoutLocation : 12;
+ static const unsigned int layoutLocationEnd = 0xFFF;
+
+ unsigned int layoutComponent : 3;
+ static const unsigned int layoutComponentEnd = 4;
+
+ unsigned int layoutSet : 7;
+ static const unsigned int layoutSetEnd = 0x3F;
+
+ unsigned int layoutBinding : 16;
+ static const unsigned int layoutBindingEnd = 0xFFFF;
+
+ unsigned int layoutIndex : 8;
+ static const unsigned int layoutIndexEnd = 0xFF;
+
+ unsigned int layoutStream : 8;
+ static const unsigned int layoutStreamEnd = 0xFF;
+
+ unsigned int layoutXfbBuffer : 4;
+ static const unsigned int layoutXfbBufferEnd = 0xF;
+
+ unsigned int layoutXfbStride : 14;
+ static const unsigned int layoutXfbStrideEnd = 0x3FFF;
+
+ unsigned int layoutXfbOffset : 13;
+ static const unsigned int layoutXfbOffsetEnd = 0x1FFF;
+
+ unsigned int layoutAttachment : 8; // for input_attachment_index
+ static const unsigned int layoutAttachmentEnd = 0xFF;
+
+ unsigned int layoutSpecConstantId : 11;
+ static const unsigned int layoutSpecConstantIdEnd = 0x7FF;
+
+ // stored as log2 of the actual alignment value
+ unsigned int layoutBufferReferenceAlign : 6;
+ static const unsigned int layoutBufferReferenceAlignEnd = 0x3F;
+
+ TLayoutFormat layoutFormat : 8;
+
+ bool layoutPushConstant;
+ bool layoutBufferReference;
+
+#ifdef NV_EXTENSIONS
+ bool layoutPassthrough;
+ bool layoutViewportRelative;
+ int layoutSecondaryViewportRelativeOffset;
+ bool layoutShaderRecordNV;
+#endif
+
+ bool hasUniformLayout() const
+ {
+ return hasMatrix() ||
+ hasPacking() ||
+ hasOffset() ||
+ hasBinding() ||
+ hasSet() ||
+ hasAlign();
+ }
+ void clearUniformLayout() // only uniform specific
+ {
+ layoutMatrix = ElmNone;
+ layoutPacking = ElpNone;
+ layoutOffset = layoutNotSet;
+ layoutAlign = layoutNotSet;
+
+ layoutSet = layoutSetEnd;
+ layoutBinding = layoutBindingEnd;
+ layoutAttachment = layoutAttachmentEnd;
+ }
+
+ bool hasMatrix() const
+ {
+ return layoutMatrix != ElmNone;
+ }
+ bool hasPacking() const
+ {
+ return layoutPacking != ElpNone;
+ }
+ bool hasOffset() const
+ {
+ return layoutOffset != layoutNotSet;
+ }
+ bool hasAlign() const
+ {
+ return layoutAlign != layoutNotSet;
+ }
+ bool hasAnyLocation() const
+ {
+ return hasLocation() ||
+ hasComponent() ||
+ hasIndex();
+ }
+ bool hasLocation() const
+ {
+ return layoutLocation != layoutLocationEnd;
+ }
+ bool hasComponent() const
+ {
+ return layoutComponent != layoutComponentEnd;
+ }
+ bool hasIndex() const
+ {
+ return layoutIndex != layoutIndexEnd;
+ }
+ bool hasSet() const
+ {
+ return layoutSet != layoutSetEnd;
+ }
+ bool hasBinding() const
+ {
+ return layoutBinding != layoutBindingEnd;
+ }
+ bool hasStream() const
+ {
+ return layoutStream != layoutStreamEnd;
+ }
+ bool hasFormat() const
+ {
+ return layoutFormat != ElfNone;
+ }
+ bool hasXfb() const
+ {
+ return hasXfbBuffer() ||
+ hasXfbStride() ||
+ hasXfbOffset();
+ }
+ bool hasXfbBuffer() const
+ {
+ return layoutXfbBuffer != layoutXfbBufferEnd;
+ }
+ bool hasXfbStride() const
+ {
+ return layoutXfbStride != layoutXfbStrideEnd;
+ }
+ bool hasXfbOffset() const
+ {
+ return layoutXfbOffset != layoutXfbOffsetEnd;
+ }
+ bool hasAttachment() const
+ {
+ return layoutAttachment != layoutAttachmentEnd;
+ }
+ bool hasSpecConstantId() const
+ {
+ // Not the same thing as being a specialization constant, this
+ // is just whether or not it was declared with an ID.
+ return layoutSpecConstantId != layoutSpecConstantIdEnd;
+ }
+ bool hasBufferReferenceAlign() const
+ {
+ return layoutBufferReferenceAlign != layoutBufferReferenceAlignEnd;
+ }
+ bool isSpecConstant() const
+ {
+ // True if type is a specialization constant, whether or not it
+ // had a specialization-constant ID, and false if it is not a
+ // true front-end constant.
+ return specConstant;
+ }
+ bool isNonUniform() const
+ {
+ return nonUniform;
+ }
+ bool isFrontEndConstant() const
+ {
+ // True if the front-end knows the final constant value.
+ // This allows front-end constant folding.
+ return storage == EvqConst && ! specConstant;
+ }
+ bool isConstant() const
+ {
+ // True if is either kind of constant; specialization or regular.
+ return isFrontEndConstant() || isSpecConstant();
+ }
+ void makeSpecConstant()
+ {
+ storage = EvqConst;
+ specConstant = true;
+ }
+ static const char* getLayoutPackingString(TLayoutPacking packing)
+ {
+ switch (packing) {
+ case ElpPacked: return "packed";
+ case ElpShared: return "shared";
+ case ElpStd140: return "std140";
+ case ElpStd430: return "std430";
+ case ElpScalar: return "scalar";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutMatrixString(TLayoutMatrix m)
+ {
+ switch (m) {
+ case ElmColumnMajor: return "column_major";
+ case ElmRowMajor: return "row_major";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutFormatString(TLayoutFormat f)
+ {
+ switch (f) {
+ case ElfRgba32f: return "rgba32f";
+ case ElfRgba16f: return "rgba16f";
+ case ElfRg32f: return "rg32f";
+ case ElfRg16f: return "rg16f";
+ case ElfR11fG11fB10f: return "r11f_g11f_b10f";
+ case ElfR32f: return "r32f";
+ case ElfR16f: return "r16f";
+ case ElfRgba16: return "rgba16";
+ case ElfRgb10A2: return "rgb10_a2";
+ case ElfRgba8: return "rgba8";
+ case ElfRg16: return "rg16";
+ case ElfRg8: return "rg8";
+ case ElfR16: return "r16";
+ case ElfR8: return "r8";
+ case ElfRgba16Snorm: return "rgba16_snorm";
+ case ElfRgba8Snorm: return "rgba8_snorm";
+ case ElfRg16Snorm: return "rg16_snorm";
+ case ElfRg8Snorm: return "rg8_snorm";
+ case ElfR16Snorm: return "r16_snorm";
+ case ElfR8Snorm: return "r8_snorm";
+
+ case ElfRgba32i: return "rgba32i";
+ case ElfRgba16i: return "rgba16i";
+ case ElfRgba8i: return "rgba8i";
+ case ElfRg32i: return "rg32i";
+ case ElfRg16i: return "rg16i";
+ case ElfRg8i: return "rg8i";
+ case ElfR32i: return "r32i";
+ case ElfR16i: return "r16i";
+ case ElfR8i: return "r8i";
+
+ case ElfRgba32ui: return "rgba32ui";
+ case ElfRgba16ui: return "rgba16ui";
+ case ElfRgba8ui: return "rgba8ui";
+ case ElfRg32ui: return "rg32ui";
+ case ElfRg16ui: return "rg16ui";
+ case ElfRgb10a2ui: return "rgb10_a2ui";
+ case ElfRg8ui: return "rg8ui";
+ case ElfR32ui: return "r32ui";
+ case ElfR16ui: return "r16ui";
+ case ElfR8ui: return "r8ui";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutDepthString(TLayoutDepth d)
+ {
+ switch (d) {
+ case EldAny: return "depth_any";
+ case EldGreater: return "depth_greater";
+ case EldLess: return "depth_less";
+ case EldUnchanged: return "depth_unchanged";
+ default: return "none";
+ }
+ }
+ static const char* getBlendEquationString(TBlendEquationShift e)
+ {
+ switch (e) {
+ case EBlendMultiply: return "blend_support_multiply";
+ case EBlendScreen: return "blend_support_screen";
+ case EBlendOverlay: return "blend_support_overlay";
+ case EBlendDarken: return "blend_support_darken";
+ case EBlendLighten: return "blend_support_lighten";
+ case EBlendColordodge: return "blend_support_colordodge";
+ case EBlendColorburn: return "blend_support_colorburn";
+ case EBlendHardlight: return "blend_support_hardlight";
+ case EBlendSoftlight: return "blend_support_softlight";
+ case EBlendDifference: return "blend_support_difference";
+ case EBlendExclusion: return "blend_support_exclusion";
+ case EBlendHslHue: return "blend_support_hsl_hue";
+ case EBlendHslSaturation: return "blend_support_hsl_saturation";
+ case EBlendHslColor: return "blend_support_hsl_color";
+ case EBlendHslLuminosity: return "blend_support_hsl_luminosity";
+ case EBlendAllEquations: return "blend_support_all_equations";
+ default: return "unknown";
+ }
+ }
+ static const char* getGeometryString(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return "points";
+ case ElgLines: return "lines";
+ case ElgLinesAdjacency: return "lines_adjacency";
+ case ElgLineStrip: return "line_strip";
+ case ElgTriangles: return "triangles";
+ case ElgTrianglesAdjacency: return "triangles_adjacency";
+ case ElgTriangleStrip: return "triangle_strip";
+ case ElgQuads: return "quads";
+ case ElgIsolines: return "isolines";
+ default: return "none";
+ }
+ }
+ static const char* getVertexSpacingString(TVertexSpacing spacing)
+ {
+ switch (spacing) {
+ case EvsEqual: return "equal_spacing";
+ case EvsFractionalEven: return "fractional_even_spacing";
+ case EvsFractionalOdd: return "fractional_odd_spacing";
+ default: return "none";
+ }
+ }
+ static const char* getVertexOrderString(TVertexOrder order)
+ {
+ switch (order) {
+ case EvoCw: return "cw";
+ case EvoCcw: return "ccw";
+ default: return "none";
+ }
+ }
+ static int mapGeometryToSize(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return 1;
+ case ElgLines: return 2;
+ case ElgLinesAdjacency: return 4;
+ case ElgTriangles: return 3;
+ case ElgTrianglesAdjacency: return 6;
+ default: return 0;
+ }
+ }
+};
+
+// Qualifiers that don't need to be kept per object. They have shader scope, not object scope.
+// So, they will not be part of TType, TQualifier, etc.
+struct TShaderQualifiers {
+ TLayoutGeometry geometry; // geometry/tessellation shader in/out primitives
+ bool pixelCenterInteger; // fragment shader
+ bool originUpperLeft; // fragment shader
+ int invocations;
+ int vertices; // for tessellation "vertices", geometry & mesh "max_vertices"
+ TVertexSpacing spacing;
+ TVertexOrder order;
+ bool pointMode;
+ int localSize[3]; // compute shader
+ int localSizeSpecId[3]; // compute shader specialization id for gl_WorkGroupSize
+ bool earlyFragmentTests; // fragment input
+ bool postDepthCoverage; // fragment input
+ TLayoutDepth layoutDepth;
+ bool blendEquation; // true if any blend equation was specified
+    int numViews;                // multiview extensions
+
+#ifdef NV_EXTENSIONS
+ bool layoutOverrideCoverage; // true if layout override_coverage set
+ bool layoutDerivativeGroupQuads; // true if layout derivative_group_quadsNV set
+ bool layoutDerivativeGroupLinear; // true if layout derivative_group_linearNV set
+    int primitives;                  // mesh shader "max_primitives"
+#endif
+
+ void init()
+ {
+ geometry = ElgNone;
+ originUpperLeft = false;
+ pixelCenterInteger = false;
+ invocations = TQualifier::layoutNotSet;
+ vertices = TQualifier::layoutNotSet;
+ spacing = EvsNone;
+ order = EvoNone;
+ pointMode = false;
+ localSize[0] = 1;
+ localSize[1] = 1;
+ localSize[2] = 1;
+ localSizeSpecId[0] = TQualifier::layoutNotSet;
+ localSizeSpecId[1] = TQualifier::layoutNotSet;
+ localSizeSpecId[2] = TQualifier::layoutNotSet;
+ earlyFragmentTests = false;
+ postDepthCoverage = false;
+ layoutDepth = EldNone;
+ blendEquation = false;
+ numViews = TQualifier::layoutNotSet;
+#ifdef NV_EXTENSIONS
+ layoutOverrideCoverage = false;
+ layoutDerivativeGroupQuads = false;
+ layoutDerivativeGroupLinear = false;
+ primitives = TQualifier::layoutNotSet;
+#endif
+ }
+
+ // Merge in characteristics from the 'src' qualifier. They can override when
+ // set, but never erase when not set.
+ void merge(const TShaderQualifiers& src)
+ {
+ if (src.geometry != ElgNone)
+ geometry = src.geometry;
+ if (src.pixelCenterInteger)
+ pixelCenterInteger = src.pixelCenterInteger;
+ if (src.originUpperLeft)
+ originUpperLeft = src.originUpperLeft;
+ if (src.invocations != TQualifier::layoutNotSet)
+ invocations = src.invocations;
+ if (src.vertices != TQualifier::layoutNotSet)
+ vertices = src.vertices;
+ if (src.spacing != EvsNone)
+ spacing = src.spacing;
+ if (src.order != EvoNone)
+ order = src.order;
+ if (src.pointMode)
+ pointMode = true;
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSize[i] > 1)
+ localSize[i] = src.localSize[i];
+ }
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSizeSpecId[i] != TQualifier::layoutNotSet)
+ localSizeSpecId[i] = src.localSizeSpecId[i];
+ }
+ if (src.earlyFragmentTests)
+ earlyFragmentTests = true;
+ if (src.postDepthCoverage)
+ postDepthCoverage = true;
+ if (src.layoutDepth)
+ layoutDepth = src.layoutDepth;
+ if (src.blendEquation)
+ blendEquation = src.blendEquation;
+ if (src.numViews != TQualifier::layoutNotSet)
+ numViews = src.numViews;
+#ifdef NV_EXTENSIONS
+ if (src.layoutOverrideCoverage)
+ layoutOverrideCoverage = src.layoutOverrideCoverage;
+ if (src.layoutDerivativeGroupQuads)
+ layoutDerivativeGroupQuads = src.layoutDerivativeGroupQuads;
+ if (src.layoutDerivativeGroupLinear)
+ layoutDerivativeGroupLinear = src.layoutDerivativeGroupLinear;
+ if (src.primitives != TQualifier::layoutNotSet)
+ primitives = src.primitives;
+#endif
+ }
+};
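+
+// Illustrative sketch (not part of the original source): the merge policy above
+// lets a later declaration add or override information, but never erase it:
+//     TShaderQualifiers a, b;
+//     a.init();
+//     b.init();
+//     a.originUpperLeft = true;     // e.g. from layout(origin_upper_left)
+//     b.earlyFragmentTests = true;  // e.g. from layout(early_fragment_tests) in;
+//     a.merge(b);                   // 'a' now carries both; nothing is cleared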
+
+//
+// TPublicType is used only temporarily while parsing, and is not quite the
+// same information that is kept per node in TType. Due to the bison stack, it
+// can't have types that it thinks have non-trivial constructors. It should
+// just be used while recognizing the grammar, not for anything else.
+// Once enough is known about the situation, the proper information is
+// moved into a TType, or the parse context, etc.
+//
+class TPublicType {
+public:
+ TBasicType basicType;
+ TSampler sampler;
+ TQualifier qualifier;
+ TShaderQualifiers shaderQualifiers;
+ int vectorSize : 4;
+ int matrixCols : 4;
+ int matrixRows : 4;
+ bool coopmat : 1;
+ TArraySizes* arraySizes;
+ const TType* userDef;
+ TSourceLoc loc;
+ TArraySizes* typeParameters;
+
+ void initType(const TSourceLoc& l)
+ {
+ basicType = EbtVoid;
+ vectorSize = 1;
+ matrixRows = 0;
+ matrixCols = 0;
+ arraySizes = nullptr;
+ userDef = nullptr;
+ loc = l;
+ typeParameters = nullptr;
+ coopmat = false;
+ }
+
+ void initQualifiers(bool global = false)
+ {
+ qualifier.clear();
+ if (global)
+ qualifier.storage = EvqGlobal;
+ }
+
+ void init(const TSourceLoc& l, bool global = false)
+ {
+ initType(l);
+ sampler.clear();
+ initQualifiers(global);
+ shaderQualifiers.init();
+ }
+
+ void setVector(int s)
+ {
+ matrixRows = 0;
+ matrixCols = 0;
+ vectorSize = s;
+ }
+
+ void setMatrix(int c, int r)
+ {
+ matrixRows = r;
+ matrixCols = c;
+ vectorSize = 0;
+ }
+
+ bool isScalar() const
+ {
+ return matrixCols == 0 && vectorSize == 1 && arraySizes == nullptr && userDef == nullptr;
+ }
+
+ // "Image" is a superset of "Subpass"
+ bool isImage() const { return basicType == EbtSampler && sampler.isImage(); }
+ bool isSubpass() const { return basicType == EbtSampler && sampler.isSubpass(); }
+};
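+
+// Illustrative sketch (not part of the original source): while recognizing a
+// declaration such as "mat3x2 m;", the grammar actions roughly do:
+//     TPublicType pt;
+//     pt.init(loc);            // 'loc' is the current TSourceLoc
+//     pt.basicType = EbtFloat;
+//     pt.setMatrix(3, 2);      // 3 columns, 2 rows; vectorSize becomes 0
+//     TType type(pt);          // shallow conversion into a persistent TType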
+
+//
+// Base class for things that have a type.
+//
+class TType {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ // for "empty" type (no args) or simple scalar/vector/matrix
+ explicit TType(TBasicType t = EbtVoid, TStorageQualifier q = EvqTemporary, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices
+ }
+ // for explicit precision qualifier
+ TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ qualifier.precision = p;
+ assert(p >= EpqNone && p <= EpqHigh);
+ assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices
+ }
+ // for turning a TPublicType into a TType, using a shallow copy
+ explicit TType(const TPublicType& p) :
+ basicType(p.basicType),
+ vectorSize(p.vectorSize), matrixCols(p.matrixCols), matrixRows(p.matrixRows), vector1(false), coopmat(p.coopmat),
+ arraySizes(p.arraySizes), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(p.typeParameters)
+ {
+ if (basicType == EbtSampler)
+ sampler = p.sampler;
+ else
+ sampler.clear();
+ qualifier = p.qualifier;
+ if (p.userDef) {
+ if (p.userDef->basicType == EbtReference) {
+ basicType = EbtReference;
+ referentType = p.userDef->referentType;
+ } else {
+ structure = p.userDef->getWritableStruct(); // public type is short-lived; there are no sharing issues
+ }
+ typeName = NewPoolTString(p.userDef->getTypeName().c_str());
+ }
+ if (p.coopmat && p.basicType == EbtFloat &&
+ p.typeParameters && p.typeParameters->getNumDims() > 0 &&
+ p.typeParameters->getDimSize(0) == 16) {
+ basicType = EbtFloat16;
+ qualifier.precision = EpqNone;
+ }
+ }
+ // for construction of sampler types
+ TType(const TSampler& sampler, TStorageQualifier q = EvqUniform, TArraySizes* as = nullptr) :
+ basicType(EbtSampler), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ arraySizes(as), structure(nullptr), fieldName(nullptr), typeName(nullptr),
+ sampler(sampler), typeParameters(nullptr)
+ {
+ qualifier.clear();
+ qualifier.storage = q;
+ }
+    // to efficiently make a dereferenced type,
+    // without ever duplicating the outer structure that will be thrown away,
+    // using only shallow copies
+ TType(const TType& type, int derefIndex, bool rowMajor = false)
+ {
+ if (type.isArray()) {
+ shallowCopy(type);
+ if (type.getArraySizes()->getNumDims() == 1) {
+ arraySizes = nullptr;
+ } else {
+ // want our own copy of the array, so we can edit it
+ arraySizes = new TArraySizes;
+ arraySizes->copyDereferenced(*type.arraySizes);
+ }
+ } else if (type.basicType == EbtStruct || type.basicType == EbtBlock) {
+ // do a structure dereference
+ const TTypeList& memberList = *type.getStruct();
+ shallowCopy(*memberList[derefIndex].type);
+ return;
+ } else {
+ // do a vector/matrix dereference
+ shallowCopy(type);
+ if (matrixCols > 0) {
+ // dereference from matrix to vector
+ if (rowMajor)
+ vectorSize = matrixCols;
+ else
+ vectorSize = matrixRows;
+ matrixCols = 0;
+ matrixRows = 0;
+ if (vectorSize == 1)
+ vector1 = true;
+ } else if (isVector()) {
+ // dereference from vector to scalar
+ vectorSize = 1;
+ vector1 = false;
+ } else if (isCoopMat()) {
+ coopmat = false;
+ typeParameters = nullptr;
+ }
+ }
+ }
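+    // Illustrative sketch (not part of the original source): given a TType for
+    // "mat4 m[2]", successive uses of this constructor peel one array/matrix/vector
+    // level at a time:
+    //     TType mat(arrayType, 0);   // mat4: the outer array dimension is removed
+    //     TType col(mat, 0);         // vec4: one column of the matrix
+    //     TType elem(col, 0);        // float: one component of the vector
+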
+ // for making structures, ...
+ TType(TTypeList* userDef, const TString& n) :
+ basicType(EbtStruct), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ // For interface blocks
+ TType(TTypeList* userDef, const TString& n, const TQualifier& q) :
+ basicType(EbtBlock), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+ qualifier(q), arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
+ {
+ sampler.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ // for block reference (first parameter must be EbtReference)
+ explicit TType(TBasicType t, const TType &p, const TString& n) :
+        basicType(t), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
+        arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
+ {
+ assert(t == EbtReference);
+ typeName = NewPoolTString(n.c_str());
+ qualifier.clear();
+ qualifier.storage = p.qualifier.storage;
+ referentType = p.clone();
+ }
+ virtual ~TType() {}
+
+ // Not for use across pool pops; it will cause multiple instances of TType to point to the same information.
+ // This only works if that information (like a structure's list of types) does not change and
+ // the instances are sharing the same pool.
+ void shallowCopy(const TType& copyOf)
+ {
+ basicType = copyOf.basicType;
+ sampler = copyOf.sampler;
+ qualifier = copyOf.qualifier;
+ vectorSize = copyOf.vectorSize;
+ matrixCols = copyOf.matrixCols;
+ matrixRows = copyOf.matrixRows;
+ vector1 = copyOf.vector1;
+ arraySizes = copyOf.arraySizes; // copying the pointer only, not the contents
+ fieldName = copyOf.fieldName;
+ typeName = copyOf.typeName;
+ if (isStruct()) {
+ structure = copyOf.structure;
+ } else {
+ referentType = copyOf.referentType;
+ }
+ typeParameters = copyOf.typeParameters;
+ coopmat = copyOf.coopmat;
+ }
+
+ // Make complete copy of the whole type graph rooted at 'copyOf'.
+ void deepCopy(const TType& copyOf)
+ {
+ TMap<TTypeList*,TTypeList*> copied; // to enable copying a type graph as a graph, not a tree
+ deepCopy(copyOf, copied);
+ }
+
+ // Recursively make temporary
+ void makeTemporary()
+ {
+ getQualifier().makeTemporary();
+
+ if (isStruct())
+ for (unsigned int i = 0; i < structure->size(); ++i)
+ (*structure)[i].type->makeTemporary();
+ }
+
+ TType* clone() const
+ {
+ TType *newType = new TType();
+ newType->deepCopy(*this);
+
+ return newType;
+ }
+
+ void makeVector() { vector1 = true; }
+
+ virtual void hideMember() { basicType = EbtVoid; vectorSize = 1; }
+ virtual bool hiddenMember() const { return basicType == EbtVoid; }
+
+ virtual void setFieldName(const TString& n) { fieldName = NewPoolTString(n.c_str()); }
+ virtual const TString& getTypeName() const
+ {
+ assert(typeName);
+ return *typeName;
+ }
+
+ virtual const TString& getFieldName() const
+ {
+ assert(fieldName);
+ return *fieldName;
+ }
+
+ virtual TBasicType getBasicType() const { return basicType; }
+ virtual const TSampler& getSampler() const { return sampler; }
+ virtual TSampler& getSampler() { return sampler; }
+
+ virtual TQualifier& getQualifier() { return qualifier; }
+ virtual const TQualifier& getQualifier() const { return qualifier; }
+
+ virtual int getVectorSize() const { return vectorSize; } // returns 1 for either scalar or vector of size 1, valid for both
+ virtual int getMatrixCols() const { return matrixCols; }
+ virtual int getMatrixRows() const { return matrixRows; }
+ virtual int getOuterArraySize() const { return arraySizes->getOuterSize(); }
+ virtual TIntermTyped* getOuterArrayNode() const { return arraySizes->getOuterNode(); }
+ virtual int getCumulativeArraySize() const { return arraySizes->getCumulativeSize(); }
+ virtual bool isArrayOfArrays() const { return arraySizes != nullptr && arraySizes->getNumDims() > 1; }
+ virtual int getImplicitArraySize() const { return arraySizes->getImplicitSize(); }
+ virtual const TArraySizes* getArraySizes() const { return arraySizes; }
+ virtual TArraySizes* getArraySizes() { return arraySizes; }
+ virtual TType* getReferentType() const { return referentType; }
+ virtual const TArraySizes* getTypeParameters() const { return typeParameters; }
+ virtual TArraySizes* getTypeParameters() { return typeParameters; }
+
+ virtual bool isScalar() const { return ! isVector() && ! isMatrix() && ! isStruct() && ! isArray(); }
+ virtual bool isScalarOrVec1() const { return isScalar() || vector1; }
+ virtual bool isVector() const { return vectorSize > 1 || vector1; }
+ virtual bool isMatrix() const { return matrixCols ? true : false; }
+ virtual bool isArray() const { return arraySizes != nullptr; }
+ virtual bool isSizedArray() const { return isArray() && arraySizes->isSized(); }
+ virtual bool isUnsizedArray() const { return isArray() && !arraySizes->isSized(); }
+ virtual bool isArrayVariablyIndexed() const { assert(isArray()); return arraySizes->isVariablyIndexed(); }
+ virtual void setArrayVariablyIndexed() { assert(isArray()); arraySizes->setVariablyIndexed(); }
+ virtual void updateImplicitArraySize(int size) { assert(isArray()); arraySizes->updateImplicitSize(size); }
+ virtual bool isStruct() const { return basicType == EbtStruct || basicType == EbtBlock; }
+ virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble || basicType == EbtFloat16; }
+ virtual bool isIntegerDomain() const
+ {
+ switch (basicType) {
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtAtomicUint:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+ virtual bool isOpaque() const { return basicType == EbtSampler || basicType == EbtAtomicUint
+#ifdef NV_EXTENSIONS
+ || basicType == EbtAccStructNV
+#endif
+ ; }
+ virtual bool isBuiltIn() const { return getQualifier().builtIn != EbvNone; }
+
+ // "Image" is a superset of "Subpass"
+ virtual bool isImage() const { return basicType == EbtSampler && getSampler().isImage(); }
+ virtual bool isSubpass() const { return basicType == EbtSampler && getSampler().isSubpass(); }
+ virtual bool isTexture() const { return basicType == EbtSampler && getSampler().isTexture(); }
+ virtual bool isParameterized() const { return typeParameters != nullptr; }
+ virtual bool isCoopMat() const { return coopmat; }
+
+ // return true if this type contains any subtype which satisfies the given predicate.
+ template <typename P>
+ bool contains(P predicate) const
+ {
+ if (predicate(this))
+ return true;
+
+ const auto hasa = [predicate](const TTypeLoc& tl) { return tl.type->contains(predicate); };
+
+ return isStruct() && std::any_of(structure->begin(), structure->end(), hasa);
+ }
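+
+    // Illustrative sketch (not part of the original source): 'contains' also
+    // takes ad-hoc predicates beyond the named helpers below, e.g.:
+    //     bool hasDoubleMatrix = type.contains([](const TType* t) {
+    //         return t->isMatrix() && t->getBasicType() == EbtDouble;
+    //     });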
+
+ // Recursively checks if the type contains the given basic type
+ virtual bool containsBasicType(TBasicType checkType) const
+ {
+ return contains([checkType](const TType* t) { return t->basicType == checkType; } );
+ }
+
+ // Recursively check the structure for any arrays, needed for some error checks
+ virtual bool containsArray() const
+ {
+ return contains([](const TType* t) { return t->isArray(); } );
+ }
+
+ // Check the structure for any structures, needed for some error checks
+ virtual bool containsStructure() const
+ {
+ return contains([this](const TType* t) { return t != this && t->isStruct(); } );
+ }
+
+ // Recursively check the structure for any unsized arrays, needed for triggering a copyUp().
+ virtual bool containsUnsizedArray() const
+ {
+ return contains([](const TType* t) { return t->isUnsizedArray(); } );
+ }
+
+ virtual bool containsOpaque() const
+ {
+ return contains([](const TType* t) { return t->isOpaque(); } );
+ }
+
+ // Recursively checks if the type contains a built-in variable
+ virtual bool containsBuiltIn() const
+ {
+ return contains([](const TType* t) { return t->isBuiltIn(); } );
+ }
+
+ virtual bool containsNonOpaque() const
+ {
+ const auto nonOpaque = [](const TType* t) {
+ switch (t->basicType) {
+ case EbtVoid:
+ case EbtFloat:
+ case EbtDouble:
+ case EbtFloat16:
+ case EbtInt8:
+ case EbtUint8:
+ case EbtInt16:
+ case EbtUint16:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtBool:
+ case EbtReference:
+ return true;
+ default:
+ return false;
+ }
+ };
+
+ return contains(nonOpaque);
+ }
+
+ virtual bool containsSpecializationSize() const
+ {
+ return contains([](const TType* t) { return t->isArray() && t->arraySizes->isOuterSpecialization(); } );
+ }
+
+ virtual bool contains16BitInt() const
+ {
+ return containsBasicType(EbtInt16) || containsBasicType(EbtUint16);
+ }
+
+ virtual bool contains8BitInt() const
+ {
+ return containsBasicType(EbtInt8) || containsBasicType(EbtUint8);
+ }
+
+ virtual bool containsCoopMat() const
+ {
+ return contains([](const TType* t) { return t->coopmat; } );
+ }
+
+ // Array editing methods. Array descriptors can be shared across
+ // type instances. This allows all uses of the same array
+ // to be updated at once. E.g., all nodes can be explicitly sized
+ // by tracking and correcting one implicit size. Or, all nodes
+ // can get the explicit size on a redeclaration that gives size.
+ //
+    // N.B.: Don't share with the shared symbol tables (symbols are
+    // marked as isReadOnly()). Such symbols with arrays that will be
+    // edited need to copyUp() on first use, so that
+    // A) the edits don't affect the shared symbol table, and
+    // B) the edits are shared across all users.
+ void updateArraySizes(const TType& type)
+ {
+ // For when we may already be sharing existing array descriptors,
+ // keeping the pointers the same, just updating the contents.
+ assert(arraySizes != nullptr);
+ assert(type.arraySizes != nullptr);
+ *arraySizes = *type.arraySizes;
+ }
+ void copyArraySizes(const TArraySizes& s)
+ {
+ // For setting a fresh new set of array sizes, not yet worrying about sharing.
+ arraySizes = new TArraySizes;
+ *arraySizes = s;
+ }
+ void transferArraySizes(TArraySizes* s)
+ {
+ // For setting an already allocated set of sizes that this type can use
+ // (no copy made).
+ arraySizes = s;
+ }
+ void clearArraySizes()
+ {
+ arraySizes = nullptr;
+ }
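+
+    // Illustrative sketch (not part of the original source): the setters above
+    // differ only in sharing semantics. Assuming TType objects 'a' and 'b':
+    //     TArraySizes* s = new TArraySizes;
+    //     s->addInnerSize(4);
+    //     a.transferArraySizes(s);   // 'a' shares 's' directly (no copy made)
+    //     b.copyArraySizes(*s);      // 'b' gets its own independent copy
+    //     a.updateArraySizes(b);     // rewrites the contents of the shared 's',
+    //                                // which all types sharing it will see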
+
+ // Add inner array sizes, to any existing sizes, via copy; the
+ // sizes passed in can still be reused for other purposes.
+ void copyArrayInnerSizes(const TArraySizes* s)
+ {
+ if (s != nullptr) {
+ if (arraySizes == nullptr)
+ copyArraySizes(*s);
+ else
+ arraySizes->addInnerSizes(*s);
+ }
+ }
+ void changeOuterArraySize(int s) { arraySizes->changeOuterSize(s); }
+
+ // Recursively make the implicit array size the explicit array size.
+    // Explicit arrays are compile-time or link-time sized, never run-time sized.
+ // Sometimes, policy calls for an array to be run-time sized even if it was
+ // never variably indexed: Don't turn a 'skipNonvariablyIndexed' array into
+ // an explicit array.
+ void adoptImplicitArraySizes(bool skipNonvariablyIndexed)
+ {
+ if (isUnsizedArray() && !(skipNonvariablyIndexed || isArrayVariablyIndexed()))
+ changeOuterArraySize(getImplicitArraySize());
+#ifdef NV_EXTENSIONS
+ // For multi-dim per-view arrays, set unsized inner dimension size to 1
+ if (qualifier.isPerView() && arraySizes && arraySizes->isInnerUnsized())
+ arraySizes->clearInnerUnsized();
+#endif
+ if (isStruct() && structure->size() > 0) {
+ int lastMember = (int)structure->size() - 1;
+ for (int i = 0; i < lastMember; ++i)
+ (*structure)[i].type->adoptImplicitArraySizes(false);
+ // implement the "last member of an SSBO" policy
+ (*structure)[lastMember].type->adoptImplicitArraySizes(getQualifier().storage == EvqBuffer);
+ }
+ }
+
+ void updateTypeParameters(const TType& type)
+ {
+ // For when we may already be sharing existing array descriptors,
+ // keeping the pointers the same, just updating the contents.
+ assert(typeParameters != nullptr);
+ assert(type.typeParameters != nullptr);
+ *typeParameters = *type.typeParameters;
+ }
+ void copyTypeParameters(const TArraySizes& s)
+ {
+ // For setting a fresh new set of type parameters, not yet worrying about sharing.
+ typeParameters = new TArraySizes;
+ *typeParameters = s;
+ }
+ void transferTypeParameters(TArraySizes* s)
+ {
+ // For setting an already allocated set of sizes that this type can use
+ // (no copy made).
+ typeParameters = s;
+ }
+ void clearTypeParameters()
+ {
+ typeParameters = nullptr;
+ }
+
+    // Add inner type-parameter sizes, to any existing sizes, via copy; the
+    // sizes passed in can still be reused for other purposes.
+ void copyTypeParametersInnerSizes(const TArraySizes* s)
+ {
+ if (s != nullptr) {
+ if (typeParameters == nullptr)
+ copyTypeParameters(*s);
+ else
+ typeParameters->addInnerSizes(*s);
+ }
+ }
+
+ const char* getBasicString() const
+ {
+ return TType::getBasicString(basicType);
+ }
+
+ static const char* getBasicString(TBasicType t)
+ {
+ switch (t) {
+ case EbtVoid: return "void";
+ case EbtFloat: return "float";
+ case EbtDouble: return "double";
+ case EbtFloat16: return "float16_t";
+ case EbtInt8: return "int8_t";
+ case EbtUint8: return "uint8_t";
+ case EbtInt16: return "int16_t";
+ case EbtUint16: return "uint16_t";
+ case EbtInt: return "int";
+ case EbtUint: return "uint";
+ case EbtInt64: return "int64_t";
+ case EbtUint64: return "uint64_t";
+ case EbtBool: return "bool";
+ case EbtAtomicUint: return "atomic_uint";
+ case EbtSampler: return "sampler/image";
+ case EbtStruct: return "structure";
+ case EbtBlock: return "block";
+#ifdef NV_EXTENSIONS
+ case EbtAccStructNV: return "accelerationStructureNV";
+#endif
+ case EbtReference: return "reference";
+ default: return "unknown type";
+ }
+ }
+
+ TString getCompleteString() const
+ {
+ TString typeString;
+
+ const auto appendStr = [&](const char* s) { typeString.append(s); };
+ const auto appendUint = [&](unsigned int u) { typeString.append(std::to_string(u).c_str()); };
+ const auto appendInt = [&](int i) { typeString.append(std::to_string(i).c_str()); };
+
+ if (qualifier.hasLayout()) {
+ // To reduce noise, skip this if the only layout is an xfb_buffer
+ // with no triggering xfb_offset.
+ TQualifier noXfbBuffer = qualifier;
+ noXfbBuffer.layoutXfbBuffer = TQualifier::layoutXfbBufferEnd;
+ if (noXfbBuffer.hasLayout()) {
+ appendStr("layout(");
+ if (qualifier.hasAnyLocation()) {
+ appendStr(" location=");
+ appendUint(qualifier.layoutLocation);
+ if (qualifier.hasComponent()) {
+ appendStr(" component=");
+ appendUint(qualifier.layoutComponent);
+ }
+ if (qualifier.hasIndex()) {
+ appendStr(" index=");
+ appendUint(qualifier.layoutIndex);
+ }
+ }
+ if (qualifier.hasSet()) {
+ appendStr(" set=");
+ appendUint(qualifier.layoutSet);
+ }
+ if (qualifier.hasBinding()) {
+ appendStr(" binding=");
+ appendUint(qualifier.layoutBinding);
+ }
+ if (qualifier.hasStream()) {
+ appendStr(" stream=");
+ appendUint(qualifier.layoutStream);
+ }
+ if (qualifier.hasMatrix()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutMatrixString(qualifier.layoutMatrix));
+ }
+ if (qualifier.hasPacking()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutPackingString(qualifier.layoutPacking));
+ }
+ if (qualifier.hasOffset()) {
+ appendStr(" offset=");
+ appendInt(qualifier.layoutOffset);
+ }
+ if (qualifier.hasAlign()) {
+ appendStr(" align=");
+ appendInt(qualifier.layoutAlign);
+ }
+ if (qualifier.hasFormat()) {
+ appendStr(" ");
+ appendStr(TQualifier::getLayoutFormatString(qualifier.layoutFormat));
+ }
+ if (qualifier.hasXfbBuffer() && qualifier.hasXfbOffset()) {
+ appendStr(" xfb_buffer=");
+ appendUint(qualifier.layoutXfbBuffer);
+ }
+ if (qualifier.hasXfbOffset()) {
+ appendStr(" xfb_offset=");
+ appendUint(qualifier.layoutXfbOffset);
+ }
+ if (qualifier.hasXfbStride()) {
+ appendStr(" xfb_stride=");
+ appendUint(qualifier.layoutXfbStride);
+ }
+ if (qualifier.hasAttachment()) {
+ appendStr(" input_attachment_index=");
+ appendUint(qualifier.layoutAttachment);
+ }
+ if (qualifier.hasSpecConstantId()) {
+ appendStr(" constant_id=");
+ appendUint(qualifier.layoutSpecConstantId);
+ }
+ if (qualifier.layoutPushConstant)
+ appendStr(" push_constant");
+ if (qualifier.layoutBufferReference)
+ appendStr(" buffer_reference");
+ if (qualifier.hasBufferReferenceAlign()) {
+ appendStr(" buffer_reference_align=");
+ appendUint(1u << qualifier.layoutBufferReferenceAlign);
+ }
+
+#ifdef NV_EXTENSIONS
+ if (qualifier.layoutPassthrough)
+ appendStr(" passthrough");
+ if (qualifier.layoutViewportRelative)
+ appendStr(" layoutViewportRelative");
+ if (qualifier.layoutSecondaryViewportRelativeOffset != -2048) {
+ appendStr(" layoutSecondaryViewportRelativeOffset=");
+ appendInt(qualifier.layoutSecondaryViewportRelativeOffset);
+ }
+ if (qualifier.layoutShaderRecordNV)
+ appendStr(" shaderRecordNV");
+#endif
+
+ appendStr(")");
+ }
+ }
+
+ if (qualifier.invariant)
+ appendStr(" invariant");
+ if (qualifier.noContraction)
+ appendStr(" noContraction");
+ if (qualifier.centroid)
+ appendStr(" centroid");
+ if (qualifier.smooth)
+ appendStr(" smooth");
+ if (qualifier.flat)
+ appendStr(" flat");
+ if (qualifier.nopersp)
+ appendStr(" noperspective");
+#ifdef AMD_EXTENSIONS
+ if (qualifier.explicitInterp)
+ appendStr(" __explicitInterpAMD");
+#endif
+#ifdef NV_EXTENSIONS
+ if (qualifier.pervertexNV)
+ appendStr(" pervertexNV");
+ if (qualifier.perPrimitiveNV)
+ appendStr(" perprimitiveNV");
+ if (qualifier.perViewNV)
+ appendStr(" perviewNV");
+ if (qualifier.perTaskNV)
+ appendStr(" taskNV");
+#endif
+ if (qualifier.patch)
+ appendStr(" patch");
+ if (qualifier.sample)
+ appendStr(" sample");
+ if (qualifier.coherent)
+ appendStr(" coherent");
+ if (qualifier.devicecoherent)
+ appendStr(" devicecoherent");
+ if (qualifier.queuefamilycoherent)
+ appendStr(" queuefamilycoherent");
+ if (qualifier.workgroupcoherent)
+ appendStr(" workgroupcoherent");
+ if (qualifier.subgroupcoherent)
+ appendStr(" subgroupcoherent");
+ if (qualifier.nonprivate)
+ appendStr(" nonprivate");
+ if (qualifier.volatil)
+ appendStr(" volatile");
+ if (qualifier.restrict)
+ appendStr(" restrict");
+ if (qualifier.readonly)
+ appendStr(" readonly");
+ if (qualifier.writeonly)
+ appendStr(" writeonly");
+ if (qualifier.specConstant)
+ appendStr(" specialization-constant");
+ if (qualifier.nonUniform)
+ appendStr(" nonuniform");
+ appendStr(" ");
+ appendStr(getStorageQualifierString());
+ if (isArray()) {
+ for(int i = 0; i < (int)arraySizes->getNumDims(); ++i) {
+ int size = arraySizes->getDimSize(i);
+ if (size == UnsizedArraySize && i == 0 && arraySizes->isVariablyIndexed())
+ appendStr(" runtime-sized array of");
+ else {
+ if (size == UnsizedArraySize) {
+ appendStr(" unsized");
+ if (i == 0) {
+ appendStr(" ");
+ appendInt(arraySizes->getImplicitSize());
+ }
+ } else {
+ appendStr(" ");
+ appendInt(arraySizes->getDimSize(i));
+ }
+ appendStr("-element array of");
+ }
+ }
+ }
+ if (isParameterized()) {
+ appendStr("<");
+ for(int i = 0; i < (int)typeParameters->getNumDims(); ++i) {
+ appendInt(typeParameters->getDimSize(i));
+ if (i != (int)typeParameters->getNumDims() - 1)
+ appendStr(", ");
+ }
+ appendStr(">");
+ }
+ if (qualifier.precision != EpqNone) {
+ appendStr(" ");
+ appendStr(getPrecisionQualifierString());
+ }
+ if (isMatrix()) {
+ appendStr(" ");
+ appendInt(matrixCols);
+ appendStr("X");
+ appendInt(matrixRows);
+ appendStr(" matrix of");
+ } else if (isVector()) {
+ appendStr(" ");
+ appendInt(vectorSize);
+ appendStr("-component vector of");
+ }
+
+ appendStr(" ");
+ typeString.append(getBasicTypeString());
+
+ if (qualifier.builtIn != EbvNone) {
+ appendStr(" ");
+ appendStr(getBuiltInVariableString());
+ }
+
+ // Add struct/block members
+ if (isStruct()) {
+ appendStr("{");
+ for (size_t i = 0; i < structure->size(); ++i) {
+ if (! (*structure)[i].type->hiddenMember()) {
+ typeString.append((*structure)[i].type->getCompleteString());
+ typeString.append(" ");
+ typeString.append((*structure)[i].type->getFieldName());
+ if (i < structure->size() - 1)
+ appendStr(", ");
+ }
+ }
+ appendStr("}");
+ }
+
+ return typeString;
+ }
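+
+    // Illustrative note (not part of the original source): for a declaration
+    // such as "layout(location = 2) flat in highp vec3 v", the string built
+    // above reads roughly:
+    //     layout( location=2) flat in highp 3-component vector of float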
+
+ TString getBasicTypeString() const
+ {
+ if (basicType == EbtSampler)
+ return sampler.getString();
+ else
+ return getBasicString();
+ }
+
+ const char* getStorageQualifierString() const { return GetStorageQualifierString(qualifier.storage); }
+ const char* getBuiltInVariableString() const { return GetBuiltInVariableString(qualifier.builtIn); }
+ const char* getPrecisionQualifierString() const { return GetPrecisionQualifierString(qualifier.precision); }
+ const TTypeList* getStruct() const { assert(isStruct()); return structure; }
+ void setStruct(TTypeList* s) { assert(isStruct()); structure = s; }
+ TTypeList* getWritableStruct() const { assert(isStruct()); return structure; } // This should only be used when known to not be sharing with other threads
+
+ int computeNumComponents() const
+ {
+ int components = 0;
+
+ if (getBasicType() == EbtStruct || getBasicType() == EbtBlock) {
+ for (TTypeList::const_iterator tl = getStruct()->begin(); tl != getStruct()->end(); tl++)
+ components += ((*tl).type)->computeNumComponents();
+ } else if (matrixCols)
+ components = matrixCols * matrixRows;
+ else
+ components = vectorSize;
+
+ if (arraySizes != nullptr) {
+ components *= arraySizes->getCumulativeSize();
+ }
+
+ return components;
+ }
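+
+    // Illustrative note (not part of the original source): for "mat3 m[2]" this
+    // returns 2 * (3 * 3) = 18; for struct { vec2 a; float b; } it returns
+    // 2 + 1 = 3. Opaque types take the vectorSize path and count as 1.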
+
+ // append this type's mangled name to the passed in 'name'
+ void appendMangledName(TString& name) const
+ {
+ buildMangledName(name);
+ name += ';' ;
+ }
+
+ // Do two structure types match? They could be declared independently,
+ // in different places, but still might satisfy the definition of matching.
+ // From the spec:
+ //
+ // "Structures must have the same name, sequence of type names, and
+ // type definitions, and member names to be considered the same type.
+ // This rule applies recursively for nested or embedded types."
+ //
+ bool sameStructType(const TType& right) const
+ {
+ // Most commonly, they are both nullptr, or the same pointer to the same actual structure
+ if ((!isStruct() && !right.isStruct()) ||
+ (isStruct() && right.isStruct() && structure == right.structure))
+ return true;
+
+ // Both being nullptr was caught above, now they both have to be structures of the same number of elements
+ if (!isStruct() || !right.isStruct() ||
+ structure->size() != right.structure->size())
+ return false;
+
+ // Structure names have to match
+ if (*typeName != *right.typeName)
+ return false;
+
+ // Compare the names and types of all the members, which have to match
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->getFieldName() != (*right.structure)[i].type->getFieldName())
+ return false;
+
+ if (*(*structure)[i].type != *(*right.structure)[i].type)
+ return false;
+ }
+
+ return true;
+ }
+
+ bool sameReferenceType(const TType& right) const
+ {
+ if ((basicType == EbtReference) != (right.basicType == EbtReference))
+ return false;
+
+ if ((basicType != EbtReference) && (right.basicType != EbtReference))
+ return true;
+
+ assert(referentType != nullptr);
+ assert(right.referentType != nullptr);
+
+ if (referentType == right.referentType)
+ return true;
+
+ return *referentType == *right.referentType;
+ }
+
+ // See if two types match, in all aspects except arrayness
+ bool sameElementType(const TType& right) const
+ {
+ return basicType == right.basicType && sameElementShape(right);
+ }
+
+    // See if two types' arrayness matches
+ bool sameArrayness(const TType& right) const
+ {
+ return ((arraySizes == nullptr && right.arraySizes == nullptr) ||
+ (arraySizes != nullptr && right.arraySizes != nullptr && *arraySizes == *right.arraySizes));
+ }
+
+    // See if two types' arrayness matches in everything except the outer dimension
+ bool sameInnerArrayness(const TType& right) const
+ {
+ assert(arraySizes != nullptr && right.arraySizes != nullptr);
+ return arraySizes->sameInnerArrayness(*right.arraySizes);
+ }
+
+    // See if two types' parameters match
+ bool sameTypeParameters(const TType& right) const
+ {
+ return ((typeParameters == nullptr && right.typeParameters == nullptr) ||
+ (typeParameters != nullptr && right.typeParameters != nullptr && *typeParameters == *right.typeParameters));
+ }
+
+    // See if two types' elements match in all ways except basic type
+ bool sameElementShape(const TType& right) const
+ {
+ return sampler == right.sampler &&
+ vectorSize == right.vectorSize &&
+ matrixCols == right.matrixCols &&
+ matrixRows == right.matrixRows &&
+ vector1 == right.vector1 &&
+ coopmat == right.coopmat &&
+ sameStructType(right) &&
+ sameReferenceType(right);
+ }
+
+ // See if a cooperative matrix type parameter with unspecified parameters is
+ // an OK function parameter
+ bool coopMatParameterOK(const TType& right) const
+ {
+ return coopmat && right.coopmat &&
+ typeParameters == nullptr && right.typeParameters != nullptr;
+ }
+
+ // See if two types match in all ways (just the actual type, not qualification)
+ bool operator==(const TType& right) const
+ {
+ return sameElementType(right) && sameArrayness(right) && sameTypeParameters(right);
+ }
+
+ bool operator!=(const TType& right) const
+ {
+ return ! operator==(right);
+ }
+
+ unsigned int getBufferReferenceAlignment() const
+ {
+ if (getBasicType() == glslang::EbtReference) {
+ return getReferentType()->getQualifier().hasBufferReferenceAlign() ?
+ (1u << getReferentType()->getQualifier().layoutBufferReferenceAlign) : 16u;
+ } else {
+ return 0;
+ }
+ }
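+
+    // Illustrative note (not part of the original source): the alignment is kept
+    // as a log2 value, so layoutBufferReferenceAlign == 5 yields 1u << 5 == 32
+    // bytes; a buffer_reference without an explicit align defaults to 16 bytes.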
+
+protected:
+ // Require consumer to pick between deep copy and shallow copy.
+ TType(const TType& type);
+ TType& operator=(const TType& type);
+
+ // Recursively copy a type graph, while preserving the graph-like
+ // quality. That is, don't make more than one copy of a structure that
+ // gets reused multiple times in the type graph.
+ void deepCopy(const TType& copyOf, TMap<TTypeList*,TTypeList*>& copiedMap)
+ {
+ shallowCopy(copyOf);
+
+ if (copyOf.arraySizes) {
+ arraySizes = new TArraySizes;
+ *arraySizes = *copyOf.arraySizes;
+ }
+
+ if (copyOf.typeParameters) {
+ typeParameters = new TArraySizes;
+ *typeParameters = *copyOf.typeParameters;
+ }
+
+ if (copyOf.isStruct() && copyOf.structure) {
+ auto prevCopy = copiedMap.find(copyOf.structure);
+ if (prevCopy != copiedMap.end())
+ structure = prevCopy->second;
+ else {
+ structure = new TTypeList;
+ copiedMap[copyOf.structure] = structure;
+ for (unsigned int i = 0; i < copyOf.structure->size(); ++i) {
+ TTypeLoc typeLoc;
+ typeLoc.loc = (*copyOf.structure)[i].loc;
+ typeLoc.type = new TType();
+ typeLoc.type->deepCopy(*(*copyOf.structure)[i].type, copiedMap);
+ structure->push_back(typeLoc);
+ }
+ }
+ }
+
+ if (copyOf.fieldName)
+ fieldName = NewPoolTString(copyOf.fieldName->c_str());
+ if (copyOf.typeName)
+ typeName = NewPoolTString(copyOf.typeName->c_str());
+ }
+
+
+ void buildMangledName(TString&) const;
+
+ TBasicType basicType : 8;
+ int vectorSize : 4; // 1 means either scalar or 1-component vector; see vector1 to disambiguate.
+ int matrixCols : 4;
+ int matrixRows : 4;
+    bool vector1 : 1;  // Backward-compatible tracking of a 1-component vector distinguished from a scalar.
+                       // GLSL 4.5 never has a 1-component vector, so this will always be false until such
+                       // functionality is added.
+                       // HLSL does have 1-component vectors, so this will be true there, to disambiguate
+                       // from a scalar.
+ bool coopmat : 1;
+ TQualifier qualifier;
+
+ TArraySizes* arraySizes; // nullptr unless an array; can be shared across types
+ // A type can't be both a structure (EbtStruct/EbtBlock) and a reference (EbtReference), so
+ // conserve space by making these a union
+ union {
+ TTypeList* structure; // invalid unless this is a struct; can be shared across types
+ TType *referentType; // invalid unless this is an EbtReference
+ };
+ TString *fieldName; // for structure field names
+ TString *typeName; // for structure type name
+ TSampler sampler;
+ TArraySizes* typeParameters;// nullptr unless a parameterized type; can be shared across types
+};
+
+} // end namespace glslang
+
+#endif // _TYPES_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/arrays.h b/src/3rdparty/glslang/glslang/Include/arrays.h
new file mode 100644
index 0000000..7f047d9
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/arrays.h
@@ -0,0 +1,341 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement types for tracking GLSL arrays, arrays of arrays, etc.
+//
+
+#ifndef _ARRAYS_INCLUDED
+#define _ARRAYS_INCLUDED
+
+#include <algorithm>
+
+namespace glslang {
+
+// This is used to mean there is no size yet (unsized); it is waiting to get a size from somewhere else.
+const int UnsizedArraySize = 0;
+
+class TIntermTyped;
+extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
+
+// Specialization constants need both a nominal size and a node that defines
+// the specialization constant being used. Array types are the same when their
+// size and specialization constant nodes are the same.
+struct TArraySize {
+ unsigned int size;
+ TIntermTyped* node; // nullptr means no specialization constant node
+ bool operator==(const TArraySize& rhs) const
+ {
+ if (size != rhs.size)
+ return false;
+ if (node == nullptr || rhs.node == nullptr)
+ return node == rhs.node;
+
+ return SameSpecializationConstants(node, rhs.node);
+ }
+};
+
+//
+// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
+// It has generic-container semantics, while TArraySizes has array-of-array semantics.
+// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
+//
+struct TSmallArrayVector {
+ //
+ // TODO: memory: TSmallArrayVector is intended to be smaller.
+ // Almost all arrays could be handled by two sizes each fitting
+ // in 16 bits, needing a real vector only in the cases where there
+ // are more than 3 sizes or a size needing more than 16 bits.
+ //
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TSmallArrayVector() : sizes(nullptr) { }
+ virtual ~TSmallArrayVector() { dealloc(); }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TSmallArrayVector& operator=(const TSmallArrayVector& from)
+ {
+ if (from.sizes == nullptr)
+ sizes = nullptr;
+ else {
+ alloc();
+ *sizes = *from.sizes;
+ }
+
+ return *this;
+ }
+
+ int size() const
+ {
+ if (sizes == nullptr)
+ return 0;
+ return (int)sizes->size();
+ }
+
+ unsigned int frontSize() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().size;
+ }
+
+ TIntermTyped* frontNode() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().node;
+ }
+
+ void changeFront(unsigned int s)
+ {
+ assert(sizes != nullptr);
+ // this should only happen for implicitly sized arrays, not specialization constants
+ assert(sizes->front().node == nullptr);
+ sizes->front().size = s;
+ }
+
+ void push_back(unsigned int e, TIntermTyped* n)
+ {
+ alloc();
+ TArraySize pair = { e, n };
+ sizes->push_back(pair);
+ }
+
+ void push_back(const TSmallArrayVector& newDims)
+ {
+ alloc();
+ sizes->insert(sizes->end(), newDims.sizes->begin(), newDims.sizes->end());
+ }
+
+ void pop_front()
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ if (sizes->size() == 1)
+ dealloc();
+ else
+ sizes->erase(sizes->begin());
+ }
+
+ // 'this' should currently not be holding anything, and copyNonFront
+ // will make it hold a copy of all but the first element of rhs.
+ // (This would be useful for making a type that is dereferenced by
+ // one dimension.)
+ void copyNonFront(const TSmallArrayVector& rhs)
+ {
+ assert(sizes == nullptr);
+ if (rhs.size() > 1) {
+ alloc();
+ sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
+ }
+ }
+
+ unsigned int getDimSize(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].size;
+ }
+
+ void setDimSize(int i, unsigned int size) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ assert((*sizes)[i].node == nullptr);
+ (*sizes)[i].size = size;
+ }
+
+ TIntermTyped* getDimNode(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].node;
+ }
+
+ bool operator==(const TSmallArrayVector& rhs) const
+ {
+ if (sizes == nullptr && rhs.sizes == nullptr)
+ return true;
+ if (sizes == nullptr || rhs.sizes == nullptr)
+ return false;
+ return *sizes == *rhs.sizes;
+ }
+ bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
+
+protected:
+ TSmallArrayVector(const TSmallArrayVector&);
+
+ void alloc()
+ {
+ if (sizes == nullptr)
+ sizes = new TVector<TArraySize>;
+ }
+ void dealloc()
+ {
+ delete sizes;
+ sizes = nullptr;
+ }
+
+ TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
+};
+
+//
+// Represent an array, or array of arrays, to arbitrary depth. This is not
+// done through a hierarchy of types in a type tree; rather, all contiguous
+// arrayness in the type hierarchy is localized into this single cumulative object.
+//
+// The arrayness in TType is a pointer, so that it can be non-allocated and zero
+// for the vast majority of types that are non-array types.
+//
+// Order Policy: these are all identical:
+// - left to right order within a contiguous set of ...[..][..][..]... in the source language
+// - index order 0, 1, 2, ... within the 'sizes' member below
+// - outer-most to inner-most
+//
+struct TArraySizes {
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TArraySizes() : implicitArraySize(1), variablyIndexed(false) { }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TArraySizes& operator=(const TArraySizes& from)
+ {
+ implicitArraySize = from.implicitArraySize;
+ variablyIndexed = from.variablyIndexed;
+ sizes = from.sizes;
+
+ return *this;
+ }
+
+ // translate from array-of-array semantics to container semantics
+ int getNumDims() const { return sizes.size(); }
+ int getDimSize(int dim) const { return sizes.getDimSize(dim); }
+ TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); }
+ void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); }
+ int getOuterSize() const { return sizes.frontSize(); }
+ TIntermTyped* getOuterNode() const { return sizes.frontNode(); }
+ int getCumulativeSize() const
+ {
+ int size = 1;
+ for (int d = 0; d < sizes.size(); ++d) {
+ // this only makes sense in paths that have a known array size
+ assert(sizes.getDimSize(d) != UnsizedArraySize);
+ size *= sizes.getDimSize(d);
+ }
+ return size;
+ }
+ void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
+ void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
+ void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
+ void addInnerSize(TArraySize pair) {
+ sizes.push_back(pair.size, pair.node);
+ }
+ void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); }
+ void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
+ int getImplicitSize() const { return implicitArraySize; }
+ void updateImplicitSize(int s) { implicitArraySize = std::max(implicitArraySize, s); }
+ bool isInnerUnsized() const
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
+ return true;
+ }
+
+ return false;
+ }
+ bool clearInnerUnsized()
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
+ setDimSize(d, 1);
+ }
+
+ return false;
+ }
+ bool isInnerSpecialization() const
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimNode(d) != nullptr)
+ return true;
+ }
+
+ return false;
+ }
+ bool isOuterSpecialization()
+ {
+ return sizes.getDimNode(0) != nullptr;
+ }
+
+ bool hasUnsized() const { return getOuterSize() == UnsizedArraySize || isInnerUnsized(); }
+ bool isSized() const { return getOuterSize() != UnsizedArraySize; }
+ void dereference() { sizes.pop_front(); }
+ void copyDereferenced(const TArraySizes& rhs)
+ {
+ assert(sizes.size() == 0);
+ if (rhs.sizes.size() > 1)
+ sizes.copyNonFront(rhs.sizes);
+ }
+
+ bool sameInnerArrayness(const TArraySizes& rhs) const
+ {
+ if (sizes.size() != rhs.sizes.size())
+ return false;
+
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) ||
+ sizes.getDimNode(d) != rhs.sizes.getDimNode(d))
+ return false;
+ }
+
+ return true;
+ }
+
+ void setVariablyIndexed() { variablyIndexed = true; }
+ bool isVariablyIndexed() const { return variablyIndexed; }
+
+ bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; }
+ bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; }
+
+protected:
+ TSmallArrayVector sizes;
+
+ TArraySizes(const TArraySizes&);
+
+ // For tracking maximum referenced compile-time constant index.
+ // Applies only to the outer-most dimension. Potentially becomes
+ // the implicit size of the array, if not variably indexed and
+ // otherwise legal.
+ int implicitArraySize;
+ bool variablyIndexed; // true if array is indexed with a non compile-time constant
+};
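+
+// Illustrative sketch (not part of the original source): per the order policy
+// above, a declaration like "float f[2][3]" is captured outer-most first:
+//     TArraySizes s;
+//     s.addInnerSize(2);   // dimension 0: outer-most
+//     s.addInnerSize(3);   // dimension 1: inner-most
+//     // s.getNumDims() == 2, s.getOuterSize() == 2, s.getDimSize(1) == 3,
+//     // s.getCumulativeSize() == 2 * 3 == 6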
+
+} // end namespace glslang
+
+#endif // _ARRAYS_INCLUDED_
diff --git a/src/3rdparty/glslang/glslang/Include/intermediate.h b/src/3rdparty/glslang/glslang/Include/intermediate.h
new file mode 100644
index 0000000..32e684c
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/intermediate.h
@@ -0,0 +1,1730 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2012-2016 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Definition of the in-memory high-level intermediate representation
+// of shaders. This is a tree that the parser creates.
+//
+// Nodes in the tree are defined as a hierarchy of classes derived from
+// TIntermNode. Each is a node in a tree. There is no preset branching factor;
+// each node can have its own type of list of children.
+//
+
+#ifndef __INTERMEDIATE_H
+#define __INTERMEDIATE_H
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900
+ #pragma warning(disable : 4464) // relative include path contains '..'
+ #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted
+#endif
+
+#include "../Include/Common.h"
+#include "../Include/Types.h"
+#include "../Include/ConstantUnion.h"
+
+namespace glslang {
+
+class TIntermediate;
+
+//
+// Operators used by the high-level (parse tree) representation.
+//
+enum TOperator {
+ EOpNull, // if in a node, should only mean a node is still being built
+ EOpSequence, // denotes a list of statements, or parameters, etc.
+    EOpLinkerObjects, // for aggregate node of objects the linker may need, if not referenced by the rest of the AST
+ EOpFunctionCall,
+ EOpFunction, // For function definition
+ EOpParameters, // an aggregate listing the parameters to a function
+
+ //
+ // Unary operators
+ //
+
+ EOpNegative,
+ EOpLogicalNot,
+ EOpVectorLogicalNot,
+ EOpBitwiseNot,
+
+ EOpPostIncrement,
+ EOpPostDecrement,
+ EOpPreIncrement,
+ EOpPreDecrement,
+
+ // (u)int* -> bool
+ EOpConvInt8ToBool,
+ EOpConvUint8ToBool,
+ EOpConvInt16ToBool,
+ EOpConvUint16ToBool,
+ EOpConvIntToBool,
+ EOpConvUintToBool,
+ EOpConvInt64ToBool,
+ EOpConvUint64ToBool,
+
+ // float* -> bool
+ EOpConvFloat16ToBool,
+ EOpConvFloatToBool,
+ EOpConvDoubleToBool,
+
+ // bool -> (u)int*
+ EOpConvBoolToInt8,
+ EOpConvBoolToUint8,
+ EOpConvBoolToInt16,
+ EOpConvBoolToUint16,
+ EOpConvBoolToInt,
+ EOpConvBoolToUint,
+ EOpConvBoolToInt64,
+ EOpConvBoolToUint64,
+
+ // bool -> float*
+ EOpConvBoolToFloat16,
+ EOpConvBoolToFloat,
+ EOpConvBoolToDouble,
+
+ // int8_t -> (u)int*
+ EOpConvInt8ToInt16,
+ EOpConvInt8ToInt,
+ EOpConvInt8ToInt64,
+ EOpConvInt8ToUint8,
+ EOpConvInt8ToUint16,
+ EOpConvInt8ToUint,
+ EOpConvInt8ToUint64,
+
+ // uint8_t -> (u)int*
+ EOpConvUint8ToInt8,
+ EOpConvUint8ToInt16,
+ EOpConvUint8ToInt,
+ EOpConvUint8ToInt64,
+ EOpConvUint8ToUint16,
+ EOpConvUint8ToUint,
+ EOpConvUint8ToUint64,
+
+ // int8_t -> float*
+ EOpConvInt8ToFloat16,
+ EOpConvInt8ToFloat,
+ EOpConvInt8ToDouble,
+
+ // uint8_t -> float*
+ EOpConvUint8ToFloat16,
+ EOpConvUint8ToFloat,
+ EOpConvUint8ToDouble,
+
+ // int16_t -> (u)int*
+ EOpConvInt16ToInt8,
+ EOpConvInt16ToInt,
+ EOpConvInt16ToInt64,
+ EOpConvInt16ToUint8,
+ EOpConvInt16ToUint16,
+ EOpConvInt16ToUint,
+ EOpConvInt16ToUint64,
+
+ // uint16_t -> (u)int*
+ EOpConvUint16ToInt8,
+ EOpConvUint16ToInt16,
+ EOpConvUint16ToInt,
+ EOpConvUint16ToInt64,
+ EOpConvUint16ToUint8,
+ EOpConvUint16ToUint,
+ EOpConvUint16ToUint64,
+
+ // int16_t -> float*
+ EOpConvInt16ToFloat16,
+ EOpConvInt16ToFloat,
+ EOpConvInt16ToDouble,
+
+ // uint16_t -> float*
+ EOpConvUint16ToFloat16,
+ EOpConvUint16ToFloat,
+ EOpConvUint16ToDouble,
+
+ // int32_t -> (u)int*
+ EOpConvIntToInt8,
+ EOpConvIntToInt16,
+ EOpConvIntToInt64,
+ EOpConvIntToUint8,
+ EOpConvIntToUint16,
+ EOpConvIntToUint,
+ EOpConvIntToUint64,
+
+ // uint32_t -> (u)int*
+ EOpConvUintToInt8,
+ EOpConvUintToInt16,
+ EOpConvUintToInt,
+ EOpConvUintToInt64,
+ EOpConvUintToUint8,
+ EOpConvUintToUint16,
+ EOpConvUintToUint64,
+
+ // int32_t -> float*
+ EOpConvIntToFloat16,
+ EOpConvIntToFloat,
+ EOpConvIntToDouble,
+
+ // uint32_t -> float*
+ EOpConvUintToFloat16,
+ EOpConvUintToFloat,
+ EOpConvUintToDouble,
+
+ // int64_t -> (u)int*
+ EOpConvInt64ToInt8,
+ EOpConvInt64ToInt16,
+ EOpConvInt64ToInt,
+ EOpConvInt64ToUint8,
+ EOpConvInt64ToUint16,
+ EOpConvInt64ToUint,
+ EOpConvInt64ToUint64,
+
+ // uint64_t -> (u)int*
+ EOpConvUint64ToInt8,
+ EOpConvUint64ToInt16,
+ EOpConvUint64ToInt,
+ EOpConvUint64ToInt64,
+ EOpConvUint64ToUint8,
+ EOpConvUint64ToUint16,
+ EOpConvUint64ToUint,
+
+ // int64_t -> float*
+ EOpConvInt64ToFloat16,
+ EOpConvInt64ToFloat,
+ EOpConvInt64ToDouble,
+
+ // uint64_t -> float*
+ EOpConvUint64ToFloat16,
+ EOpConvUint64ToFloat,
+ EOpConvUint64ToDouble,
+
+ // float16_t -> (u)int*
+ EOpConvFloat16ToInt8,
+ EOpConvFloat16ToInt16,
+ EOpConvFloat16ToInt,
+ EOpConvFloat16ToInt64,
+ EOpConvFloat16ToUint8,
+ EOpConvFloat16ToUint16,
+ EOpConvFloat16ToUint,
+ EOpConvFloat16ToUint64,
+
+ // float16_t -> float*
+ EOpConvFloat16ToFloat,
+ EOpConvFloat16ToDouble,
+
+ // float -> (u)int*
+ EOpConvFloatToInt8,
+ EOpConvFloatToInt16,
+ EOpConvFloatToInt,
+ EOpConvFloatToInt64,
+ EOpConvFloatToUint8,
+ EOpConvFloatToUint16,
+ EOpConvFloatToUint,
+ EOpConvFloatToUint64,
+
+ // float -> float*
+ EOpConvFloatToFloat16,
+ EOpConvFloatToDouble,
+
+    // float64_t -> (u)int*
+ EOpConvDoubleToInt8,
+ EOpConvDoubleToInt16,
+ EOpConvDoubleToInt,
+ EOpConvDoubleToInt64,
+ EOpConvDoubleToUint8,
+ EOpConvDoubleToUint16,
+ EOpConvDoubleToUint,
+ EOpConvDoubleToUint64,
+
+ // float64_t -> float*
+ EOpConvDoubleToFloat16,
+ EOpConvDoubleToFloat,
+
+ // uint64_t <-> pointer
+ EOpConvUint64ToPtr,
+ EOpConvPtrToUint64,
+
+ //
+ // binary operations
+ //
+
+ EOpAdd,
+ EOpSub,
+ EOpMul,
+ EOpDiv,
+ EOpMod,
+ EOpRightShift,
+ EOpLeftShift,
+ EOpAnd,
+ EOpInclusiveOr,
+ EOpExclusiveOr,
+ EOpEqual,
+ EOpNotEqual,
+ EOpVectorEqual,
+ EOpVectorNotEqual,
+ EOpLessThan,
+ EOpGreaterThan,
+ EOpLessThanEqual,
+ EOpGreaterThanEqual,
+ EOpComma,
+
+ EOpVectorTimesScalar,
+ EOpVectorTimesMatrix,
+ EOpMatrixTimesVector,
+ EOpMatrixTimesScalar,
+
+ EOpLogicalOr,
+ EOpLogicalXor,
+ EOpLogicalAnd,
+
+ EOpIndexDirect,
+ EOpIndexIndirect,
+ EOpIndexDirectStruct,
+
+ EOpVectorSwizzle,
+
+ EOpMethod,
+ EOpScoping,
+
+ //
+ // Built-in functions mapped to operators
+ //
+
+ EOpRadians,
+ EOpDegrees,
+ EOpSin,
+ EOpCos,
+ EOpTan,
+ EOpAsin,
+ EOpAcos,
+ EOpAtan,
+ EOpSinh,
+ EOpCosh,
+ EOpTanh,
+ EOpAsinh,
+ EOpAcosh,
+ EOpAtanh,
+
+ EOpPow,
+ EOpExp,
+ EOpLog,
+ EOpExp2,
+ EOpLog2,
+ EOpSqrt,
+ EOpInverseSqrt,
+
+ EOpAbs,
+ EOpSign,
+ EOpFloor,
+ EOpTrunc,
+ EOpRound,
+ EOpRoundEven,
+ EOpCeil,
+ EOpFract,
+ EOpModf,
+ EOpMin,
+ EOpMax,
+ EOpClamp,
+ EOpMix,
+ EOpStep,
+ EOpSmoothStep,
+
+ EOpIsNan,
+ EOpIsInf,
+
+ EOpFma,
+
+ EOpFrexp,
+ EOpLdexp,
+
+ EOpFloatBitsToInt,
+ EOpFloatBitsToUint,
+ EOpIntBitsToFloat,
+ EOpUintBitsToFloat,
+ EOpDoubleBitsToInt64,
+ EOpDoubleBitsToUint64,
+ EOpInt64BitsToDouble,
+ EOpUint64BitsToDouble,
+ EOpFloat16BitsToInt16,
+ EOpFloat16BitsToUint16,
+ EOpInt16BitsToFloat16,
+ EOpUint16BitsToFloat16,
+ EOpPackSnorm2x16,
+ EOpUnpackSnorm2x16,
+ EOpPackUnorm2x16,
+ EOpUnpackUnorm2x16,
+ EOpPackSnorm4x8,
+ EOpUnpackSnorm4x8,
+ EOpPackUnorm4x8,
+ EOpUnpackUnorm4x8,
+ EOpPackHalf2x16,
+ EOpUnpackHalf2x16,
+ EOpPackDouble2x32,
+ EOpUnpackDouble2x32,
+ EOpPackInt2x32,
+ EOpUnpackInt2x32,
+ EOpPackUint2x32,
+ EOpUnpackUint2x32,
+ EOpPackFloat2x16,
+ EOpUnpackFloat2x16,
+ EOpPackInt2x16,
+ EOpUnpackInt2x16,
+ EOpPackUint2x16,
+ EOpUnpackUint2x16,
+ EOpPackInt4x16,
+ EOpUnpackInt4x16,
+ EOpPackUint4x16,
+ EOpUnpackUint4x16,
+ EOpPack16,
+ EOpPack32,
+ EOpPack64,
+ EOpUnpack32,
+ EOpUnpack16,
+ EOpUnpack8,
+
+ EOpLength,
+ EOpDistance,
+ EOpDot,
+ EOpCross,
+ EOpNormalize,
+ EOpFaceForward,
+ EOpReflect,
+ EOpRefract,
+
+#ifdef AMD_EXTENSIONS
+ EOpMin3,
+ EOpMax3,
+ EOpMid3,
+#endif
+
+ EOpDPdx, // Fragment only
+ EOpDPdy, // Fragment only
+ EOpFwidth, // Fragment only
+ EOpDPdxFine, // Fragment only
+ EOpDPdyFine, // Fragment only
+ EOpFwidthFine, // Fragment only
+ EOpDPdxCoarse, // Fragment only
+ EOpDPdyCoarse, // Fragment only
+ EOpFwidthCoarse, // Fragment only
+
+ EOpInterpolateAtCentroid, // Fragment only
+ EOpInterpolateAtSample, // Fragment only
+ EOpInterpolateAtOffset, // Fragment only
+
+#ifdef AMD_EXTENSIONS
+ EOpInterpolateAtVertex,
+#endif
+
+ EOpMatrixTimesMatrix,
+ EOpOuterProduct,
+ EOpDeterminant,
+ EOpMatrixInverse,
+ EOpTranspose,
+
+ EOpFtransform,
+
+ EOpNoise,
+
+ EOpEmitVertex, // geometry only
+ EOpEndPrimitive, // geometry only
+ EOpEmitStreamVertex, // geometry only
+ EOpEndStreamPrimitive, // geometry only
+
+ EOpBarrier,
+ EOpMemoryBarrier,
+ EOpMemoryBarrierAtomicCounter,
+ EOpMemoryBarrierBuffer,
+ EOpMemoryBarrierImage,
+ EOpMemoryBarrierShared, // compute only
+ EOpGroupMemoryBarrier, // compute only
+
+ EOpBallot,
+ EOpReadInvocation,
+ EOpReadFirstInvocation,
+
+ EOpAnyInvocation,
+ EOpAllInvocations,
+ EOpAllInvocationsEqual,
+
+ EOpSubgroupGuardStart,
+ EOpSubgroupBarrier,
+ EOpSubgroupMemoryBarrier,
+ EOpSubgroupMemoryBarrierBuffer,
+ EOpSubgroupMemoryBarrierImage,
+ EOpSubgroupMemoryBarrierShared, // compute only
+ EOpSubgroupElect,
+ EOpSubgroupAll,
+ EOpSubgroupAny,
+ EOpSubgroupAllEqual,
+ EOpSubgroupBroadcast,
+ EOpSubgroupBroadcastFirst,
+ EOpSubgroupBallot,
+ EOpSubgroupInverseBallot,
+ EOpSubgroupBallotBitExtract,
+ EOpSubgroupBallotBitCount,
+ EOpSubgroupBallotInclusiveBitCount,
+ EOpSubgroupBallotExclusiveBitCount,
+ EOpSubgroupBallotFindLSB,
+ EOpSubgroupBallotFindMSB,
+ EOpSubgroupShuffle,
+ EOpSubgroupShuffleXor,
+ EOpSubgroupShuffleUp,
+ EOpSubgroupShuffleDown,
+ EOpSubgroupAdd,
+ EOpSubgroupMul,
+ EOpSubgroupMin,
+ EOpSubgroupMax,
+ EOpSubgroupAnd,
+ EOpSubgroupOr,
+ EOpSubgroupXor,
+ EOpSubgroupInclusiveAdd,
+ EOpSubgroupInclusiveMul,
+ EOpSubgroupInclusiveMin,
+ EOpSubgroupInclusiveMax,
+ EOpSubgroupInclusiveAnd,
+ EOpSubgroupInclusiveOr,
+ EOpSubgroupInclusiveXor,
+ EOpSubgroupExclusiveAdd,
+ EOpSubgroupExclusiveMul,
+ EOpSubgroupExclusiveMin,
+ EOpSubgroupExclusiveMax,
+ EOpSubgroupExclusiveAnd,
+ EOpSubgroupExclusiveOr,
+ EOpSubgroupExclusiveXor,
+ EOpSubgroupClusteredAdd,
+ EOpSubgroupClusteredMul,
+ EOpSubgroupClusteredMin,
+ EOpSubgroupClusteredMax,
+ EOpSubgroupClusteredAnd,
+ EOpSubgroupClusteredOr,
+ EOpSubgroupClusteredXor,
+ EOpSubgroupQuadBroadcast,
+ EOpSubgroupQuadSwapHorizontal,
+ EOpSubgroupQuadSwapVertical,
+ EOpSubgroupQuadSwapDiagonal,
+
+#ifdef NV_EXTENSIONS
+ EOpSubgroupPartition,
+ EOpSubgroupPartitionedAdd,
+ EOpSubgroupPartitionedMul,
+ EOpSubgroupPartitionedMin,
+ EOpSubgroupPartitionedMax,
+ EOpSubgroupPartitionedAnd,
+ EOpSubgroupPartitionedOr,
+ EOpSubgroupPartitionedXor,
+ EOpSubgroupPartitionedInclusiveAdd,
+ EOpSubgroupPartitionedInclusiveMul,
+ EOpSubgroupPartitionedInclusiveMin,
+ EOpSubgroupPartitionedInclusiveMax,
+ EOpSubgroupPartitionedInclusiveAnd,
+ EOpSubgroupPartitionedInclusiveOr,
+ EOpSubgroupPartitionedInclusiveXor,
+ EOpSubgroupPartitionedExclusiveAdd,
+ EOpSubgroupPartitionedExclusiveMul,
+ EOpSubgroupPartitionedExclusiveMin,
+ EOpSubgroupPartitionedExclusiveMax,
+ EOpSubgroupPartitionedExclusiveAnd,
+ EOpSubgroupPartitionedExclusiveOr,
+ EOpSubgroupPartitionedExclusiveXor,
+#endif
+
+ EOpSubgroupGuardStop,
+
+#ifdef AMD_EXTENSIONS
+ EOpMinInvocations,
+ EOpMaxInvocations,
+ EOpAddInvocations,
+ EOpMinInvocationsNonUniform,
+ EOpMaxInvocationsNonUniform,
+ EOpAddInvocationsNonUniform,
+ EOpMinInvocationsInclusiveScan,
+ EOpMaxInvocationsInclusiveScan,
+ EOpAddInvocationsInclusiveScan,
+ EOpMinInvocationsInclusiveScanNonUniform,
+ EOpMaxInvocationsInclusiveScanNonUniform,
+ EOpAddInvocationsInclusiveScanNonUniform,
+ EOpMinInvocationsExclusiveScan,
+ EOpMaxInvocationsExclusiveScan,
+ EOpAddInvocationsExclusiveScan,
+ EOpMinInvocationsExclusiveScanNonUniform,
+ EOpMaxInvocationsExclusiveScanNonUniform,
+ EOpAddInvocationsExclusiveScanNonUniform,
+ EOpSwizzleInvocations,
+ EOpSwizzleInvocationsMasked,
+ EOpWriteInvocation,
+ EOpMbcnt,
+
+ EOpCubeFaceIndex,
+ EOpCubeFaceCoord,
+ EOpTime,
+#endif
+
+ EOpAtomicAdd,
+ EOpAtomicMin,
+ EOpAtomicMax,
+ EOpAtomicAnd,
+ EOpAtomicOr,
+ EOpAtomicXor,
+ EOpAtomicExchange,
+ EOpAtomicCompSwap,
+ EOpAtomicLoad,
+ EOpAtomicStore,
+
+ EOpAtomicCounterIncrement, // results in pre-increment value
+ EOpAtomicCounterDecrement, // results in post-decrement value
+ EOpAtomicCounter,
+ EOpAtomicCounterAdd,
+ EOpAtomicCounterSubtract,
+ EOpAtomicCounterMin,
+ EOpAtomicCounterMax,
+ EOpAtomicCounterAnd,
+ EOpAtomicCounterOr,
+ EOpAtomicCounterXor,
+ EOpAtomicCounterExchange,
+ EOpAtomicCounterCompSwap,
+
+ EOpAny,
+ EOpAll,
+
+ EOpCooperativeMatrixLoad,
+ EOpCooperativeMatrixStore,
+ EOpCooperativeMatrixMulAdd,
+
+ //
+ // Branch
+ //
+
+ EOpKill, // Fragment only
+ EOpReturn,
+ EOpBreak,
+ EOpContinue,
+ EOpCase,
+ EOpDefault,
+
+ //
+ // Constructors
+ //
+
+ EOpConstructGuardStart,
+ EOpConstructInt, // these first scalar forms also identify what implicit conversion is needed
+ EOpConstructUint,
+ EOpConstructInt8,
+ EOpConstructUint8,
+ EOpConstructInt16,
+ EOpConstructUint16,
+ EOpConstructInt64,
+ EOpConstructUint64,
+ EOpConstructBool,
+ EOpConstructFloat,
+ EOpConstructDouble,
+ EOpConstructVec2,
+ EOpConstructVec3,
+ EOpConstructVec4,
+ EOpConstructDVec2,
+ EOpConstructDVec3,
+ EOpConstructDVec4,
+ EOpConstructBVec2,
+ EOpConstructBVec3,
+ EOpConstructBVec4,
+ EOpConstructI8Vec2,
+ EOpConstructI8Vec3,
+ EOpConstructI8Vec4,
+ EOpConstructU8Vec2,
+ EOpConstructU8Vec3,
+ EOpConstructU8Vec4,
+ EOpConstructI16Vec2,
+ EOpConstructI16Vec3,
+ EOpConstructI16Vec4,
+ EOpConstructU16Vec2,
+ EOpConstructU16Vec3,
+ EOpConstructU16Vec4,
+ EOpConstructIVec2,
+ EOpConstructIVec3,
+ EOpConstructIVec4,
+ EOpConstructUVec2,
+ EOpConstructUVec3,
+ EOpConstructUVec4,
+ EOpConstructI64Vec2,
+ EOpConstructI64Vec3,
+ EOpConstructI64Vec4,
+ EOpConstructU64Vec2,
+ EOpConstructU64Vec3,
+ EOpConstructU64Vec4,
+ EOpConstructMat2x2,
+ EOpConstructMat2x3,
+ EOpConstructMat2x4,
+ EOpConstructMat3x2,
+ EOpConstructMat3x3,
+ EOpConstructMat3x4,
+ EOpConstructMat4x2,
+ EOpConstructMat4x3,
+ EOpConstructMat4x4,
+ EOpConstructDMat2x2,
+ EOpConstructDMat2x3,
+ EOpConstructDMat2x4,
+ EOpConstructDMat3x2,
+ EOpConstructDMat3x3,
+ EOpConstructDMat3x4,
+ EOpConstructDMat4x2,
+ EOpConstructDMat4x3,
+ EOpConstructDMat4x4,
+ EOpConstructIMat2x2,
+ EOpConstructIMat2x3,
+ EOpConstructIMat2x4,
+ EOpConstructIMat3x2,
+ EOpConstructIMat3x3,
+ EOpConstructIMat3x4,
+ EOpConstructIMat4x2,
+ EOpConstructIMat4x3,
+ EOpConstructIMat4x4,
+ EOpConstructUMat2x2,
+ EOpConstructUMat2x3,
+ EOpConstructUMat2x4,
+ EOpConstructUMat3x2,
+ EOpConstructUMat3x3,
+ EOpConstructUMat3x4,
+ EOpConstructUMat4x2,
+ EOpConstructUMat4x3,
+ EOpConstructUMat4x4,
+ EOpConstructBMat2x2,
+ EOpConstructBMat2x3,
+ EOpConstructBMat2x4,
+ EOpConstructBMat3x2,
+ EOpConstructBMat3x3,
+ EOpConstructBMat3x4,
+ EOpConstructBMat4x2,
+ EOpConstructBMat4x3,
+ EOpConstructBMat4x4,
+ EOpConstructFloat16,
+ EOpConstructF16Vec2,
+ EOpConstructF16Vec3,
+ EOpConstructF16Vec4,
+ EOpConstructF16Mat2x2,
+ EOpConstructF16Mat2x3,
+ EOpConstructF16Mat2x4,
+ EOpConstructF16Mat3x2,
+ EOpConstructF16Mat3x3,
+ EOpConstructF16Mat3x4,
+ EOpConstructF16Mat4x2,
+ EOpConstructF16Mat4x3,
+ EOpConstructF16Mat4x4,
+ EOpConstructStruct,
+ EOpConstructTextureSampler,
+ EOpConstructNonuniform, // expected to be transformed away, not present in final AST
+ EOpConstructReference,
+ EOpConstructCooperativeMatrix,
+ EOpConstructGuardEnd,
+
+ //
+ // moves
+ //
+
+ EOpAssign,
+ EOpAddAssign,
+ EOpSubAssign,
+ EOpMulAssign,
+ EOpVectorTimesMatrixAssign,
+ EOpVectorTimesScalarAssign,
+ EOpMatrixTimesScalarAssign,
+ EOpMatrixTimesMatrixAssign,
+ EOpDivAssign,
+ EOpModAssign,
+ EOpAndAssign,
+ EOpInclusiveOrAssign,
+ EOpExclusiveOrAssign,
+ EOpLeftShiftAssign,
+ EOpRightShiftAssign,
+
+ //
+ // Array operators
+ //
+
+ // Can apply to arrays, vectors, or matrices.
+ // Can be decomposed to a constant at compile time, but this does not always happen,
+    // due to link-time effects. So, the consumer can expect either a link-time sized or
+ // run-time sized array.
+ EOpArrayLength,
+
+ //
+ // Image operations
+ //
+
+ EOpImageGuardBegin,
+
+ EOpImageQuerySize,
+ EOpImageQuerySamples,
+ EOpImageLoad,
+ EOpImageStore,
+#ifdef AMD_EXTENSIONS
+ EOpImageLoadLod,
+ EOpImageStoreLod,
+#endif
+ EOpImageAtomicAdd,
+ EOpImageAtomicMin,
+ EOpImageAtomicMax,
+ EOpImageAtomicAnd,
+ EOpImageAtomicOr,
+ EOpImageAtomicXor,
+ EOpImageAtomicExchange,
+ EOpImageAtomicCompSwap,
+ EOpImageAtomicLoad,
+ EOpImageAtomicStore,
+
+ EOpSubpassLoad,
+ EOpSubpassLoadMS,
+ EOpSparseImageLoad,
+#ifdef AMD_EXTENSIONS
+ EOpSparseImageLoadLod,
+#endif
+
+ EOpImageGuardEnd,
+
+ //
+ // Texture operations
+ //
+
+ EOpTextureGuardBegin,
+
+ EOpTextureQuerySize,
+ EOpTextureQueryLod,
+ EOpTextureQueryLevels,
+ EOpTextureQuerySamples,
+
+ EOpSamplingGuardBegin,
+
+ EOpTexture,
+ EOpTextureProj,
+ EOpTextureLod,
+ EOpTextureOffset,
+ EOpTextureFetch,
+ EOpTextureFetchOffset,
+ EOpTextureProjOffset,
+ EOpTextureLodOffset,
+ EOpTextureProjLod,
+ EOpTextureProjLodOffset,
+ EOpTextureGrad,
+ EOpTextureGradOffset,
+ EOpTextureProjGrad,
+ EOpTextureProjGradOffset,
+ EOpTextureGather,
+ EOpTextureGatherOffset,
+ EOpTextureGatherOffsets,
+ EOpTextureClamp,
+ EOpTextureOffsetClamp,
+ EOpTextureGradClamp,
+ EOpTextureGradOffsetClamp,
+#ifdef AMD_EXTENSIONS
+ EOpTextureGatherLod,
+ EOpTextureGatherLodOffset,
+ EOpTextureGatherLodOffsets,
+ EOpFragmentMaskFetch,
+ EOpFragmentFetch,
+#endif
+
+ EOpSparseTextureGuardBegin,
+
+ EOpSparseTexture,
+ EOpSparseTextureLod,
+ EOpSparseTextureOffset,
+ EOpSparseTextureFetch,
+ EOpSparseTextureFetchOffset,
+ EOpSparseTextureLodOffset,
+ EOpSparseTextureGrad,
+ EOpSparseTextureGradOffset,
+ EOpSparseTextureGather,
+ EOpSparseTextureGatherOffset,
+ EOpSparseTextureGatherOffsets,
+ EOpSparseTexelsResident,
+ EOpSparseTextureClamp,
+ EOpSparseTextureOffsetClamp,
+ EOpSparseTextureGradClamp,
+ EOpSparseTextureGradOffsetClamp,
+#ifdef AMD_EXTENSIONS
+ EOpSparseTextureGatherLod,
+ EOpSparseTextureGatherLodOffset,
+ EOpSparseTextureGatherLodOffsets,
+#endif
+
+ EOpSparseTextureGuardEnd,
+
+#ifdef NV_EXTENSIONS
+ EOpImageFootprintGuardBegin,
+ EOpImageSampleFootprintNV,
+ EOpImageSampleFootprintClampNV,
+ EOpImageSampleFootprintLodNV,
+ EOpImageSampleFootprintGradNV,
+ EOpImageSampleFootprintGradClampNV,
+ EOpImageFootprintGuardEnd,
+#endif
+ EOpSamplingGuardEnd,
+ EOpTextureGuardEnd,
+
+ //
+ // Integer operations
+ //
+
+ EOpAddCarry,
+ EOpSubBorrow,
+ EOpUMulExtended,
+ EOpIMulExtended,
+ EOpBitfieldExtract,
+ EOpBitfieldInsert,
+ EOpBitFieldReverse,
+ EOpBitCount,
+ EOpFindLSB,
+ EOpFindMSB,
+
+#ifdef NV_EXTENSIONS
+ EOpTraceNV,
+ EOpReportIntersectionNV,
+ EOpIgnoreIntersectionNV,
+ EOpTerminateRayNV,
+ EOpExecuteCallableNV,
+ EOpWritePackedPrimitiveIndices4x8NV,
+#endif
+ //
+ // HLSL operations
+ //
+
+ EOpClip, // discard if input value < 0
+ EOpIsFinite,
+ EOpLog10, // base 10 log
+ EOpRcp, // 1/x
+ EOpSaturate, // clamp from 0 to 1
+ EOpSinCos, // sin and cos in out parameters
+ EOpGenMul, // mul(x,y) on any of mat/vec/scalars
+ EOpDst, // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w
+ EOpInterlockedAdd, // atomic ops, but uses [optional] out arg instead of return
+ EOpInterlockedAnd, // ...
+ EOpInterlockedCompareExchange, // ...
+ EOpInterlockedCompareStore, // ...
+ EOpInterlockedExchange, // ...
+ EOpInterlockedMax, // ...
+ EOpInterlockedMin, // ...
+ EOpInterlockedOr, // ...
+ EOpInterlockedXor, // ...
+    EOpAllMemoryBarrierWithGroupSync,    // memory barriers with no non-HLSL AST equivalent
+ EOpDeviceMemoryBarrier, // ...
+ EOpDeviceMemoryBarrierWithGroupSync, // ...
+ EOpWorkgroupMemoryBarrier, // ...
+ EOpWorkgroupMemoryBarrierWithGroupSync, // ...
+ EOpEvaluateAttributeSnapped, // InterpolateAtOffset with int position on 16x16 grid
+ EOpF32tof16, // HLSL conversion: half of a PackHalf2x16
+ EOpF16tof32, // HLSL conversion: half of an UnpackHalf2x16
+ EOpLit, // HLSL lighting coefficient vector
+ EOpTextureBias, // HLSL texture bias: will be lowered to EOpTexture
+ EOpAsDouble, // slightly different from EOpUint64BitsToDouble
+ EOpD3DCOLORtoUBYTE4, // convert and swizzle 4-component color to UBYTE4 range
+
+ EOpMethodSample, // Texture object methods. These are translated to existing
+ EOpMethodSampleBias, // AST methods, and exist to represent HLSL semantics until that
+ EOpMethodSampleCmp, // translation is performed. See HlslParseContext::decomposeSampleMethods().
+ EOpMethodSampleCmpLevelZero, // ...
+ EOpMethodSampleGrad, // ...
+ EOpMethodSampleLevel, // ...
+ EOpMethodLoad, // ...
+ EOpMethodGetDimensions, // ...
+ EOpMethodGetSamplePosition, // ...
+ EOpMethodGather, // ...
+ EOpMethodCalculateLevelOfDetail, // ...
+ EOpMethodCalculateLevelOfDetailUnclamped, // ...
+
+ // Load already defined above for textures
+ EOpMethodLoad2, // Structure buffer object methods. These are translated to existing
+ EOpMethodLoad3, // AST methods, and exist to represent HLSL semantics until that
+ EOpMethodLoad4, // translation is performed. See HlslParseContext::decomposeSampleMethods().
+ EOpMethodStore, // ...
+ EOpMethodStore2, // ...
+ EOpMethodStore3, // ...
+ EOpMethodStore4, // ...
+ EOpMethodIncrementCounter, // ...
+ EOpMethodDecrementCounter, // ...
+ // EOpMethodAppend is defined for geo shaders below
+ EOpMethodConsume,
+
+ // SM5 texture methods
+ EOpMethodGatherRed, // These are covered under the above EOpMethodSample comment about
+ EOpMethodGatherGreen, // translation to existing AST opcodes. They exist temporarily
+ EOpMethodGatherBlue, // because HLSL arguments are slightly different.
+ EOpMethodGatherAlpha, // ...
+ EOpMethodGatherCmp, // ...
+ EOpMethodGatherCmpRed, // ...
+ EOpMethodGatherCmpGreen, // ...
+ EOpMethodGatherCmpBlue, // ...
+ EOpMethodGatherCmpAlpha, // ...
+
+ // geometry methods
+ EOpMethodAppend, // Geometry shader methods
+ EOpMethodRestartStrip, // ...
+
+ // matrix
+ EOpMatrixSwizzle, // select multiple matrix components (non-column)
+
+ // SM6 wave ops
+ EOpWaveGetLaneCount, // Will decompose to gl_SubgroupSize.
+ EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID.
+ EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()).
+ EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
+};
+
+class TIntermTraverser;
+class TIntermOperator;
+class TIntermAggregate;
+class TIntermUnary;
+class TIntermBinary;
+class TIntermConstantUnion;
+class TIntermSelection;
+class TIntermSwitch;
+class TIntermBranch;
+class TIntermTyped;
+class TIntermMethod;
+class TIntermSymbol;
+class TIntermLoop;
+
+} // end namespace glslang
+
+//
+// Base class for the tree nodes
+//
+// (Put outside the glslang namespace, as it's used as part of the external interface.)
+//
+class TIntermNode {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+
+ TIntermNode() { loc.init(); }
+ virtual const glslang::TSourceLoc& getLoc() const { return loc; }
+ virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
+ virtual void traverse(glslang::TIntermTraverser*) = 0;
+ virtual glslang::TIntermTyped* getAsTyped() { return 0; }
+ virtual glslang::TIntermOperator* getAsOperator() { return 0; }
+ virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; }
+ virtual glslang::TIntermAggregate* getAsAggregate() { return 0; }
+ virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; }
+ virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; }
+ virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; }
+ virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; }
+ virtual glslang::TIntermMethod* getAsMethodNode() { return 0; }
+ virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; }
+ virtual glslang::TIntermBranch* getAsBranchNode() { return 0; }
+ virtual glslang::TIntermLoop* getAsLoopNode() { return 0; }
+
+ virtual const glslang::TIntermTyped* getAsTyped() const { return 0; }
+ virtual const glslang::TIntermOperator* getAsOperator() const { return 0; }
+ virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
+ virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; }
+ virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; }
+ virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; }
+ virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; }
+ virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; }
+ virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; }
+ virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; }
+ virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; }
+ virtual const glslang::TIntermLoop* getAsLoopNode() const { return 0; }
+ virtual ~TIntermNode() { }
+
+protected:
+ TIntermNode(const TIntermNode&);
+ TIntermNode& operator=(const TIntermNode&);
+ glslang::TSourceLoc loc;
+};
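+
+// Usage sketch (illustrative, not part of the interface): the getAs* methods
+// stand in for dynamic_cast on pool-allocated nodes; each returns nullptr
+// unless the node really has that derived type. A hypothetical consumer:
+//
+//     void dump(TIntermNode* node) {                        // hypothetical helper
+//         if (glslang::TIntermSymbol* sym = node->getAsSymbolNode())
+//             printf("symbol %s\n", sym->getName().c_str());
+//     }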
+
+namespace glslang {
+
+//
+// This is just to help yacc.
+//
+struct TIntermNodePair {
+ TIntermNode* node1;
+ TIntermNode* node2;
+};
+
+//
+// Intermediate class for nodes that have a type.
+//
+class TIntermTyped : public TIntermNode {
+public:
+ TIntermTyped(const TType& t) { type.shallowCopy(t); }
+ TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
+ virtual TIntermTyped* getAsTyped() { return this; }
+ virtual const TIntermTyped* getAsTyped() const { return this; }
+ virtual void setType(const TType& t) { type.shallowCopy(t); }
+ virtual const TType& getType() const { return type; }
+ virtual TType& getWritableType() { return type; }
+
+ virtual TBasicType getBasicType() const { return type.getBasicType(); }
+ virtual TQualifier& getQualifier() { return type.getQualifier(); }
+ virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
+ virtual void propagatePrecision(TPrecisionQualifier);
+ virtual int getVectorSize() const { return type.getVectorSize(); }
+ virtual int getMatrixCols() const { return type.getMatrixCols(); }
+ virtual int getMatrixRows() const { return type.getMatrixRows(); }
+ virtual bool isMatrix() const { return type.isMatrix(); }
+ virtual bool isArray() const { return type.isArray(); }
+ virtual bool isVector() const { return type.isVector(); }
+ virtual bool isScalar() const { return type.isScalar(); }
+ virtual bool isStruct() const { return type.isStruct(); }
+ virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
+ virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
+ TString getCompleteString() const { return type.getCompleteString(); }
+
+protected:
+ TIntermTyped& operator=(const TIntermTyped&);
+ TType type;
+};
+
+//
+// Handle for, do-while, and while loops.
+//
+class TIntermLoop : public TIntermNode {
+public:
+ TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
+ body(aBody),
+ test(aTest),
+ terminal(aTerminal),
+ first(testFirst),
+ unroll(false),
+ dontUnroll(false),
+ dependency(0)
+ { }
+
+ virtual TIntermLoop* getAsLoopNode() { return this; }
+ virtual const TIntermLoop* getAsLoopNode() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ TIntermNode* getBody() const { return body; }
+ TIntermTyped* getTest() const { return test; }
+ TIntermTyped* getTerminal() const { return terminal; }
+ bool testFirst() const { return first; }
+
+ void setUnroll() { unroll = true; }
+ void setDontUnroll() { dontUnroll = true; }
+ bool getUnroll() const { return unroll; }
+ bool getDontUnroll() const { return dontUnroll; }
+
+ static const unsigned int dependencyInfinite = 0xFFFFFFFF;
+ void setLoopDependency(int d) { dependency = d; }
+ int getLoopDependency() const { return dependency; }
+
+protected:
+ TIntermNode* body; // code to loop over
+ TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops
+ TIntermTyped* terminal; // exists for for-loops
+ bool first; // true for while and for, not for do-while
+ bool unroll; // true if unroll requested
+ bool dontUnroll; // true if request to not unroll
+ unsigned int dependency; // loop dependency hint; 0 means not set or unknown
+};
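+
+// Usage sketch (illustrative): 'testFirst' is what separates while/for from
+// do-while; 'body', 'cond', and 'inc' below are hypothetical child nodes.
+//
+//     new TIntermLoop(body, cond, nullptr, true);   // while (cond) body;
+//     new TIntermLoop(body, cond, nullptr, false);  // do body; while (cond);
+//     new TIntermLoop(body, cond, inc, true);       // for (...; cond; inc) body;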
+
+//
+// Handle case, break, continue, return, and kill.
+//
+class TIntermBranch : public TIntermNode {
+public:
+ TIntermBranch(TOperator op, TIntermTyped* e) :
+ flowOp(op),
+ expression(e) { }
+ virtual TIntermBranch* getAsBranchNode() { return this; }
+ virtual const TIntermBranch* getAsBranchNode() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ TOperator getFlowOp() const { return flowOp; }
+ TIntermTyped* getExpression() const { return expression; }
+protected:
+ TOperator flowOp;
+ TIntermTyped* expression;
+};
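+
+// Usage sketch (illustrative): a branch node pairs a flow opcode with an
+// optional expression; 'expr' below is a hypothetical TIntermTyped*.
+//
+//     new TIntermBranch(EOpReturn, expr);     // return expr;
+//     new TIntermBranch(EOpBreak, nullptr);   // break; carries no expression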
+
+//
+// Represent method names before seeing their calling signature
+// or resolving them to operations. Just an expression as the base object
+// and a textual name.
+//
+class TIntermMethod : public TIntermTyped {
+public:
+ TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
+ virtual TIntermMethod* getAsMethodNode() { return this; }
+ virtual const TIntermMethod* getAsMethodNode() const { return this; }
+ virtual const TString& getMethodName() const { return method; }
+ virtual TIntermTyped* getObject() const { return object; }
+ virtual void traverse(TIntermTraverser*);
+protected:
+ TIntermTyped* object;
+ TString method;
+};
+
+//
+// Nodes that correspond to symbols or constants in the source code.
+//
+class TIntermSymbol : public TIntermTyped {
+public:
+    // If symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes
+    // from the per-process threadPoolAllocator, this causes increased memory usage per compile,
+    // so it is essential to use "symbol = sym" to assign to symbol.
+ TIntermSymbol(int i, const TString& n, const TType& t)
+ : TIntermTyped(t), id(i),
+#ifdef ENABLE_HLSL
+ flattenSubset(-1),
+#endif
+ constSubtree(nullptr)
+ { name = n; }
+ virtual int getId() const { return id; }
+ virtual void changeId(int i) { id = i; }
+ virtual const TString& getName() const { return name; }
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermSymbol* getAsSymbolNode() { return this; }
+ virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
+ void setConstArray(const TConstUnionArray& c) { constArray = c; }
+ const TConstUnionArray& getConstArray() const { return constArray; }
+ void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
+ TIntermTyped* getConstSubtree() const { return constSubtree; }
+#ifdef ENABLE_HLSL
+ void setFlattenSubset(int subset) { flattenSubset = subset; }
+ int getFlattenSubset() const { return flattenSubset; } // -1 means full object
+#endif
+
+ // This is meant for cases where a node has already been constructed, and
+ // later on, it becomes necessary to switch to a different symbol.
+ virtual void switchId(int newId) { id = newId; }
+
+protected:
+ int id; // the unique id of the symbol this node represents
+#ifdef ENABLE_HLSL
+ int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
+#endif
+ TString name; // the name of the symbol this node represents
+ TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
+ TIntermTyped* constSubtree;
+};
+
+class TIntermConstantUnion : public TIntermTyped {
+public:
+ TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
+ const TConstUnionArray& getConstArray() const { return constArray; }
+ virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
+ virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
+ virtual TIntermTyped* fold(TOperator, const TType&) const;
+ void setLiteral() { literal = true; }
+ void setExpression() { literal = false; }
+ bool isLiteral() const { return literal; }
+
+protected:
+ TIntermConstantUnion& operator=(const TIntermConstantUnion&);
+
+ const TConstUnionArray constArray;
+ bool literal; // true if node represents a literal in the source code
+};
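+
+// Usage sketch (illustrative): constant folding goes through fold(). For two
+// hypothetical constant nodes 'c1' and 'c2' of matching type:
+//
+//     TIntermTyped* sum = c1->fold(EOpAdd, c2);   // may yield a new folded constant node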
+
+// Represent the independent aspects of a texturing TOperator
+struct TCrackedTextureOp {
+ bool query;
+ bool proj;
+ bool lod;
+ bool fetch;
+ bool offset;
+ bool offsets;
+ bool gather;
+ bool grad;
+ bool subpass;
+ bool lodClamp;
+#ifdef AMD_EXTENSIONS
+ bool fragMask;
+#endif
+};
+
+//
+// Intermediate class for node types that hold operators.
+//
+class TIntermOperator : public TIntermTyped {
+public:
+ virtual TIntermOperator* getAsOperator() { return this; }
+ virtual const TIntermOperator* getAsOperator() const { return this; }
+ TOperator getOp() const { return op; }
+ void setOp(TOperator newOp) { op = newOp; }
+ bool modifiesState() const;
+ bool isConstructor() const;
+ bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
+ bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
+ bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
+ bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
+#ifdef NV_EXTENSIONS
+ bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
+#endif
+ bool isSparseImage() const { return op == EOpSparseImageLoad; }
+
+ void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
+ TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
+ operationPrecision :
+ type.getQualifier().precision; }
+ TString getCompleteString() const
+ {
+ TString cs = type.getCompleteString();
+ if (getOperationPrecision() != type.getQualifier().precision) {
+ cs += ", operation at ";
+ cs += GetPrecisionQualifierString(getOperationPrecision());
+ }
+
+ return cs;
+ }
+
+    // Crack the op into the individual dimensions of the texturing operation.
+ void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
+ {
+ cracked.query = false;
+ cracked.proj = false;
+ cracked.lod = false;
+ cracked.fetch = false;
+ cracked.offset = false;
+ cracked.offsets = false;
+ cracked.gather = false;
+ cracked.grad = false;
+ cracked.subpass = false;
+ cracked.lodClamp = false;
+#ifdef AMD_EXTENSIONS
+ cracked.fragMask = false;
+#endif
+
+ switch (op) {
+ case EOpImageQuerySize:
+ case EOpImageQuerySamples:
+ case EOpTextureQuerySize:
+ case EOpTextureQueryLod:
+ case EOpTextureQueryLevels:
+ case EOpTextureQuerySamples:
+ case EOpSparseTexelsResident:
+ cracked.query = true;
+ break;
+ case EOpTexture:
+ case EOpSparseTexture:
+ break;
+ case EOpTextureClamp:
+ case EOpSparseTextureClamp:
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureProj:
+ cracked.proj = true;
+ break;
+ case EOpTextureLod:
+ case EOpSparseTextureLod:
+ cracked.lod = true;
+ break;
+ case EOpTextureOffset:
+ case EOpSparseTextureOffset:
+ cracked.offset = true;
+ break;
+ case EOpTextureOffsetClamp:
+ case EOpSparseTextureOffsetClamp:
+ cracked.offset = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureFetch:
+ case EOpSparseTextureFetch:
+ cracked.fetch = true;
+ if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+ cracked.lod = true;
+ break;
+ case EOpTextureFetchOffset:
+ case EOpSparseTextureFetchOffset:
+ cracked.fetch = true;
+ cracked.offset = true;
+ if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+ cracked.lod = true;
+ break;
+ case EOpTextureProjOffset:
+ cracked.offset = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureLodOffset:
+ case EOpSparseTextureLodOffset:
+ cracked.offset = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureProjLod:
+ cracked.lod = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureProjLodOffset:
+ cracked.offset = true;
+ cracked.lod = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureGrad:
+ case EOpSparseTextureGrad:
+ cracked.grad = true;
+ break;
+ case EOpTextureGradClamp:
+ case EOpSparseTextureGradClamp:
+ cracked.grad = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureGradOffset:
+ case EOpSparseTextureGradOffset:
+ cracked.grad = true;
+ cracked.offset = true;
+ break;
+ case EOpTextureProjGrad:
+ cracked.grad = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureProjGradOffset:
+ cracked.grad = true;
+ cracked.offset = true;
+ cracked.proj = true;
+ break;
+ case EOpTextureGradOffsetClamp:
+ case EOpSparseTextureGradOffsetClamp:
+ cracked.grad = true;
+ cracked.offset = true;
+ cracked.lodClamp = true;
+ break;
+ case EOpTextureGather:
+ case EOpSparseTextureGather:
+ cracked.gather = true;
+ break;
+ case EOpTextureGatherOffset:
+ case EOpSparseTextureGatherOffset:
+ cracked.gather = true;
+ cracked.offset = true;
+ break;
+ case EOpTextureGatherOffsets:
+ case EOpSparseTextureGatherOffsets:
+ cracked.gather = true;
+ cracked.offsets = true;
+ break;
+#ifdef AMD_EXTENSIONS
+ case EOpTextureGatherLod:
+ case EOpSparseTextureGatherLod:
+ cracked.gather = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureGatherLodOffset:
+ case EOpSparseTextureGatherLodOffset:
+ cracked.gather = true;
+ cracked.offset = true;
+ cracked.lod = true;
+ break;
+ case EOpTextureGatherLodOffsets:
+ case EOpSparseTextureGatherLodOffsets:
+ cracked.gather = true;
+ cracked.offsets = true;
+ cracked.lod = true;
+ break;
+ case EOpImageLoadLod:
+ case EOpImageStoreLod:
+ case EOpSparseImageLoadLod:
+ cracked.lod = true;
+ break;
+ case EOpFragmentMaskFetch:
+ cracked.subpass = sampler.dim == EsdSubpass;
+ cracked.fragMask = true;
+ break;
+ case EOpFragmentFetch:
+ cracked.subpass = sampler.dim == EsdSubpass;
+ cracked.fragMask = true;
+ break;
+#endif
+#ifdef NV_EXTENSIONS
+ case EOpImageSampleFootprintNV:
+ break;
+ case EOpImageSampleFootprintClampNV:
+ cracked.lodClamp = true;
+ break;
+ case EOpImageSampleFootprintLodNV:
+ cracked.lod = true;
+ break;
+ case EOpImageSampleFootprintGradNV:
+ cracked.grad = true;
+ break;
+ case EOpImageSampleFootprintGradClampNV:
+ cracked.lodClamp = true;
+ cracked.grad = true;
+ break;
+#endif
+ case EOpSubpassLoad:
+ case EOpSubpassLoadMS:
+ cracked.subpass = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+protected:
+ TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {}
+ TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {}
+ TOperator op;
+ // The result precision is in the inherited TType, and is usually meant to be both
+ // the operation precision and the result precision. However, some more complex things,
+    // like built-in function calls, distinguish between the two, in which case non-EpqNone
+ // 'operationPrecision' overrides the result precision as far as operation precision
+ // is concerned.
+ TPrecisionQualifier operationPrecision;
+};
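+
+// Usage sketch (illustrative): crackTexture() turns a fused texturing opcode
+// into orthogonal flags, so consumers can test dimensions instead of
+// enumerating every opcode. For a hypothetical operator node 'texOp' whose op
+// is EOpTextureProjLodOffset:
+//
+//     TCrackedTextureOp cracked;
+//     texOp->crackTexture(sampler, cracked);  // 'sampler' comes from the operand's type
+//     // cracked.proj, cracked.lod, and cracked.offset are now all true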
+
+//
+// Nodes for all the basic binary math operators.
+//
+class TIntermBinary : public TIntermOperator {
+public:
+ TIntermBinary(TOperator o) : TIntermOperator(o) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual void setLeft(TIntermTyped* n) { left = n; }
+ virtual void setRight(TIntermTyped* n) { right = n; }
+ virtual TIntermTyped* getLeft() const { return left; }
+ virtual TIntermTyped* getRight() const { return right; }
+ virtual TIntermBinary* getAsBinaryNode() { return this; }
+ virtual const TIntermBinary* getAsBinaryNode() const { return this; }
+ virtual void updatePrecision();
+protected:
+ TIntermTyped* left;
+ TIntermTyped* right;
+};
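+
+// Usage sketch (illustrative): binary nodes are wired up by the caller;
+// 'a' and 'b' below are hypothetical TIntermTyped* operands for 'a + b'.
+//
+//     TIntermBinary* add = new TIntermBinary(EOpAdd);
+//     add->setLeft(a);
+//     add->setRight(b);
+//     add->setType(a->getType());  // a real front end computes the result type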
+
+//
+// Nodes for unary math operators.
+//
+class TIntermUnary : public TIntermOperator {
+public:
+ TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {}
+ TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual void setOperand(TIntermTyped* o) { operand = o; }
+ virtual TIntermTyped* getOperand() { return operand; }
+ virtual const TIntermTyped* getOperand() const { return operand; }
+ virtual TIntermUnary* getAsUnaryNode() { return this; }
+ virtual const TIntermUnary* getAsUnaryNode() const { return this; }
+ virtual void updatePrecision();
+protected:
+ TIntermTyped* operand;
+};
+
+typedef TVector<TIntermNode*> TIntermSequence;
+typedef TVector<TStorageQualifier> TQualifierList;
+//
+// Nodes that operate on an arbitrary sized set of children.
+//
+class TIntermAggregate : public TIntermOperator {
+public:
+ TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { }
+ TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { }
+ ~TIntermAggregate() { delete pragmaTable; }
+ virtual TIntermAggregate* getAsAggregate() { return this; }
+ virtual const TIntermAggregate* getAsAggregate() const { return this; }
+ virtual void setOperator(TOperator o) { op = o; }
+ virtual TIntermSequence& getSequence() { return sequence; }
+ virtual const TIntermSequence& getSequence() const { return sequence; }
+ virtual void setName(const TString& n) { name = n; }
+ virtual const TString& getName() const { return name; }
+ virtual void traverse(TIntermTraverser*);
+ virtual void setUserDefined() { userDefined = true; }
+ virtual bool isUserDefined() { return userDefined; }
+ virtual TQualifierList& getQualifierList() { return qualifier; }
+ virtual const TQualifierList& getQualifierList() const { return qualifier; }
+ void setOptimize(bool o) { optimize = o; }
+ void setDebug(bool d) { debug = d; }
+ bool getOptimize() const { return optimize; }
+ bool getDebug() const { return debug; }
+ void setPragmaTable(const TPragmaTable& pTable);
+ const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
+protected:
+ TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
+ TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
+ TIntermSequence sequence;
+ TQualifierList qualifier;
+ TString name;
+ bool userDefined; // used for user defined function names
+ bool optimize;
+ bool debug;
+ TPragmaTable* pragmaTable;
+};
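+
+// Usage sketch (illustrative): an aggregate holds an arbitrary child list,
+// e.g. a hypothetical call to a user-defined function (the mangled name and
+// 'arg' are made up for the example):
+//
+//     TIntermAggregate* call = new TIntermAggregate(EOpFunctionCall);
+//     call->setName("foo(f1;");
+//     call->setUserDefined();
+//     call->getSequence().push_back(arg);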
+
+//
+// For if tests.
+//
+class TIntermSelection : public TIntermTyped {
+public:
+ TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
+ TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB),
+ shortCircuit(true),
+ flatten(false), dontFlatten(false) {}
+ TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
+ TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB),
+ shortCircuit(true),
+ flatten(false), dontFlatten(false) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermTyped* getCondition() const { return condition; }
+ virtual TIntermNode* getTrueBlock() const { return trueBlock; }
+ virtual TIntermNode* getFalseBlock() const { return falseBlock; }
+ virtual TIntermSelection* getAsSelectionNode() { return this; }
+ virtual const TIntermSelection* getAsSelectionNode() const { return this; }
+
+ void setNoShortCircuit() { shortCircuit = false; }
+ bool getShortCircuit() const { return shortCircuit; }
+
+ void setFlatten() { flatten = true; }
+ void setDontFlatten() { dontFlatten = true; }
+ bool getFlatten() const { return flatten; }
+ bool getDontFlatten() const { return dontFlatten; }
+
+protected:
+ TIntermTyped* condition;
+ TIntermNode* trueBlock;
+ TIntermNode* falseBlock;
+ bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not
+ bool flatten; // true if flatten requested
+ bool dontFlatten; // true if requested to not flatten
+};
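+
+// Usage sketch (illustrative): the two constructors cover both uses of
+// selection; 'cond', 'thenB', 'elseB', and 'resultType' are hypothetical.
+//
+//     new TIntermSelection(cond, thenB, elseB);              // if/else statement (EbtVoid)
+//     new TIntermSelection(cond, thenB, elseB, resultType);  // cond ? thenB : elseB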
+
+//
+// For switch statements. Designed use is that a switch will have a sequence of nodes
+// that are either case/default nodes or a *single* node that represents all the code
+// (if any) in between consecutive case/defaults. So, a traversal need only deal with
+// 0 or 1 nodes per case/default statement.
+//
+class TIntermSwitch : public TIntermNode {
+public:
+ TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b),
+ flatten(false), dontFlatten(false) {}
+ virtual void traverse(TIntermTraverser*);
+ virtual TIntermNode* getCondition() const { return condition; }
+ virtual TIntermAggregate* getBody() const { return body; }
+ virtual TIntermSwitch* getAsSwitchNode() { return this; }
+ virtual const TIntermSwitch* getAsSwitchNode() const { return this; }
+
+ void setFlatten() { flatten = true; }
+ void setDontFlatten() { dontFlatten = true; }
+ bool getFlatten() const { return flatten; }
+ bool getDontFlatten() const { return dontFlatten; }
+
+protected:
+ TIntermTyped* condition;
+ TIntermAggregate* body;
+ bool flatten; // true if flatten requested
+ bool dontFlatten; // true if requested to not flatten
+};
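+
+// Usage sketch (illustrative), matching the design comment above: the body's
+// sequence alternates case/default branch nodes with at most one code node in
+// between, so a hypothetical consumer can walk it linearly:
+//
+//     for (TIntermNode* n : switchNode->getBody()->getSequence()) {
+//         if (n->getAsBranchNode())   // an EOpCase or EOpDefault marker
+//             startNewCase(n);        // hypothetical helper
+//         else
+//             emitCaseBody(n);        // the single code node in between
+//     }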
+
+enum TVisit
+{
+ EvPreVisit,
+ EvInVisit,
+ EvPostVisit
+};
+
+//
+// For traversing the tree. User should derive from this,
+// put their traversal specific data in it, and then pass
+// it to a Traverse method.
+//
+// When using this, just fill in the methods for nodes you want visited.
+// Return false from a pre-visit to skip visiting that node's subtree.
+//
+// Explicitly set postVisit to true if you want post visiting; otherwise,
+// filled-in methods will only be called at pre-visit time (before processing
+// the subtree). Similarly for inVisit for in-order visiting of nodes with
+// multiple children.
+//
+// If you only want post-visits, explicitly turn off preVisit (and inVisit)
+// and turn on postVisit.
+//
+// In general, for the visit*() methods, return true from interior nodes
+// to have the traversal continue on to children.
+//
+// If you process children yourself, or don't want them processed, return false.
+//
+class TIntermTraverser {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+ TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
+ preVisit(preVisit),
+ inVisit(inVisit),
+ postVisit(postVisit),
+ rightToLeft(rightToLeft),
+ depth(0),
+ maxDepth(0) { }
+ virtual ~TIntermTraverser() { }
+
+ virtual void visitSymbol(TIntermSymbol*) { }
+ virtual void visitConstantUnion(TIntermConstantUnion*) { }
+ virtual bool visitBinary(TVisit, TIntermBinary*) { return true; }
+ virtual bool visitUnary(TVisit, TIntermUnary*) { return true; }
+ virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
+ virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
+ virtual bool visitLoop(TVisit, TIntermLoop*) { return true; }
+ virtual bool visitBranch(TVisit, TIntermBranch*) { return true; }
+ virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; }
+
+ int getMaxDepth() const { return maxDepth; }
+
+ void incrementDepth(TIntermNode *current)
+ {
+ depth++;
+ maxDepth = (std::max)(maxDepth, depth);
+ path.push_back(current);
+ }
+
+ void decrementDepth()
+ {
+ depth--;
+ path.pop_back();
+ }
+
+ TIntermNode *getParentNode()
+ {
+ return path.size() == 0 ? NULL : path.back();
+ }
+
+ const bool preVisit;
+ const bool inVisit;
+ const bool postVisit;
+ const bool rightToLeft;
+
+protected:
+ TIntermTraverser& operator=(TIntermTraverser&);
+
+ int depth;
+ int maxDepth;
+
+ // All the nodes from root to the current node's parent during traversing.
+ TVector<TIntermNode *> path;
+};
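+
+// Usage sketch (illustrative), following the comment above: derive, override
+// only the visits you care about, then hand the traverser to traverse().
+// 'CountSymbols' and 'root' are hypothetical:
+//
+//     class CountSymbols : public TIntermTraverser {
+//     public:
+//         int count = 0;
+//         virtual void visitSymbol(TIntermSymbol*) { ++count; }
+//     };
+//
+//     CountSymbols counter;
+//     root->traverse(&counter);   // the defaults above mean pre-visit only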
+
+// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
+// sized with the same symbol, involving no operations"
+inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
+{
+ return node1->getAsSymbolNode() && node2->getAsSymbolNode() &&
+ node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId();
+}
+
+} // end namespace glslang
+
+#endif // __INTERMEDIATE_H
diff --git a/src/3rdparty/glslang/glslang/Include/revision.h b/src/3rdparty/glslang/glslang/Include/revision.h
new file mode 100644
index 0000000..f810a33
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/revision.h
@@ -0,0 +1,3 @@
+// This header is generated by the make-revision script.
+
+#define GLSLANG_PATCH_LEVEL 3170
diff --git a/src/3rdparty/glslang/glslang/Include/revision.template b/src/3rdparty/glslang/glslang/Include/revision.template
new file mode 100644
index 0000000..4a16bee
--- /dev/null
+++ b/src/3rdparty/glslang/glslang/Include/revision.template
@@ -0,0 +1,13 @@
+// The file revision.h should be updated to the latest version, somehow, on
+// check-in, if glslang has changed.
+//
+// revision.template is the source for revision.h when using SubWCRev as the
+// method of updating revision.h. You don't have to do it this way; the
+// requirement is only that revision.h gets updated.
+//
+// revision.h is under source control so that not all consumers of glslang
+// source have to figure out how to create revision.h just to get a build
+// going. However, if it is not updated, it can be a version behind.
+
+#define GLSLANG_REVISION "$WCREV$"
+#define GLSLANG_DATE "$WCDATE$"