summaryrefslogtreecommitdiffstats
path: root/chromium/third_party/glslang/src/glslang/Include
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/third_party/glslang/src/glslang/Include')
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/BaseTypes.h315
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/Common.h257
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/ConstantUnion.h617
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/InfoSink.h145
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/InitializeGlobals.h47
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/PoolAlloc.h325
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/ResourceLimits.h140
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/ShHandle.h174
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/Types.h1714
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/arrays.h318
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/intermediate.h1090
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/revision.h6
-rw-r--r--chromium/third_party/glslang/src/glslang/Include/revision.template13
13 files changed, 5161 insertions, 0 deletions
diff --git a/chromium/third_party/glslang/src/glslang/Include/BaseTypes.h b/chromium/third_party/glslang/src/glslang/Include/BaseTypes.h
new file mode 100644
index 00000000000..2c2757724b8
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/BaseTypes.h
@@ -0,0 +1,315 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2012-2013 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _BASICTYPES_INCLUDED_
+#define _BASICTYPES_INCLUDED_
+
+namespace glslang {
+
+//
+// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this.
+//
+enum TBasicType {
+ EbtVoid,
+ EbtFloat,
+ EbtDouble,
+ EbtInt,
+ EbtUint,
+ EbtInt64,
+ EbtUint64,
+ EbtBool,
+ EbtAtomicUint,
+ EbtSampler,
+ EbtStruct,
+ EbtBlock,
+ EbtNumTypes
+};
+
+//
+// Storage qualifiers. Should align with different kinds of storage or
+// resource or GLSL storage qualifier. Expansion is deprecated.
+//
+// N.B.: You probably DON'T want to add anything here, but rather just add it
+// to the built-in variables. See the comment above TBuiltInVariable.
+//
+// A new built-in variable will normally be an existing qualifier, like 'in', 'out', etc.
+// DO NOT follow the design pattern of, say EvqInstanceId, etc.
+//
+enum TStorageQualifier {
+ EvqTemporary, // For temporaries (within a function), read/write
+ EvqGlobal, // For globals read/write
+ EvqConst, // User-defined constant values, will be semantically constant and constant folded
+ EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
+ EvqUniform, // read only, shared with app
+ EvqBuffer, // read/write, shared with app
+ EvqShared, // compute shader's read/write 'shared' qualifier
+
+ // parameters
+ EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
+ EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
+ EvqInOut,
+ EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
+
+ // built-ins read by vertex shader
+ EvqVertexId,
+ EvqInstanceId,
+
+ // built-ins written by vertex shader
+ EvqPosition,
+ EvqPointSize,
+ EvqClipVertex,
+
+ // built-ins read by fragment shader
+ EvqFace,
+ EvqFragCoord,
+ EvqPointCoord,
+
+ // built-ins written by fragment shader
+ EvqFragColor,
+ EvqFragDepth,
+
+ // end of list
+ EvqLast
+};
+
+//
+// Subcategories of the TStorageQualifier, simply to give a direct mapping
+// between built-in variable names and a numerical value (the enum).
+//
+// For backward compatibility, there is some redundancy between the
+// TStorageQualifier and these. Both sets of existing members should be maintained accurately.
+// However, any new built-in variable (and any existing non-redundant one)
+// must follow the pattern that the specific built-in is here, and only its
+// general qualifier is in TStorageQualifier.
+//
+// Something like gl_Position, which is sometimes 'in' and sometimes 'out'
+// shows up as two different built-in variables in a single stage, but
+// only has a single enum in TBuiltInVariable, so both the
+// TStorageQualifier and the TBuiltInVariable are needed to distinguish
+// between them.
+//
+enum TBuiltInVariable {
+ EbvNone,
+ EbvNumWorkGroups,
+ EbvWorkGroupSize,
+ EbvWorkGroupId,
+ EbvLocalInvocationId,
+ EbvGlobalInvocationId,
+ EbvLocalInvocationIndex,
+ EbvSubGroupSize,
+ EbvSubGroupInvocation,
+ EbvSubGroupEqMask,
+ EbvSubGroupGeMask,
+ EbvSubGroupGtMask,
+ EbvSubGroupLeMask,
+ EbvSubGroupLtMask,
+ EbvVertexId,
+ EbvInstanceId,
+ EbvVertexIndex,
+ EbvInstanceIndex,
+ EbvBaseVertex,
+ EbvBaseInstance,
+ EbvDrawId,
+ EbvPosition,
+ EbvPointSize,
+ EbvClipVertex,
+ EbvClipDistance,
+ EbvCullDistance,
+ EbvNormal,
+ EbvVertex,
+ EbvMultiTexCoord0,
+ EbvMultiTexCoord1,
+ EbvMultiTexCoord2,
+ EbvMultiTexCoord3,
+ EbvMultiTexCoord4,
+ EbvMultiTexCoord5,
+ EbvMultiTexCoord6,
+ EbvMultiTexCoord7,
+ EbvFrontColor,
+ EbvBackColor,
+ EbvFrontSecondaryColor,
+ EbvBackSecondaryColor,
+ EbvTexCoord,
+ EbvFogFragCoord,
+ EbvInvocationId,
+ EbvPrimitiveId,
+ EbvLayer,
+ EbvViewportIndex,
+ EbvPatchVertices,
+ EbvTessLevelOuter,
+ EbvTessLevelInner,
+ EbvBoundingBox,
+ EbvTessCoord,
+ EbvColor,
+ EbvSecondaryColor,
+ EbvFace,
+ EbvFragCoord,
+ EbvPointCoord,
+ EbvFragColor,
+ EbvFragData,
+ EbvFragDepth,
+ EbvSampleId,
+ EbvSamplePosition,
+ EbvSampleMask,
+ EbvHelperInvocation,
+
+ EbvLast
+};
+
+// These will show up in error messages
+__inline const char* GetStorageQualifierString(TStorageQualifier q)
+{
+ switch (q) {
+ case EvqTemporary: return "temp"; break;
+ case EvqGlobal: return "global"; break;
+ case EvqConst: return "const"; break;
+ case EvqConstReadOnly: return "const (read only)"; break;
+ case EvqVaryingIn: return "in"; break;
+ case EvqVaryingOut: return "out"; break;
+ case EvqUniform: return "uniform"; break;
+ case EvqBuffer: return "buffer"; break;
+ case EvqShared: return "shared"; break;
+ case EvqIn: return "in"; break;
+ case EvqOut: return "out"; break;
+ case EvqInOut: return "inout"; break;
+ case EvqVertexId: return "gl_VertexId"; break;
+ case EvqInstanceId: return "gl_InstanceId"; break;
+ case EvqPosition: return "gl_Position"; break;
+ case EvqPointSize: return "gl_PointSize"; break;
+ case EvqClipVertex: return "gl_ClipVertex"; break;
+ case EvqFace: return "gl_FrontFacing"; break;
+ case EvqFragCoord: return "gl_FragCoord"; break;
+ case EvqPointCoord: return "gl_PointCoord"; break;
+ case EvqFragColor: return "fragColor"; break;
+ case EvqFragDepth: return "gl_FragDepth"; break;
+ default: return "unknown qualifier";
+ }
+}
+
+__inline const char* GetBuiltInVariableString(TBuiltInVariable v)
+{
+ switch (v) {
+ case EbvNone: return "";
+ case EbvNumWorkGroups: return "NumWorkGroups";
+ case EbvWorkGroupSize: return "WorkGroupSize";
+ case EbvWorkGroupId: return "WorkGroupID";
+ case EbvLocalInvocationId: return "LocalInvocationID";
+ case EbvGlobalInvocationId: return "GlobalInvocationID";
+ case EbvLocalInvocationIndex: return "LocalInvocationIndex";
+ case EbvSubGroupSize: return "SubGroupSize";
+ case EbvSubGroupInvocation: return "SubGroupInvocation";
+ case EbvSubGroupEqMask: return "SubGroupEqMask";
+ case EbvSubGroupGeMask: return "SubGroupGeMask";
+ case EbvSubGroupGtMask: return "SubGroupGtMask";
+ case EbvSubGroupLeMask: return "SubGroupLeMask";
+ case EbvSubGroupLtMask: return "SubGroupLtMask";
+ case EbvVertexId: return "VertexId";
+ case EbvInstanceId: return "InstanceId";
+ case EbvVertexIndex: return "VertexIndex";
+ case EbvInstanceIndex: return "InstanceIndex";
+ case EbvBaseVertex: return "BaseVertex";
+ case EbvBaseInstance: return "BaseInstance";
+ case EbvDrawId: return "DrawId";
+ case EbvPosition: return "Position";
+ case EbvPointSize: return "PointSize";
+ case EbvClipVertex: return "ClipVertex";
+ case EbvClipDistance: return "ClipDistance";
+ case EbvCullDistance: return "CullDistance";
+ case EbvNormal: return "Normal";
+ case EbvVertex: return "Vertex";
+ case EbvMultiTexCoord0: return "MultiTexCoord0";
+ case EbvMultiTexCoord1: return "MultiTexCoord1";
+ case EbvMultiTexCoord2: return "MultiTexCoord2";
+ case EbvMultiTexCoord3: return "MultiTexCoord3";
+ case EbvMultiTexCoord4: return "MultiTexCoord4";
+ case EbvMultiTexCoord5: return "MultiTexCoord5";
+ case EbvMultiTexCoord6: return "MultiTexCoord6";
+ case EbvMultiTexCoord7: return "MultiTexCoord7";
+ case EbvFrontColor: return "FrontColor";
+ case EbvBackColor: return "BackColor";
+ case EbvFrontSecondaryColor: return "FrontSecondaryColor";
+ case EbvBackSecondaryColor: return "BackSecondaryColor";
+ case EbvTexCoord: return "TexCoord";
+ case EbvFogFragCoord: return "FogFragCoord";
+ case EbvInvocationId: return "InvocationID";
+ case EbvPrimitiveId: return "PrimitiveID";
+ case EbvLayer: return "Layer";
+ case EbvViewportIndex: return "ViewportIndex";
+ case EbvPatchVertices: return "PatchVertices";
+ case EbvTessLevelOuter: return "TessLevelOuter";
+ case EbvTessLevelInner: return "TessLevelInner";
+ case EbvBoundingBox: return "BoundingBox";
+ case EbvTessCoord: return "TessCoord";
+ case EbvColor: return "Color";
+ case EbvSecondaryColor: return "SecondaryColor";
+ case EbvFace: return "Face";
+ case EbvFragCoord: return "FragCoord";
+ case EbvPointCoord: return "PointCoord";
+ case EbvFragColor: return "FragColor";
+ case EbvFragData: return "FragData";
+ case EbvFragDepth: return "FragDepth";
+ case EbvSampleId: return "SampleId";
+ case EbvSamplePosition: return "SamplePosition";
+ case EbvSampleMask: return "SampleMaskIn";
+ case EbvHelperInvocation: return "HelperInvocation";
+ default: return "unknown built-in variable";
+ }
+}
+
+// In this enum, order matters; users can assume higher precision is a bigger value
+// and EpqNone is 0.
+enum TPrecisionQualifier {
+ EpqNone = 0,
+ EpqLow,
+ EpqMedium,
+ EpqHigh
+};
+
+__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
+{
+ switch(p) {
+ case EpqNone: return ""; break;
+ case EpqLow: return "lowp"; break;
+ case EpqMedium: return "mediump"; break;
+ case EpqHigh: return "highp"; break;
+ default: return "unknown precision qualifier";
+ }
+}
+
+} // end namespace glslang
+
+#endif // _BASICTYPES_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/Common.h b/chromium/third_party/glslang/src/glslang/Include/Common.h
new file mode 100644
index 00000000000..efb78d44f8c
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/Common.h
@@ -0,0 +1,257 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2012-2013 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _COMMON_INCLUDED_
+#define _COMMON_INCLUDED_
+
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+ #include <basetsd.h>
+ #define snprintf sprintf_s
+ #define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
+#elif defined (solaris)
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <sys/int_types.h>
+ #define UINT_PTR uintptr_t
+#else
+ #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+ #include <stdint.h>
+ #define UINT_PTR uintptr_t
+#endif
+
+#if defined(__ANDROID__) || _MSC_VER < 1700
+#include <sstream>
+namespace std {
+template<typename T>
+std::string to_string(const T& val) {
+ std::ostringstream os;
+ os << val;
+ return os.str();
+}
+}
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1700
+inline long long int strtoll (const char* str, char** endptr, int base)
+{
+ return _strtoi64(str, endptr, base);
+}
+inline long long int atoll (const char* str)
+{
+ return strtoll(str, NULL, 10);
+}
+#endif
+
+/* windows only pragma */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4786) // Don't warn about too long identifiers
+ #pragma warning(disable : 4514) // unused inline method
+ #pragma warning(disable : 4201) // nameless union
+#endif
+
+#include <set>
+#include <unordered_set>
+#include <vector>
+#include <map>
+#include <unordered_map>
+#include <list>
+#include <algorithm>
+#include <string>
+#include <stdio.h>
+#include <assert.h>
+
+#include "PoolAlloc.h"
+
+//
+// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
+//
+#define POOL_ALLOCATOR_NEW_DELETE(A) \
+ void* operator new(size_t s) { return (A).allocate(s); } \
+ void* operator new(size_t, void *_Where) { return (_Where); } \
+ void operator delete(void*) { } \
+ void operator delete(void *, void *) { } \
+ void* operator new[](size_t s) { return (A).allocate(s); } \
+ void* operator new[](size_t, void *_Where) { return (_Where); } \
+ void operator delete[](void*) { } \
+ void operator delete[](void *, void *) { }
+
+namespace glslang {
+
+ //
+ // Pool version of string.
+ //
+ typedef pool_allocator<char> TStringAllocator;
+ typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
+
+} // end namespace glslang
+
+// Repackage the std::hash for use by unordered map/set with a TString key.
+namespace std {
+
+ template<> struct hash<glslang::TString> {
+ std::size_t operator()(const glslang::TString& s) const
+ {
+ const unsigned _FNV_offset_basis = 2166136261U;
+ const unsigned _FNV_prime = 16777619U;
+ unsigned _Val = _FNV_offset_basis;
+ size_t _Count = s.size();
+ const char* _First = s.c_str();
+ for (size_t _Next = 0; _Next < _Count; ++_Next)
+ {
+ _Val ^= (unsigned)_First[_Next];
+ _Val *= _FNV_prime;
+ }
+
+ return _Val;
+ }
+ };
+}
+
+namespace glslang {
+
+inline TString* NewPoolTString(const char* s)
+{
+ void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
+ return new(memory) TString(s);
+}
+
+template<class T> inline T* NewPoolObject(T)
+{
+ return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
+}
+
+template<class T> inline T* NewPoolObject(T, int instances)
+{
+ return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
+}
+
+//
+// Pool allocator versions of vectors, lists, and maps
+//
+template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
+ TVector() : std::vector<T, pool_allocator<T> >() {}
+ TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
+ TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
+ TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
+};
+
+template <class T> class TList : public std::list<T, pool_allocator<T> > {
+};
+
+template <class K, class D, class CMP = std::less<K> >
+class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
+};
+
+template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
+class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
+};
+
+//
+// Persistent string memory. Should only be used for strings that survive
+// across compiles/links.
+//
+typedef std::basic_string<char> TPersistString;
+
+//
+// templatized min and max functions.
+//
+template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
+template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
+
+//
+// Create a TString object from an integer.
+//
+inline const TString String(const int i, const int base = 10)
+{
+ char text[16]; // 32 bit ints are at most 10 digits in base 10
+
+ #if defined _MSC_VER || defined MINGW_HAS_SECURE_API
+ _itoa_s(i, text, sizeof(text), base);
+ #else
+ // we assume base 10 for all cases
+ snprintf(text, sizeof(text), "%d", i);
+ #endif
+
+ return text;
+}
+
+struct TSourceLoc {
+ void init() { name = nullptr; string = 0; line = 0; column = 0; }
+ // Returns the name if it exists. Otherwise, returns the string number.
+ std::string getStringNameOrNum(bool quoteStringName = true) const
+ {
+ if (name != nullptr)
+ return quoteStringName ? ("\"" + std::string(name) + "\"") : name;
+ return std::to_string((long long)string);
+ }
+ const char* name; // descriptive name for this string
+ int string;
+ int line;
+ int column;
+};
+
+typedef TMap<TString, TString> TPragmaTable;
+
+const int MaxTokenLength = 1024;
+
+template <class T> bool IsPow2(T powerOf2)
+{
+ if (powerOf2 <= 0)
+ return false;
+
+ return (powerOf2 & (powerOf2 - 1)) == 0;
+}
+
+// Round number up to the nearest multiple of powerOf2. Note that powerOf2 is
+// the alignment value itself (which must be a power of 2), not an exponent.
+template <class T> void RoundToPow2(T& number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
+}
+
+template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
+{
+ assert(IsPow2(powerOf2));
+ return ! (number & (powerOf2 - 1));
+}
+
+} // end namespace glslang
+
+#endif // _COMMON_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/ConstantUnion.h b/chromium/third_party/glslang/src/glslang/Include/ConstantUnion.h
new file mode 100644
index 00000000000..8ee2c84ccf2
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/ConstantUnion.h
@@ -0,0 +1,617 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2013 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _CONSTANT_UNION_INCLUDED_
+#define _CONSTANT_UNION_INCLUDED_
+
+namespace glslang {
+
+class TConstUnion {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TConstUnion() : iConst(0), type(EbtInt) { }
+
+ void setIConst(int i)
+ {
+ iConst = i;
+ type = EbtInt;
+ }
+
+ void setUConst(unsigned int u)
+ {
+ uConst = u;
+ type = EbtUint;
+ }
+
+ void setI64Const(long long i64)
+ {
+ i64Const = i64;
+ type = EbtInt64;
+ }
+
+ void setU64Const(unsigned long long u64)
+ {
+ u64Const = u64;
+ type = EbtUint64;
+ }
+
+ void setDConst(double d)
+ {
+ dConst = d;
+ type = EbtDouble;
+ }
+
+ void setBConst(bool b)
+ {
+ bConst = b;
+ type = EbtBool;
+ }
+
+ int getIConst() const { return iConst; }
+ unsigned int getUConst() const { return uConst; }
+ long long getI64Const() const { return i64Const; }
+ unsigned long long getU64Const() const { return u64Const; }
+ double getDConst() const { return dConst; }
+ bool getBConst() const { return bConst; }
+
+ bool operator==(const int i) const
+ {
+ if (i == iConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned int u) const
+ {
+ if (u == uConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const long long i64) const
+ {
+ if (i64 == i64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const unsigned long long u64) const
+ {
+ if (u64 == u64Const)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const double d) const
+ {
+ if (d == dConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const bool b) const
+ {
+ if (b == bConst)
+ return true;
+
+ return false;
+ }
+
+ bool operator==(const TConstUnion& constant) const
+ {
+ if (constant.type != type)
+ return false;
+
+ switch (type) {
+ case EbtInt:
+ if (constant.iConst == iConst)
+ return true;
+
+ break;
+ case EbtUint:
+ if (constant.uConst == uConst)
+ return true;
+
+ break;
+ case EbtInt64:
+ if (constant.i64Const == i64Const)
+ return true;
+
+ break;
+ case EbtUint64:
+ if (constant.u64Const == u64Const)
+ return true;
+
+ break;
+ case EbtDouble:
+ if (constant.dConst == dConst)
+ return true;
+
+ break;
+ case EbtBool:
+ if (constant.bConst == bConst)
+ return true;
+
+ break;
+ default:
+ assert(false && "Default missing");
+ }
+
+ return false;
+ }
+
+ bool operator!=(const int i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned int u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const long long i) const
+ {
+ return !operator==(i);
+ }
+
+ bool operator!=(const unsigned long long u) const
+ {
+ return !operator==(u);
+ }
+
+ bool operator!=(const float f) const
+ {
+ return !operator==(f);
+ }
+
+ bool operator!=(const bool b) const
+ {
+ return !operator==(b);
+ }
+
+ bool operator!=(const TConstUnion& constant) const
+ {
+ return !operator==(constant);
+ }
+
+ bool operator>(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt:
+ if (iConst > constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst > constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const > constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const > constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst > constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ bool operator<(const TConstUnion& constant) const
+ {
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt:
+ if (iConst < constant.iConst)
+ return true;
+
+ return false;
+ case EbtUint:
+ if (uConst < constant.uConst)
+ return true;
+
+ return false;
+ case EbtInt64:
+ if (i64Const < constant.i64Const)
+ return true;
+
+ return false;
+ case EbtUint64:
+ if (u64Const < constant.u64Const)
+ return true;
+
+ return false;
+ case EbtDouble:
+ if (dConst < constant.dConst)
+ return true;
+
+ return false;
+ default:
+ assert(false && "Default missing");
+ return false;
+ }
+ }
+
+ TConstUnion operator+(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
+ case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator-(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
+ case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator*(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
+ case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
+ case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator%(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ assert(type == constant.type);
+ switch (type) {
+ case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
+ case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
+ case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+ TConstUnion operator>>(const TConstUnion& constant) const
+ {
+ TConstUnion returnValue;
+ switch (type) {
+ case EbtInt:
+ switch (constant.type) {
+ case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
+ case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint:
+ switch (constant.type) {
+ case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
+ case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
+ case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
+ case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtInt64:
+ switch (constant.type) {
+ case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ case EbtUint64:
+ switch (constant.type) {
+ case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break;
+ case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break;
+ case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break;
+ case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break;
+ default: assert(false && "Default missing");
+ }
+ break;
+ default: assert(false && "Default missing");
+ }
+
+ return returnValue;
+ }
+
+    // Bitwise left shift.  The result takes the type of the LEFT operand;
+    // the shift count (right operand) may be any of the four integer types.
+    TConstUnion operator<<(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        switch (type) {
+        case EbtInt:
+            switch (constant.type) {
+            case EbtInt:    returnValue.setIConst(iConst << constant.iConst);   break;
+            case EbtUint:   returnValue.setIConst(iConst << constant.uConst);   break;
+            case EbtInt64:  returnValue.setIConst(iConst << constant.i64Const); break;
+            case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
+            default:        assert(false && "Default missing");
+            }
+            break;
+        case EbtUint:
+            switch (constant.type) {
+            case EbtInt:    returnValue.setUConst(uConst << constant.iConst);   break;
+            case EbtUint:   returnValue.setUConst(uConst << constant.uConst);   break;
+            case EbtInt64:  returnValue.setUConst(uConst << constant.i64Const); break;
+            case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
+            default:        assert(false && "Default missing");
+            }
+            break;
+        case EbtInt64:
+            switch (constant.type) {
+            case EbtInt:    returnValue.setI64Const(i64Const << constant.iConst);   break;
+            case EbtUint:   returnValue.setI64Const(i64Const << constant.uConst);   break;
+            case EbtInt64:  returnValue.setI64Const(i64Const << constant.i64Const); break;
+            case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break;
+            default:        assert(false && "Default missing");
+            }
+            break;
+        case EbtUint64:
+            switch (constant.type) {
+            case EbtInt:    returnValue.setU64Const(u64Const << constant.iConst);   break;
+            case EbtUint:   returnValue.setU64Const(u64Const << constant.uConst);   break;
+            case EbtInt64:  returnValue.setU64Const(u64Const << constant.i64Const); break;
+            case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break;
+            default:        assert(false && "Default missing");
+            }
+            break;
+        default: assert(false && "Default missing");  // non-integer left operand is a front-end bug
+        }
+
+        return returnValue;
+    }
+
+    // Bitwise AND.  Both operands must already have the same integer type.
+    TConstUnion operator&(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtInt:    returnValue.setIConst(iConst & constant.iConst);       break;
+        case EbtUint:   returnValue.setUConst(uConst & constant.uConst);       break;
+        case EbtInt64:  returnValue.setI64Const(i64Const & constant.i64Const); break;
+        case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
+        default:        assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    // Bitwise OR.  Both operands must already have the same integer type.
+    TConstUnion operator|(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtInt:    returnValue.setIConst(iConst | constant.iConst);       break;
+        case EbtUint:   returnValue.setUConst(uConst | constant.uConst);       break;
+        case EbtInt64:  returnValue.setI64Const(i64Const | constant.i64Const); break;
+        case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
+        default:        assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    // Bitwise XOR.  Both operands must already have the same integer type.
+    TConstUnion operator^(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtInt:    returnValue.setIConst(iConst ^ constant.iConst);       break;
+        case EbtUint:   returnValue.setUConst(uConst ^ constant.uConst);       break;
+        case EbtInt64:  returnValue.setI64Const(i64Const ^ constant.i64Const); break;
+        case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
+        default:        assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    // Bitwise complement of the stored integer value; result keeps this type.
+    TConstUnion operator~() const
+    {
+        TConstUnion returnValue;
+        switch (type) {
+        case EbtInt:    returnValue.setIConst(~iConst);     break;
+        case EbtUint:   returnValue.setUConst(~uConst);     break;
+        case EbtInt64:  returnValue.setI64Const(~i64Const); break;
+        case EbtUint64: returnValue.setU64Const(~u64Const); break;
+        default:        assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    // Logical AND; defined only for bool operands of matching type.
+    // (No short-circuiting: both operands are already-evaluated constants.)
+    TConstUnion operator&&(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtBool: returnValue.setBConst(bConst && constant.bConst); break;
+        default:      assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    // Logical OR; defined only for bool operands of matching type.
+    // (No short-circuiting: both operands are already-evaluated constants.)
+    TConstUnion operator||(const TConstUnion& constant) const
+    {
+        TConstUnion returnValue;
+        assert(type == constant.type);
+        switch (type) {
+        case EbtBool: returnValue.setBConst(bConst || constant.bConst); break;
+        default:      assert(false && "Default missing");
+        }
+
+        return returnValue;
+    }
+
+    // Which union member is currently active.
+    TBasicType getType() const { return type; }
+
+private:
+    union {
+        int iConst;                  // used for ivec, scalar ints
+        unsigned int uConst;         // used for uvec, scalar uints
+        long long i64Const;          // used for i64vec, scalar int64s
+        unsigned long long u64Const; // used for u64vec, scalar uint64s
+        bool bConst;                 // used for bvec, scalar bools
+        double dConst;               // used for vec, dvec, mat, dmat, scalar floats and doubles
+    };
+
+    TBasicType type;                 // discriminator for the union above
+};
+
+// Encapsulate having a pointer to an array of TConstUnion,
+// which only needs to be allocated if its size is going to be
+// bigger than 0.
+//
+// One convenience is being able to use [] to go inside the array, instead
+// of C++ assuming it as an array of pointers to vectors.
+//
+// General usage is that the size is known up front, and it is
+// created once with the proper size.
+//
+class TConstUnionArray {
+public:
+    POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+    TConstUnionArray() : unionArray(nullptr) { }
+    virtual ~TConstUnionArray() { }
+
+    // Allocate only when there is something to hold; size 0 stays unallocated.
+    explicit TConstUnionArray(int size)
+    {
+        if (size == 0)
+            unionArray = nullptr;
+        else
+            unionArray = new TConstUnionVector(size);
+    }
+    // Shallow copy: shares the underlying vector (pool allocated, never
+    // individually freed, so sharing is safe for the pool's lifetime).
+    TConstUnionArray(const TConstUnionArray& a) : unionArray(a.unionArray) { }
+    // Deep copy of the subrange [start, start + size) of 'a'.
+    TConstUnionArray(const TConstUnionArray& a, int start, int size)
+    {
+        unionArray = new TConstUnionVector(size);
+        for (int i = 0; i < size; ++i)
+            (*unionArray)[i] = a[start + i];
+    }
+
+    // Use this constructor for a smear operation
+    TConstUnionArray(int size, const TConstUnion& val)
+    {
+        unionArray = new TConstUnionVector(size, val);
+    }
+
+    int size() const { return unionArray ? (int)unionArray->size() : 0; }
+    TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
+    const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
+    bool operator==(const TConstUnionArray& rhs) const
+    {
+        // this includes the case that both are unallocated
+        if (unionArray == rhs.unionArray)
+            return true;
+
+        // exactly one side unallocated: cannot be equal
+        if (! unionArray || ! rhs.unionArray)
+            return false;
+
+        return *unionArray == *rhs.unionArray;
+    }
+    bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
+
+    // Component-wise dot product; both arrays must be allocated and the
+    // same size, and the components are read as doubles (getDConst()).
+    double dot(const TConstUnionArray& rhs)
+    {
+        assert(rhs.unionArray->size() == unionArray->size());
+        double sum = 0.0;
+
+        for (size_t comp = 0; comp < unionArray->size(); ++comp)
+            sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
+
+        return sum;
+    }
+
+    bool empty() const { return unionArray == nullptr; }
+
+protected:
+    typedef TVector<TConstUnion> TConstUnionVector;
+    TConstUnionVector* unionArray;
+};
+
+} // end namespace glslang
+
+#endif // _CONSTANT_UNION_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/InfoSink.h b/chromium/third_party/glslang/src/glslang/Include/InfoSink.h
new file mode 100644
index 00000000000..5862e5d8a5c
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/InfoSink.h
@@ -0,0 +1,145 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _INFOSINK_INCLUDED_
+#define _INFOSINK_INCLUDED_
+
+#include "../Include/Common.h"
+#include <math.h>
+
+namespace glslang {
+
+//
+// TPrefixType is used to centralize how info log messages start.
+// See below.
+//
+enum TPrefixType {
+    EPrefixNone,           // no prefix text
+    EPrefixWarning,        // "WARNING: "
+    EPrefixError,          // "ERROR: "
+    EPrefixInternalError,  // "INTERNAL ERROR: "
+    EPrefixUnimplemented,  // "UNIMPLEMENTED: "
+    EPrefixNote            // "NOTE: "
+};
+
+// Bitmask of destinations for sink output; values can be OR'd together.
+enum TOutputStream {
+    ENull = 0,
+    EDebugger = 0x01,
+    EStdOut = 0x02,
+    EString = 0x04,
+};
+//
+// Encapsulate info logs for all objects that have them.
+//
+// The methods are a general set of tools for getting a variety of
+// messages and types inserted into the log.
+//
+class TInfoSinkBase {
+public:
+    // Default destination is the in-memory string sink (EString == 0x04,
+    // the value the magic number 4 previously encoded).
+    TInfoSinkBase() : outputStream(EString) {}
+    void erase() { sink.erase(); }
+    TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; }
+    TInfoSinkBase& operator<<(char c) { append(1, c); return *this; }
+    TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
+    TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
+    TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
+    TInfoSinkBase& operator<<(long unsigned int n) { append(String(n)); return *this; }
+    // Fixed notation for magnitudes in a human-readable range, %g otherwise.
+    TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size];
+                                         snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
+                                         append(buf);
+                                         return *this; }
+    TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
+    TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
+    TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; }
+    TInfoSinkBase& operator+(const char* s) { append(s); return *this; }
+    const char* c_str() const { return sink.c_str(); }
+    // Emit the standard text prefix for the given message class.
+    void prefix(TPrefixType message) {
+        switch(message) {
+        case EPrefixNone:                                      break;
+        case EPrefixWarning:       append("WARNING: ");        break;
+        case EPrefixError:         append("ERROR: ");          break;
+        case EPrefixInternalError: append("INTERNAL ERROR: "); break;
+        case EPrefixUnimplemented: append("UNIMPLEMENTED: ");  break;
+        case EPrefixNote:          append("NOTE: ");           break;
+        default:                   append("UNKNOWN ERROR: ");  break;
+        }
+    }
+    // Emit "<name-or-number>:<line>: " for the given source location.
+    void location(const TSourceLoc& loc) {
+        const int maxSize = 24;
+        char locText[maxSize];
+        snprintf(locText, maxSize, ":%d", loc.line);
+        append(loc.getStringNameOrNum(false).c_str());
+        append(locText);
+        append(": ");
+    }
+    // Append "<prefix><message>\n".
+    void message(TPrefixType message, const char* s) {
+        prefix(message);
+        append(s);
+        append("\n");
+    }
+    // Append "<prefix><location>: <message>\n".
+    void message(TPrefixType message, const char* s, const TSourceLoc& loc) {
+        prefix(message);
+        location(loc);
+        append(s);
+        append("\n");
+    }
+
+    // Select the destination(s) for appended text; a TOutputStream bitmask.
+    void setOutputStream(int output = EString)
+    {
+        outputStream = output;
+    }
+
+protected:
+    void append(const char* s);
+
+    void append(int count, char c);
+    void append(const TPersistString& t);
+    void append(const TString& t);
+
+    // Grow capacity by ~1.5x ahead of demand to limit reallocations.
+    void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2)
+                                       sink.reserve(sink.capacity() +  sink.capacity() / 2); }
+    void appendToStream(const char* s);
+    TPersistString sink;  // accumulated log text
+    int outputStream;     // TOutputStream bitmask of active destinations
+};
+
+} // end namespace glslang
+
+// Bundles the two logs a compilation object carries: 'info' for normal
+// user-facing messages and 'debug' for debug output.
+class TInfoSink {
+public:
+    glslang::TInfoSinkBase info;
+    glslang::TInfoSinkBase debug;
+};
+
+#endif // _INFOSINK_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/InitializeGlobals.h b/chromium/third_party/glslang/src/glslang/Include/InitializeGlobals.h
new file mode 100644
index 00000000000..6c9f54a03d4
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/InitializeGlobals.h
@@ -0,0 +1,47 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+// Guard renamed: identifiers containing a double underscore are reserved
+// to the implementation ([lex.name]), so __INITIALIZE_GLOBALS_INCLUDED_
+// was formally undefined behavior to define.
+#ifndef _INITIALIZE_GLOBALS_INCLUDED_
+#define _INITIALIZE_GLOBALS_INCLUDED_
+
+namespace glslang {
+
+// Lifetime management for the global/thread memory pools (see PoolAlloc.h).
+void InitializeMemoryPools();
+void FreeGlobalPools();
+bool InitializePoolIndex();
+void FreePoolIndex();
+
+} // end namespace glslang
+
+#endif // _INITIALIZE_GLOBALS_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/PoolAlloc.h b/chromium/third_party/glslang/src/glslang/Include/PoolAlloc.h
new file mode 100644
index 00000000000..c3bebc6317e
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/PoolAlloc.h
@@ -0,0 +1,325 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2012-2013 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _POOLALLOC_INCLUDED_
+#define _POOLALLOC_INCLUDED_
+
+#ifdef _DEBUG
+# define GUARD_BLOCKS // define to enable guard block sanity checking
+#endif
+
+//
+// This header defines an allocator that can be used to efficiently
+// allocate a large number of small requests for heap memory, with the
+// intention that they are not individually deallocated, but rather
+// collectively deallocated at one time.
+//
+// This simultaneously
+//
+// * Makes each individual allocation much more efficient; the
+// typical allocation is trivial.
+// * Completely avoids the cost of doing individual deallocation.
+// * Saves the trouble of tracking down and plugging a large class of leaks.
+//
+// Individual classes can use this allocator by supplying their own
+// new and delete methods.
+//
+// STL containers can use this allocator by using the pool_allocator
+// class as the allocator (second) template argument.
+//
+
+#include <stddef.h>
+#include <string.h>
+#include <vector>
+
+namespace glslang {
+
+// If we are using guard blocks, we must track each individual
+// allocation. If we aren't using guard blocks, these
+// never get instantiated, so won't have any impact.
+//
+
+// Header prepended to each allocation when guard blocks are enabled;
+// records the user size and links allocations for post-mortem checking.
+class TAllocation {
+public:
+    TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
+        size(size), mem(mem), prevAlloc(prev) {
+        // Allocations are bracketed:
+        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
+        // This would be cleaner with if (guardBlockSize)..., but that
+        // makes the compiler print warnings about 0 length memsets,
+        // even with the if() protecting them.
+#       ifdef GUARD_BLOCKS
+        memset(preGuard(),  guardBlockBeginVal, guardBlockSize);
+        memset(data(),      userDataFill,       size);
+        memset(postGuard(), guardBlockEndVal,   guardBlockSize);
+#       endif
+    }
+
+    // Verify both guard blocks are still intact (i.e. no under/overrun).
+    void check() const {
+        checkGuardBlock(preGuard(),  guardBlockBeginVal, "before");
+        checkGuardBlock(postGuard(), guardBlockEndVal,   "after");
+    }
+
+    void checkAllocList() const;
+
+    // Return total size needed to accommodate user buffer of 'size',
+    // plus our tracking data.
+    inline static size_t allocationSize(size_t size) {
+        return size + 2 * guardBlockSize + headerSize();
+    }
+
+    // Offset from surrounding buffer to get to user data buffer.
+    inline static unsigned char* offsetAllocation(unsigned char* m) {
+        return m + guardBlockSize + headerSize();
+    }
+
+private:
+    void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
+
+    // Find offsets to pre and post guard blocks, and user data buffer
+    unsigned char* preGuard()  const { return mem + headerSize(); }
+    unsigned char* data()      const { return preGuard() + guardBlockSize; }
+    unsigned char* postGuard() const { return data() + size; }
+
+    size_t size;                  // size of the user data area
+    unsigned char* mem;           // beginning of our allocation (pts to header)
+    TAllocation* prevAlloc;       // prior allocation in the chain
+
+    const static unsigned char guardBlockBeginVal;
+    const static unsigned char guardBlockEndVal;
+    const static unsigned char userDataFill;
+
+    const static size_t guardBlockSize;
+    // With guard blocks off the header occupies no space at all.
+#   ifdef GUARD_BLOCKS
+    inline static size_t headerSize() { return sizeof(TAllocation); }
+#   else
+    inline static size_t headerSize() { return 0; }
+#   endif
+};
+
+//
+// There are several stacks. One is to track the pushing and popping
+// of the user, and not yet implemented. The others are simply a
+// repositories of free pages or used pages.
+//
+// Page stacks are linked together with a simple header at the beginning
+// of each allocation obtained from the underlying OS. Multi-page allocations
+// are returned to the OS. Individual page allocations are kept for future
+// re-use.
+//
+// The "page size" used is not, nor must it match, the underlying OS
+// page size. But, having it be about that size or equal to a set of
+// pages is likely most optimal.
+//
+// Page-based bump allocator: allocations are served from the current page
+// and released en masse by pop()/popAll(), never individually.
+class TPoolAllocator {
+public:
+    TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
+
+    //
+    // Don't call the destructor just to free up the memory, call pop()
+    //
+    ~TPoolAllocator();
+
+    //
+    // Call push() to establish a new place to pop memory to.  Does not
+    // have to be called to get things started.
+    //
+    void push();
+
+    //
+    // Call pop() to free all memory allocated since the last call to push(),
+    // or if no last call to push, frees all memory since first allocation.
+    //
+    void pop();
+
+    //
+    // Call popAll() to free all memory allocated.
+    //
+    void popAll();
+
+    //
+    // Call allocate() to actually acquire memory.  Returns 0 if no memory
+    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
+    //
+    void* allocate(size_t numBytes);
+
+    //
+    // There is no deallocate.  The point of this class is that
+    // deallocation can be skipped by the user of it, as the model
+    // of use is to simultaneously deallocate everything at once
+    // by calling pop(), and to not have to solve memory leak problems.
+    //
+
+protected:
+    friend struct tHeader;
+
+    // Header at the front of every page obtained from the OS; links pages
+    // into the freeList/inUseList chains.
+    struct tHeader {
+        tHeader(tHeader* nextPage, size_t pageCount) :
+#ifdef GUARD_BLOCKS
+            lastAllocation(0),
+#endif
+            nextPage(nextPage), pageCount(pageCount) { }
+
+        ~tHeader() {
+#ifdef GUARD_BLOCKS
+            // Walk the allocation chain and verify every guard block.
+            if (lastAllocation)
+                lastAllocation->checkAllocList();
+#endif
+        }
+
+#ifdef GUARD_BLOCKS
+        TAllocation* lastAllocation;
+#endif
+        tHeader* nextPage;
+        size_t pageCount;
+    };
+
+    // Snapshot of the allocation cursor, saved by push() and restored by pop().
+    struct tAllocState {
+        size_t offset;
+        tHeader* page;
+    };
+    typedef std::vector<tAllocState> tAllocStack;
+
+    // Track allocations if and only if we're using guard blocks
+#ifndef GUARD_BLOCKS
+    void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
+#else
+    void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
+        // Placement-new the tracking header and link it into the page's chain.
+        new(memory) TAllocation(numBytes, memory, block->lastAllocation);
+        block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
+#endif
+
+        // This is optimized entirely away if GUARD_BLOCKS is not defined.
+        return TAllocation::offsetAllocation(memory);
+    }
+
+    size_t pageSize;           // granularity of allocation from the OS
+    size_t alignment;          // all returned allocations will be aligned at
+                               // this granularity, which will be a power of 2
+    size_t alignmentMask;
+    size_t headerSkip;         // amount of memory to skip to make room for the
+                               //      header (basically, size of header, rounded
+                               //      up to make it aligned)
+    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
+    tHeader* freeList;         // list of popped memory
+    tHeader* inUseList;        // list of all memory currently being used
+    tAllocStack stack;         // stack of where to allocate from, to partition pool
+
+    int numCalls;              // just an interesting statistic
+    size_t totalBytes;         // just an interesting statistic
+private:
+    TPoolAllocator& operator=(const TPoolAllocator&);  // don't allow assignment operator
+    TPoolAllocator(const TPoolAllocator&);  // don't allow default copy constructor
+};
+
+
+//
+// There could potentially be many pools with pops happening at
+// different times. But a simple use is to have a global pop
+// with everyone using the same global allocator.
+//
+typedef TPoolAllocator* PoolAllocatorPointer;
+extern TPoolAllocator& GetThreadPoolAllocator();
+
+struct TThreadMemoryPools
+{
+ TPoolAllocator* threadPoolAllocator;
+};
+
+void SetThreadPoolAllocator(TPoolAllocator& poolAllocator);
+
+//
+// This STL compatible allocator is intended to be used as the allocator
+// parameter to templatized STL containers, like vector and map.
+//
+// It will use the pools for allocation, and not
+// do any deallocation, but will still do destruction.
+//
+// STL-compatible allocator that draws from a TPoolAllocator: allocate()
+// bumps the pool, deallocate() is a no-op (the pool is popped wholesale).
+template<class T>
+class pool_allocator {
+public:
+    typedef size_t size_type;
+    typedef ptrdiff_t difference_type;
+    typedef T *pointer;
+    typedef const T *const_pointer;
+    typedef T& reference;
+    typedef const T& const_reference;
+    typedef T value_type;
+    template<class Other>
+        struct rebind {
+            typedef pool_allocator<Other> other;
+        };
+    pointer address(reference x) const { return &x; }
+    const_pointer address(const_reference x) const { return &x; }
+
+    pool_allocator() : allocator(GetThreadPoolAllocator()) { }
+    pool_allocator(TPoolAllocator& a) : allocator(a) { }
+    pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
+
+    template<class Other>
+        pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
+
+    pointer allocate(size_type n) {
+        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+    pointer allocate(size_type n, const void*) {
+        return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
+
+    // Individual deallocation is intentionally a no-op; memory is reclaimed
+    // when the pool itself is popped.
+    void deallocate(void*, size_type) { }
+    void deallocate(pointer, size_type) { }
+
+    pointer _Charalloc(size_t n) {
+        return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
+
+    void construct(pointer p, const T& val) { new ((void *)p) T(val); }
+    void destroy(pointer p) { p->T::~T(); }
+
+    bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
+    bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
+
+    size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
+    size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
+
+    // NOTE(review): 'allocator' is a reference, so 'allocator = *a' copy-assigns
+    // the pool object itself via TPoolAllocator's private operator= — this cannot
+    // compile if ever instantiated; it does not rebind the reference. Confirm
+    // whether any caller uses it before relying on this member.
+    void setAllocator(TPoolAllocator* a) { allocator = *a; }
+    TPoolAllocator& getAllocator() const { return allocator; }
+
+protected:
+    pool_allocator& operator=(const pool_allocator&) { return *this; }
+    TPoolAllocator& allocator;
+};
+
+} // end namespace glslang
+
+#endif // _POOLALLOC_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/ResourceLimits.h b/chromium/third_party/glslang/src/glslang/Include/ResourceLimits.h
new file mode 100644
index 00000000000..e8c743d6b7c
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/ResourceLimits.h
@@ -0,0 +1,140 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2013 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _RESOURCE_LIMITS_INCLUDED_
+#define _RESOURCE_LIMITS_INCLUDED_
+
+// Feature toggles describing which shader constructs are accepted —
+// field names indicate loop forms and indexing generality; presumably
+// consulted by the front end when validating shaders (set by the client).
+struct TLimits {
+    bool nonInductiveForLoops;
+    bool whileLoops;
+    bool doWhileLoops;
+    bool generalUniformIndexing;
+    bool generalAttributeMatrixVectorIndexing;
+    bool generalVaryingIndexing;
+    bool generalSamplerIndexing;
+    bool generalVariableIndexing;
+    bool generalConstantMatrixVectorIndexing;
+};
+
+// Numeric resource limits supplied by the client; the names mirror the
+// corresponding GLSL gl_Max* built-in constants, per stage and per feature.
+struct TBuiltInResource {
+    int maxLights;
+    int maxClipPlanes;
+    int maxTextureUnits;
+    int maxTextureCoords;
+    int maxVertexAttribs;
+    int maxVertexUniformComponents;
+    int maxVaryingFloats;
+    int maxVertexTextureImageUnits;
+    int maxCombinedTextureImageUnits;
+    int maxTextureImageUnits;
+    int maxFragmentUniformComponents;
+    int maxDrawBuffers;
+    int maxVertexUniformVectors;
+    int maxVaryingVectors;
+    int maxFragmentUniformVectors;
+    int maxVertexOutputVectors;
+    int maxFragmentInputVectors;
+    int minProgramTexelOffset;
+    int maxProgramTexelOffset;
+    int maxClipDistances;
+    int maxComputeWorkGroupCountX;
+    int maxComputeWorkGroupCountY;
+    int maxComputeWorkGroupCountZ;
+    int maxComputeWorkGroupSizeX;
+    int maxComputeWorkGroupSizeY;
+    int maxComputeWorkGroupSizeZ;
+    int maxComputeUniformComponents;
+    int maxComputeTextureImageUnits;
+    int maxComputeImageUniforms;
+    int maxComputeAtomicCounters;
+    int maxComputeAtomicCounterBuffers;
+    int maxVaryingComponents;
+    int maxVertexOutputComponents;
+    int maxGeometryInputComponents;
+    int maxGeometryOutputComponents;
+    int maxFragmentInputComponents;
+    int maxImageUnits;
+    int maxCombinedImageUnitsAndFragmentOutputs;
+    int maxCombinedShaderOutputResources;
+    int maxImageSamples;
+    int maxVertexImageUniforms;
+    int maxTessControlImageUniforms;
+    int maxTessEvaluationImageUniforms;
+    int maxGeometryImageUniforms;
+    int maxFragmentImageUniforms;
+    int maxCombinedImageUniforms;
+    int maxGeometryTextureImageUnits;
+    int maxGeometryOutputVertices;
+    int maxGeometryTotalOutputComponents;
+    int maxGeometryUniformComponents;
+    int maxGeometryVaryingComponents;
+    int maxTessControlInputComponents;
+    int maxTessControlOutputComponents;
+    int maxTessControlTextureImageUnits;
+    int maxTessControlUniformComponents;
+    int maxTessControlTotalOutputComponents;
+    int maxTessEvaluationInputComponents;
+    int maxTessEvaluationOutputComponents;
+    int maxTessEvaluationTextureImageUnits;
+    int maxTessEvaluationUniformComponents;
+    int maxTessPatchComponents;
+    int maxPatchVertices;
+    int maxTessGenLevel;
+    int maxViewports;
+    int maxVertexAtomicCounters;
+    int maxTessControlAtomicCounters;
+    int maxTessEvaluationAtomicCounters;
+    int maxGeometryAtomicCounters;
+    int maxFragmentAtomicCounters;
+    int maxCombinedAtomicCounters;
+    int maxAtomicCounterBindings;
+    int maxVertexAtomicCounterBuffers;
+    int maxTessControlAtomicCounterBuffers;
+    int maxTessEvaluationAtomicCounterBuffers;
+    int maxGeometryAtomicCounterBuffers;
+    int maxFragmentAtomicCounterBuffers;
+    int maxCombinedAtomicCounterBuffers;
+    int maxAtomicCounterBufferSize;
+    int maxTransformFeedbackBuffers;
+    int maxTransformFeedbackInterleavedComponents;
+    int maxCullDistances;
+    int maxCombinedClipAndCullDistances;
+    int maxSamples;
+
+    // Boolean feature limits (see TLimits above in this header).
+    TLimits limits;
+};
+
+#endif // _RESOURCE_LIMITS_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/ShHandle.h b/chromium/third_party/glslang/src/glslang/Include/ShHandle.h
new file mode 100644
index 00000000000..fee641396cd
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/ShHandle.h
@@ -0,0 +1,174 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _SHHANDLE_INCLUDED_
+#define _SHHANDLE_INCLUDED_
+
+//
+// Machine independent part of the compiler private objects
+// sent as ShHandle to the driver.
+//
+// This should not be included by driver code.
+//
+
+
+#define SH_EXPORTING
+#include "../Public/ShaderLang.h"
+#include "../MachineIndependent/Versions.h"
+#include "InfoSink.h"
+
+class TCompiler;
+class TLinker;
+class TUniformMap;
+
+//
+// The base class used to back handles returned to the driver.
+//
+class TShHandleBase {
+public:
+ TShHandleBase() { }
+ virtual ~TShHandleBase() { }
+ virtual TCompiler* getAsCompiler() { return 0; }
+ virtual TLinker* getAsLinker() { return 0; }
+ virtual TUniformMap* getAsUniformMap() { return 0; }
+};
+
+//
+// The base class for the machine dependent linker to derive from
+// for managing where uniforms live.
+//
+class TUniformMap : public TShHandleBase {
+public:
+ TUniformMap() { }
+ virtual ~TUniformMap() { }
+ virtual TUniformMap* getAsUniformMap() { return this; }
+ virtual int getLocation(const char* name) = 0;
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink infoSink;
+};
+
+class TIntermNode;
+
+//
+// The base class for the machine dependent compiler to derive from
+// for managing object code from the compile.
+//
+class TCompiler : public TShHandleBase {
+public:
+ TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { }
+ virtual ~TCompiler() { }
+ EShLanguage getLanguage() { return language; }
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+
+ virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
+
+ virtual TCompiler* getAsCompiler() { return this; }
+ virtual bool linkable() { return haveValidObjectCode; }
+
+ TInfoSink& infoSink;
+protected:
+ TCompiler& operator=(TCompiler&);
+
+ EShLanguage language;
+ bool haveValidObjectCode;
+};
+
+//
+// Link operations are based on a list of compile results...
+//
+typedef glslang::TVector<TCompiler*> TCompilerList;
+typedef glslang::TVector<TShHandleBase*> THandleList;
+
+//
+// The base class for the machine dependent linker to derive from
+// to manage the resulting executable.
+//
+
+class TLinker : public TShHandleBase {
+public:
+ TLinker(EShExecutable e, TInfoSink& iSink) :
+ infoSink(iSink),
+ executable(e),
+ haveReturnableObjectCode(false),
+ appAttributeBindings(0),
+ fixedAttributeBindings(0),
+ excludedAttributes(0),
+ excludedCount(0),
+ uniformBindings(0) { }
+ virtual TLinker* getAsLinker() { return this; }
+ virtual ~TLinker() { }
+ virtual bool link(TCompilerList&, TUniformMap*) = 0;
+ virtual bool link(THandleList&) { return false; }
+ virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
+ virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
+ virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
+ virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
+ virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
+ virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
+ virtual TInfoSink& getInfoSink() { return infoSink; }
+ TInfoSink& infoSink;
+protected:
+ TLinker& operator=(TLinker&);
+ EShExecutable executable;
+ bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
+
+ const ShBindingTable* appAttributeBindings;
+ const ShBindingTable* fixedAttributeBindings;
+ const int* excludedAttributes;
+ int excludedCount;
+ ShBindingTable* uniformBindings; // created by the linker
+};
+
+//
+// This is the interface between the machine independent code
+// and the machine dependent code.
+//
+// The machine dependent code should derive from the classes
+// above. Then Construct*() and Delete*() will create and
+// destroy the machine dependent objects, which contain the
+// above machine independent information.
+//
+TCompiler* ConstructCompiler(EShLanguage, int);
+
+TShHandleBase* ConstructLinker(EShExecutable, int);
+TShHandleBase* ConstructBindings();
+void DeleteLinker(TShHandleBase*);
+void DeleteBindingList(TShHandleBase* bindingList);
+
+TUniformMap* ConstructUniformMap();
+void DeleteCompiler(TCompiler*);
+
+void DeleteUniformMap(TUniformMap*);
+
+#endif // _SHHANDLE_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/Types.h b/chromium/third_party/glslang/src/glslang/Include/Types.h
new file mode 100644
index 00000000000..e37c77cdb78
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/Types.h
@@ -0,0 +1,1714 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2012-2016 LunarG, Inc.
+//Copyright (C) 2015-2016 Google, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _TYPES_INCLUDED
+#define _TYPES_INCLUDED
+
+#include "../Include/Common.h"
+#include "../Include/BaseTypes.h"
+#include "../Public/ShaderLang.h"
+#include "arrays.h"
+
+namespace glslang {
+
+const int GlslangMaxTypeLength = 200; // TODO: need to print block/struct one member per line, so this can stay bounded
+
+const char* const AnonymousPrefix = "anon@"; // for something like a block whose members can be directly accessed
+inline bool IsAnonymous(const TString& name)
+{
+ return name.compare(0, 5, AnonymousPrefix) == 0;
+}
+
+//
+// Details within a sampler type
+//
+enum TSamplerDim {
+ EsdNone,
+ Esd1D,
+ Esd2D,
+ Esd3D,
+ EsdCube,
+ EsdRect,
+ EsdBuffer,
+ EsdSubpass, // goes only with non-sampled image (image is true)
+ EsdNumDims
+};
+
+struct TSampler { // misnomer now; includes images, textures without sampler, and textures with sampler
+ TBasicType type : 8; // type returned by sampler
+ TSamplerDim dim : 8;
+ bool arrayed : 1;
+ bool shadow : 1;
+ bool ms : 1;
+ bool image : 1; // image, combined should be false
+ bool combined : 1; // true means texture is combined with a sampler, false means texture with no sampler
+ bool sampler : 1; // true means a pure sampler, other fields should be clear()
+ bool external : 1; // GL_OES_EGL_image_external
+
+ bool isImage() const { return image && dim != EsdSubpass; }
+ bool isSubpass() const { return dim == EsdSubpass; }
+ bool isCombined() const { return combined; }
+ bool isPureSampler() const { return sampler; }
+ bool isTexture() const { return !sampler && !image; }
+
+ void clear()
+ {
+ type = EbtVoid;
+ dim = EsdNone;
+ arrayed = false;
+ shadow = false;
+ ms = false;
+ image = false;
+ combined = false;
+ sampler = false;
+ external = false;
+ }
+
+ // make a combined sampler and texture
+ void set(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ combined = true;
+ }
+
+ // make an image
+ void setImage(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ image = true;
+ }
+
+ // make a texture with no sampler
+ void setTexture(TBasicType t, TSamplerDim d, bool a = false, bool s = false, bool m = false)
+ {
+ clear();
+ type = t;
+ dim = d;
+ arrayed = a;
+ shadow = s;
+ ms = m;
+ }
+
+ // make a subpass input attachment
+ void setSubpass(TBasicType t, bool m = false)
+ {
+ clear();
+ type = t;
+ image = true;
+ dim = EsdSubpass;
+ ms = m;
+ }
+
+ // make a pure sampler, no texture, no image, nothing combined, the 'sampler' keyword
+ void setPureSampler(bool s)
+ {
+ clear();
+ sampler = true;
+ shadow = s;
+ }
+
+ bool operator==(const TSampler& right) const
+ {
+ return type == right.type &&
+ dim == right.dim &&
+ arrayed == right.arrayed &&
+ shadow == right.shadow &&
+ ms == right.ms &&
+ image == right.image &&
+ combined == right.combined &&
+ sampler == right.sampler &&
+ external == right.external;
+ }
+
+ bool operator!=(const TSampler& right) const
+ {
+ return ! operator==(right);
+ }
+
+ TString getString() const
+ {
+ TString s;
+
+ if (sampler) {
+ s.append("sampler");
+ return s;
+ }
+
+ switch (type) {
+ case EbtFloat: break;
+ case EbtInt: s.append("i"); break;
+ case EbtUint: s.append("u"); break;
+ case EbtInt64: s.append("i64"); break;
+ case EbtUint64: s.append("u64"); break;
+ default: break; // some compilers want this
+ }
+ if (image) {
+ if (dim == EsdSubpass)
+ s.append("subpass");
+ else
+ s.append("image");
+ } else if (combined) {
+ s.append("sampler");
+ } else {
+ s.append("texture");
+ }
+ if (external) {
+ s.append("ExternalOES");
+ return s;
+ }
+ switch (dim) {
+ case Esd1D: s.append("1D"); break;
+ case Esd2D: s.append("2D"); break;
+ case Esd3D: s.append("3D"); break;
+ case EsdCube: s.append("Cube"); break;
+ case EsdRect: s.append("2DRect"); break;
+ case EsdBuffer: s.append("Buffer"); break;
+ case EsdSubpass: s.append("Input"); break;
+ default: break; // some compilers want this
+ }
+ if (ms)
+ s.append("MS");
+ if (arrayed)
+ s.append("Array");
+ if (shadow)
+ s.append("Shadow");
+
+ return s;
+ }
+};
+
+//
+// Need to have association of line numbers to types in a list for building structs.
+//
+class TType;
+struct TTypeLoc {
+ TType* type;
+ TSourceLoc loc;
+};
+typedef TVector<TTypeLoc> TTypeList;
+
+typedef TVector<TString*> TIdentifierList;
+
+//
+// Following are a series of helper enums for managing layouts and qualifiers,
+// used for TPublicType, TType, others.
+//
+
+enum TLayoutPacking {
+ ElpNone,
+ ElpShared, // default, but different than saying nothing
+ ElpStd140,
+ ElpStd430,
+ ElpPacked,
+ ElpCount // If expanding, see bitfield width below
+};
+
+enum TLayoutMatrix {
+ ElmNone,
+ ElmRowMajor,
+ ElmColumnMajor, // default, but different than saying nothing
+ ElmCount // If expanding, see bitfield width below
+};
+
+// Union of geometry shader and tessellation shader geometry types.
+// They don't go into TType, but rather have current state per shader or
+// active parser type (TPublicType).
+enum TLayoutGeometry {
+ ElgNone,
+ ElgPoints,
+ ElgLines,
+ ElgLinesAdjacency,
+ ElgLineStrip,
+ ElgTriangles,
+ ElgTrianglesAdjacency,
+ ElgTriangleStrip,
+ ElgQuads,
+ ElgIsolines,
+};
+
+enum TVertexSpacing {
+ EvsNone,
+ EvsEqual,
+ EvsFractionalEven,
+ EvsFractionalOdd
+};
+
+enum TVertexOrder {
+ EvoNone,
+ EvoCw,
+ EvoCcw
+};
+
+// Note: order matters, as type of format is done by comparison.
+enum TLayoutFormat {
+ ElfNone,
+
+ // Float image
+ ElfRgba32f,
+ ElfRgba16f,
+ ElfR32f,
+ ElfRgba8,
+ ElfRgba8Snorm,
+
+ ElfEsFloatGuard, // to help with comparisons
+
+ ElfRg32f,
+ ElfRg16f,
+ ElfR11fG11fB10f,
+ ElfR16f,
+ ElfRgba16,
+ ElfRgb10A2,
+ ElfRg16,
+ ElfRg8,
+ ElfR16,
+ ElfR8,
+ ElfRgba16Snorm,
+ ElfRg16Snorm,
+ ElfRg8Snorm,
+ ElfR16Snorm,
+ ElfR8Snorm,
+
+ ElfFloatGuard, // to help with comparisons
+
+ // Int image
+ ElfRgba32i,
+ ElfRgba16i,
+ ElfRgba8i,
+ ElfR32i,
+
+ ElfEsIntGuard, // to help with comparisons
+
+ ElfRg32i,
+ ElfRg16i,
+ ElfRg8i,
+ ElfR16i,
+ ElfR8i,
+
+ ElfIntGuard, // to help with comparisons
+
+ // Uint image
+ ElfRgba32ui,
+ ElfRgba16ui,
+ ElfRgba8ui,
+ ElfR32ui,
+
+ ElfEsUintGuard, // to help with comparisons
+
+ ElfRg32ui,
+ ElfRg16ui,
+ ElfRgb10a2ui,
+ ElfRg8ui,
+ ElfR16ui,
+ ElfR8ui,
+
+ ElfCount
+};
+
+enum TLayoutDepth {
+ EldNone,
+ EldAny,
+ EldGreater,
+ EldLess,
+ EldUnchanged,
+
+ EldCount
+};
+
+enum TBlendEquationShift {
+ // No 'EBlendNone':
+ // These are used as bit-shift amounts. A mask of such shifts will have type 'int',
+ // and in that space, 0 means no bits set, or none. In this enum, 0 means (1 << 0), a bit is set.
+ EBlendMultiply,
+ EBlendScreen,
+ EBlendOverlay,
+ EBlendDarken,
+ EBlendLighten,
+ EBlendColordodge,
+ EBlendColorburn,
+ EBlendHardlight,
+ EBlendSoftlight,
+ EBlendDifference,
+ EBlendExclusion,
+ EBlendHslHue,
+ EBlendHslSaturation,
+ EBlendHslColor,
+ EBlendHslLuminosity,
+ EBlendAllEquations,
+
+ EBlendCount
+};
+
+class TQualifier {
+public:
+ static const int layoutNotSet = -1;
+
+ void clear()
+ {
+ precision = EpqNone;
+ invariant = false;
+ noContraction = false;
+ makeTemporary();
+ }
+
+ // drop qualifiers that don't belong in a temporary variable
+ void makeTemporary()
+ {
+ storage = EvqTemporary;
+ builtIn = EbvNone;
+ centroid = false;
+ smooth = false;
+ flat = false;
+ nopersp = false;
+ patch = false;
+ sample = false;
+ coherent = false;
+ volatil = false;
+ restrict = false;
+ readonly = false;
+ writeonly = false;
+ specConstant = false;
+ clearLayout();
+ }
+
+ // Drop just the storage qualification, which perhaps should
+ // never be done, as it is fundamentally inconsistent, but need to
+ // explore what downstream consumers need.
+ // E.g., in a deference, it is an inconsistency between:
+ // A) partially dereferenced resource is still in the storage class it started in
+ // B) partially dereferenced resource is a new temporary object
+ // If A, then nothing should change, if B, then everything should change, but this is half way.
+ void makePartialTemporary()
+ {
+ storage = EvqTemporary;
+ specConstant = false;
+ }
+
+ TStorageQualifier storage : 6;
+ TBuiltInVariable builtIn : 8;
+ TPrecisionQualifier precision : 3;
+ bool invariant : 1; // require canonical treatment for cross-shader invariance
+ bool noContraction: 1; // prevent contraction and reassociation, e.g., for 'precise' keyword, and expressions it affects
+ bool centroid : 1;
+ bool smooth : 1;
+ bool flat : 1;
+ bool nopersp : 1;
+ bool patch : 1;
+ bool sample : 1;
+ bool coherent : 1;
+ bool volatil : 1;
+ bool restrict : 1;
+ bool readonly : 1;
+ bool writeonly : 1;
+ bool specConstant : 1; // having a constant_id is not sufficient: expressions have no id, but are still specConstant
+
+ bool isMemory() const
+ {
+ return coherent || volatil || restrict || readonly || writeonly;
+ }
+ bool isInterpolation() const
+ {
+ return flat || smooth || nopersp;
+ }
+ bool isAuxiliary() const
+ {
+ return centroid || patch || sample;
+ }
+
+ bool isPipeInput() const
+ {
+ switch (storage) {
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isPipeOutput() const
+ {
+ switch (storage) {
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamInput() const
+ {
+ switch (storage) {
+ case EvqIn:
+ case EvqInOut:
+ case EvqConstReadOnly:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isParamOutput() const
+ {
+ switch (storage) {
+ case EvqOut:
+ case EvqInOut:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isUniformOrBuffer() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool isIo() const
+ {
+ switch (storage) {
+ case EvqUniform:
+ case EvqBuffer:
+ case EvqVaryingIn:
+ case EvqFragCoord:
+ case EvqPointCoord:
+ case EvqFace:
+ case EvqVertexId:
+ case EvqInstanceId:
+ case EvqPosition:
+ case EvqPointSize:
+ case EvqClipVertex:
+ case EvqVaryingOut:
+ case EvqFragColor:
+ case EvqFragDepth:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // True if this type of IO is supposed to be arrayed with extra level for per-vertex data
+ bool isArrayedIo(EShLanguage language) const
+ {
+ switch (language) {
+ case EShLangGeometry:
+ return isPipeInput();
+ case EShLangTessControl:
+ return ! patch && (isPipeInput() || isPipeOutput());
+ case EShLangTessEvaluation:
+ return ! patch && isPipeInput();
+ default:
+ return false;
+ }
+ }
+
+ // Implementing an embedded layout-qualifier class here, since C++ can't have a real class bitfield
+ void clearLayout()
+ {
+ layoutMatrix = ElmNone;
+ layoutPacking = ElpNone;
+ layoutOffset = layoutNotSet;
+ layoutAlign = layoutNotSet;
+
+ layoutLocation = layoutLocationEnd;
+ layoutComponent = layoutComponentEnd;
+ layoutSet = layoutSetEnd;
+ layoutBinding = layoutBindingEnd;
+ layoutIndex = layoutIndexEnd;
+
+ layoutStream = layoutStreamEnd;
+
+ layoutXfbBuffer = layoutXfbBufferEnd;
+ layoutXfbStride = layoutXfbStrideEnd;
+ layoutXfbOffset = layoutXfbOffsetEnd;
+ layoutAttachment = layoutAttachmentEnd;
+ layoutSpecConstantId = layoutSpecConstantIdEnd;
+
+ layoutFormat = ElfNone;
+
+ layoutPushConstant = false;
+ }
+ bool hasLayout() const
+ {
+ return hasUniformLayout() ||
+ hasAnyLocation() ||
+ hasBinding() ||
+ hasStream() ||
+ hasXfb() ||
+ hasFormat() ||
+ layoutPushConstant;
+ }
+ TLayoutMatrix layoutMatrix : 3;
+ TLayoutPacking layoutPacking : 4;
+ int layoutOffset;
+ int layoutAlign;
+
+ unsigned int layoutLocation :12;
+ static const unsigned int layoutLocationEnd = 0xFFF;
+
+ unsigned int layoutComponent : 3;
+ static const unsigned int layoutComponentEnd = 4;
+
+ unsigned int layoutSet : 7;
+ static const unsigned int layoutSetEnd = 0x3F;
+
+ unsigned int layoutBinding : 8;
+ static const unsigned int layoutBindingEnd = 0xFF;
+
+ unsigned int layoutIndex : 8;
+ static const unsigned int layoutIndexEnd = 0xFF;
+
+ unsigned int layoutStream : 8;
+ static const unsigned int layoutStreamEnd = 0xFF;
+
+ unsigned int layoutXfbBuffer : 4;
+ static const unsigned int layoutXfbBufferEnd = 0xF;
+
+ unsigned int layoutXfbStride : 10;
+ static const unsigned int layoutXfbStrideEnd = 0x3FF;
+
+ unsigned int layoutXfbOffset : 10;
+ static const unsigned int layoutXfbOffsetEnd = 0x3FF;
+
+ unsigned int layoutAttachment : 8; // for input_attachment_index
+ static const unsigned int layoutAttachmentEnd = 0XFF;
+
+ unsigned int layoutSpecConstantId : 11;
+ static const unsigned int layoutSpecConstantIdEnd = 0x7FF;
+
+ TLayoutFormat layoutFormat : 8;
+
+ bool layoutPushConstant;
+
+ bool hasUniformLayout() const
+ {
+ return hasMatrix() ||
+ hasPacking() ||
+ hasOffset() ||
+ hasBinding() ||
+ hasAlign();
+ }
+ bool hasMatrix() const
+ {
+ return layoutMatrix != ElmNone;
+ }
+ bool hasPacking() const
+ {
+ return layoutPacking != ElpNone;
+ }
+ bool hasOffset() const
+ {
+ return layoutOffset != layoutNotSet;
+ }
+ bool hasAlign() const
+ {
+ return layoutAlign != layoutNotSet;
+ }
+ bool hasAnyLocation() const
+ {
+ return hasLocation() ||
+ hasComponent() ||
+ hasIndex();
+ }
+ bool hasLocation() const
+ {
+ return layoutLocation != layoutLocationEnd;
+ }
+ bool hasComponent() const
+ {
+ return layoutComponent != layoutComponentEnd;
+ }
+ bool hasIndex() const
+ {
+ return layoutIndex != layoutIndexEnd;
+ }
+ bool hasSet() const
+ {
+ return layoutSet != layoutSetEnd;
+ }
+ bool hasBinding() const
+ {
+ return layoutBinding != layoutBindingEnd;
+ }
+ bool hasStream() const
+ {
+ return layoutStream != layoutStreamEnd;
+ }
+ bool hasFormat() const
+ {
+ return layoutFormat != ElfNone;
+ }
+ bool hasXfb() const
+ {
+ return hasXfbBuffer() ||
+ hasXfbStride() ||
+ hasXfbOffset();
+ }
+ bool hasXfbBuffer() const
+ {
+ return layoutXfbBuffer != layoutXfbBufferEnd;
+ }
+ bool hasXfbStride() const
+ {
+ return layoutXfbStride != layoutXfbStrideEnd;
+ }
+ bool hasXfbOffset() const
+ {
+ return layoutXfbOffset != layoutXfbOffsetEnd;
+ }
+ bool hasAttachment() const
+ {
+ return layoutAttachment != layoutAttachmentEnd;
+ }
+ bool hasSpecConstantId() const
+ {
+ // Not the same thing as being a specialization constant, this
+ // is just whether or not it was declared with an ID.
+ return layoutSpecConstantId != layoutSpecConstantIdEnd;
+ }
+ bool isSpecConstant() const
+ {
+ // True if type is a specialization constant, whether or not it
+ // had a specialization-constant ID, and false if it is not a
+ // true front-end constant.
+ return specConstant;
+ }
+ bool isFrontEndConstant() const
+ {
+ // True if the front-end knows the final constant value.
+ // This allows front-end constant folding.
+ return storage == EvqConst && ! specConstant;
+ }
+ bool isConstant() const
+ {
+ // True if is either kind of constant; specialization or regular.
+ return isFrontEndConstant() || isSpecConstant();
+ }
+ void makeSpecConstant()
+ {
+ storage = EvqConst;
+ specConstant = true;
+ }
+ static const char* getLayoutPackingString(TLayoutPacking packing)
+ {
+ switch (packing) {
+ case ElpPacked: return "packed";
+ case ElpShared: return "shared";
+ case ElpStd140: return "std140";
+ case ElpStd430: return "std430";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutMatrixString(TLayoutMatrix m)
+ {
+ switch (m) {
+ case ElmColumnMajor: return "column_major";
+ case ElmRowMajor: return "row_major";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutFormatString(TLayoutFormat f)
+ {
+ switch (f) {
+ case ElfRgba32f: return "rgba32f";
+ case ElfRgba16f: return "rgba16f";
+ case ElfRg32f: return "rg32f";
+ case ElfRg16f: return "rg16f";
+ case ElfR11fG11fB10f: return "r11f_g11f_b10f";
+ case ElfR32f: return "r32f";
+ case ElfR16f: return "r16f";
+ case ElfRgba16: return "rgba16";
+ case ElfRgb10A2: return "rgb10_a2";
+ case ElfRgba8: return "rgba8";
+ case ElfRg16: return "rg16";
+ case ElfRg8: return "rg8";
+ case ElfR16: return "r16";
+ case ElfR8: return "r8";
+ case ElfRgba16Snorm: return "rgba16_snorm";
+ case ElfRgba8Snorm: return "rgba8_snorm";
+ case ElfRg16Snorm: return "rg16_snorm";
+ case ElfRg8Snorm: return "rg8_snorm";
+ case ElfR16Snorm: return "r16_snorm";
+ case ElfR8Snorm: return "r8_snorm";
+
+ case ElfRgba32i: return "rgba32i";
+ case ElfRgba16i: return "rgba16i";
+ case ElfRgba8i: return "rgba8i";
+ case ElfRg32i: return "rg32i";
+ case ElfRg16i: return "rg16i";
+ case ElfRg8i: return "rg8i";
+ case ElfR32i: return "r32i";
+ case ElfR16i: return "r16i";
+ case ElfR8i: return "r8i";
+
+ case ElfRgba32ui: return "rgba32ui";
+ case ElfRgba16ui: return "rgba16ui";
+ case ElfRgba8ui: return "rgba8ui";
+ case ElfRg32ui: return "rg32ui";
+ case ElfRg16ui: return "rg16ui";
+ case ElfRgb10a2ui: return "rgb10_a2ui";
+ case ElfRg8ui: return "rg8ui";
+ case ElfR32ui: return "r32ui";
+ case ElfR16ui: return "r16ui";
+ case ElfR8ui: return "r8ui";
+ default: return "none";
+ }
+ }
+ static const char* getLayoutDepthString(TLayoutDepth d)
+ {
+ switch (d) {
+ case EldAny: return "depth_any";
+ case EldGreater: return "depth_greater";
+ case EldLess: return "depth_less";
+ case EldUnchanged: return "depth_unchanged";
+ default: return "none";
+ }
+ }
+ static const char* getBlendEquationString(TBlendEquationShift e)
+ {
+ switch (e) {
+ case EBlendMultiply: return "blend_support_multiply";
+ case EBlendScreen: return "blend_support_screen";
+ case EBlendOverlay: return "blend_support_overlay";
+ case EBlendDarken: return "blend_support_darken";
+ case EBlendLighten: return "blend_support_lighten";
+ case EBlendColordodge: return "blend_support_colordodge";
+ case EBlendColorburn: return "blend_support_colorburn";
+ case EBlendHardlight: return "blend_support_hardlight";
+ case EBlendSoftlight: return "blend_support_softlight";
+ case EBlendDifference: return "blend_support_difference";
+ case EBlendExclusion: return "blend_support_exclusion";
+ case EBlendHslHue: return "blend_support_hsl_hue";
+ case EBlendHslSaturation: return "blend_support_hsl_saturation";
+ case EBlendHslColor: return "blend_support_hsl_color";
+ case EBlendHslLuminosity: return "blend_support_hsl_luminosity";
+ case EBlendAllEquations: return "blend_support_all_equations";
+ default: return "unknown";
+ }
+ }
+ static const char* getGeometryString(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return "points";
+ case ElgLines: return "lines";
+ case ElgLinesAdjacency: return "lines_adjacency";
+ case ElgLineStrip: return "line_strip";
+ case ElgTriangles: return "triangles";
+ case ElgTrianglesAdjacency: return "triangles_adjacency";
+ case ElgTriangleStrip: return "triangle_strip";
+ case ElgQuads: return "quads";
+ case ElgIsolines: return "isolines";
+ default: return "none";
+ }
+ }
+ static const char* getVertexSpacingString(TVertexSpacing spacing)
+ {
+ switch (spacing) {
+ case EvsEqual: return "equal_spacing";
+ case EvsFractionalEven: return "fractional_even_spacing";
+ case EvsFractionalOdd: return "fractional_odd_spacing";
+ default: return "none";
+ }
+ }
+ static const char* getVertexOrderString(TVertexOrder order)
+ {
+ switch (order) {
+ case EvoCw: return "cw";
+ case EvoCcw: return "ccw";
+ default: return "none";
+ }
+ }
+ static int mapGeometryToSize(TLayoutGeometry geometry)
+ {
+ switch (geometry) {
+ case ElgPoints: return 1;
+ case ElgLines: return 2;
+ case ElgLinesAdjacency: return 4;
+ case ElgTriangles: return 3;
+ case ElgTrianglesAdjacency: return 6;
+ default: return 0;
+ }
+ }
+};
+
+// Qualifiers that don't need to be keep per object. They have shader scope, not object scope.
+// So, they will not be part of TType, TQualifier, etc.
+struct TShaderQualifiers {
+ TLayoutGeometry geometry; // geometry/tessellation shader in/out primitives
+ bool pixelCenterInteger; // fragment shader
+ bool originUpperLeft; // fragment shader
+ int invocations;
+ int vertices; // both for tessellation "vertices" and geometry "max_vertices"
+ TVertexSpacing spacing;
+ TVertexOrder order;
+ bool pointMode;
+ int localSize[3]; // compute shader
+ int localSizeSpecId[3]; // compute shader specialization id for gl_WorkGroupSize
+ bool earlyFragmentTests; // fragment input
+ TLayoutDepth layoutDepth;
+ bool blendEquation; // true if any blend equation was specified
+
+ void init()
+ {
+ geometry = ElgNone;
+ originUpperLeft = false;
+ pixelCenterInteger = false;
+ invocations = TQualifier::layoutNotSet;
+ vertices = TQualifier::layoutNotSet;
+ spacing = EvsNone;
+ order = EvoNone;
+ pointMode = false;
+ localSize[0] = 1;
+ localSize[1] = 1;
+ localSize[2] = 1;
+ localSizeSpecId[0] = TQualifier::layoutNotSet;
+ localSizeSpecId[1] = TQualifier::layoutNotSet;
+ localSizeSpecId[2] = TQualifier::layoutNotSet;
+ earlyFragmentTests = false;
+ layoutDepth = EldNone;
+ blendEquation = false;
+ }
+
+ // Merge in characteristics from the 'src' qualifier. They can override when
+ // set, but never erase when not set.
+ void merge(const TShaderQualifiers& src)
+ {
+ if (src.geometry != ElgNone)
+ geometry = src.geometry;
+ if (src.pixelCenterInteger)
+ pixelCenterInteger = src.pixelCenterInteger;
+ if (src.originUpperLeft)
+ originUpperLeft = src.originUpperLeft;
+ if (src.invocations != TQualifier::layoutNotSet)
+ invocations = src.invocations;
+ if (src.vertices != TQualifier::layoutNotSet)
+ vertices = src.vertices;
+ if (src.spacing != EvsNone)
+ spacing = src.spacing;
+ if (src.order != EvoNone)
+ order = src.order;
+ if (src.pointMode)
+ pointMode = true;
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSize[i] > 1)
+ localSize[i] = src.localSize[i];
+ }
+ for (int i = 0; i < 3; ++i) {
+ if (src.localSizeSpecId[i] != TQualifier::layoutNotSet)
+ localSizeSpecId[i] = src.localSizeSpecId[i];
+ }
+ if (src.earlyFragmentTests)
+ earlyFragmentTests = true;
+ if (src.layoutDepth)
+ layoutDepth = src.layoutDepth;
+ if (src.blendEquation)
+ blendEquation = src.blendEquation;
+ }
+};
+
+//
+// TPublicType is just temporarily used while parsing and not quite the same
+// information kept per node in TType. Due to the bison stack, it can't have
+// types that it thinks have non-trivial constructors. It should
+// just be used while recognizing the grammar, not anything else.
+// Once enough is known about the situation, the proper information
+// moved into a TType, or the parse context, etc.
+//
+class TPublicType {
+public:
+ TBasicType basicType;
+ TSampler sampler;
+ TQualifier qualifier;
+ TShaderQualifiers shaderQualifiers;
+ int vectorSize : 4;
+ int matrixCols : 4;
+ int matrixRows : 4;
+ TArraySizes* arraySizes;
+ const TType* userDef;
+ TSourceLoc loc;
+
+ void initType(const TSourceLoc& l)
+ {
+ basicType = EbtVoid;
+ vectorSize = 1;
+ matrixRows = 0;
+ matrixCols = 0;
+ arraySizes = nullptr;
+ userDef = nullptr;
+ loc = l;
+ }
+
+ void initQualifiers(bool global = false)
+ {
+ qualifier.clear();
+ if (global)
+ qualifier.storage = EvqGlobal;
+ }
+
+ void init(const TSourceLoc& l, bool global = false)
+ {
+ initType(l);
+ sampler.clear();
+ initQualifiers(global);
+ shaderQualifiers.init();
+ }
+
+ void setVector(int s)
+ {
+ matrixRows = 0;
+ matrixCols = 0;
+ vectorSize = s;
+ }
+
+ void setMatrix(int c, int r)
+ {
+ matrixRows = r;
+ matrixCols = c;
+ vectorSize = 0;
+ }
+
+ bool isScalar() const
+ {
+ return matrixCols == 0 && vectorSize == 1 && arraySizes == nullptr && userDef == nullptr;
+ }
+
+ // "Image" is a superset of "Subpass"
+ bool isImage() const { return basicType == EbtSampler && sampler.isImage(); }
+ bool isSubpass() const { return basicType == EbtSampler && sampler.isSubpass(); }
+};
+
+//
+// Base class for things that have a type.
+//
+class TType {
+public:
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ // for "empty" type (no args) or simple scalar/vector/matrix
+ explicit TType(TBasicType t = EbtVoid, TStorageQualifier q = EvqTemporary, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ }
+ // for explicit precision qualifier
+ TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0,
+ bool isVector = false) :
+ basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1),
+ arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ qualifier.storage = q;
+ qualifier.precision = p;
+ assert(p >= 0 && p <= EpqHigh);
+ }
+ // for turning a TPublicType into a TType, using a shallow copy
+ explicit TType(const TPublicType& p) :
+ basicType(p.basicType),
+ vectorSize(p.vectorSize), matrixCols(p.matrixCols), matrixRows(p.matrixRows), vector1(false),
+ arraySizes(p.arraySizes), structure(nullptr), fieldName(nullptr), typeName(nullptr)
+ {
+ if (basicType == EbtSampler)
+ sampler = p.sampler;
+ else
+ sampler.clear();
+ qualifier = p.qualifier;
+ if (p.userDef) {
+ structure = p.userDef->getWritableStruct(); // public type is short-lived; there are no sharing issues
+ typeName = NewPoolTString(p.userDef->getTypeName().c_str());
+ }
+ }
+ // to efficiently make a dereferenced type
+ // without ever duplicating the outer structure that will be thrown away
+ // and using only shallow copy
+ TType(const TType& type, int derefIndex, bool rowMajor = false)
+ {
+ if (type.isArray()) {
+ shallowCopy(type);
+ if (type.getArraySizes()->getNumDims() == 1) {
+ arraySizes = nullptr;
+ } else {
+ // want our own copy of the array, so we can edit it
+ arraySizes = new TArraySizes;
+ arraySizes->copyDereferenced(*type.arraySizes);
+ }
+ } else if (type.basicType == EbtStruct || type.basicType == EbtBlock) {
+ // do a structure dereference
+ const TTypeList& memberList = *type.getStruct();
+ shallowCopy(*memberList[derefIndex].type);
+ return;
+ } else {
+ // do a vector/matrix dereference
+ shallowCopy(type);
+ if (matrixCols > 0) {
+ // dereference from matrix to vector
+ if (rowMajor)
+ vectorSize = matrixCols;
+ else
+ vectorSize = matrixRows;
+ matrixCols = 0;
+ matrixRows = 0;
+ if (vectorSize == 1)
+ vector1 = true;
+ } else if (isVector()) {
+ // dereference from vector to scalar
+ vectorSize = 1;
+ vector1 = false;
+ }
+ }
+ }
+ // for making structures, ...
+ TType(TTypeList* userDef, const TString& n) :
+ basicType(EbtStruct), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false),
+ arraySizes(nullptr), structure(userDef), fieldName(nullptr)
+ {
+ sampler.clear();
+ qualifier.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ // For interface blocks
+ TType(TTypeList* userDef, const TString& n, const TQualifier& q) :
+ basicType(EbtBlock), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false),
+ qualifier(q), arraySizes(nullptr), structure(userDef), fieldName(nullptr)
+ {
+ sampler.clear();
+ typeName = NewPoolTString(n.c_str());
+ }
+ virtual ~TType() {}
+
+ // Not for use across pool pops; it will cause multiple instances of TType to point to the same information.
+ // This only works if that information (like a structure's list of types) does not change and
+ // the instances are sharing the same pool.
+ void shallowCopy(const TType& copyOf)
+ {
+ basicType = copyOf.basicType;
+ sampler = copyOf.sampler;
+ qualifier = copyOf.qualifier;
+ vectorSize = copyOf.vectorSize;
+ matrixCols = copyOf.matrixCols;
+ matrixRows = copyOf.matrixRows;
+ vector1 = copyOf.vector1;
+ arraySizes = copyOf.arraySizes; // copying the pointer only, not the contents
+ structure = copyOf.structure;
+ fieldName = copyOf.fieldName;
+ typeName = copyOf.typeName;
+ }
+
+ void deepCopy(const TType& copyOf)
+ {
+ shallowCopy(copyOf);
+
+ if (copyOf.arraySizes) {
+ arraySizes = new TArraySizes;
+ *arraySizes = *copyOf.arraySizes;
+ }
+
+ if (copyOf.structure) {
+ structure = new TTypeList;
+ for (unsigned int i = 0; i < copyOf.structure->size(); ++i) {
+ TTypeLoc typeLoc;
+ typeLoc.loc = (*copyOf.structure)[i].loc;
+ typeLoc.type = new TType();
+ typeLoc.type->deepCopy(*(*copyOf.structure)[i].type);
+ structure->push_back(typeLoc);
+ }
+ }
+
+ if (copyOf.fieldName)
+ fieldName = NewPoolTString(copyOf.fieldName->c_str());
+ if (copyOf.typeName)
+ typeName = NewPoolTString(copyOf.typeName->c_str());
+ }
+
+ TType* clone()
+ {
+ TType *newType = new TType();
+ newType->deepCopy(*this);
+
+ return newType;
+ }
+
+ void makeVector() { vector1 = true; }
+
+ // Merge type from parent, where a parentType is at the beginning of a declaration,
+ // establishing some characteristics for all subsequent names, while this type
+ // is on the individual names.
+ void mergeType(const TPublicType& parentType)
+ {
+ // arrayness is currently the only child aspect that has to be preserved
+ basicType = parentType.basicType;
+ vectorSize = parentType.vectorSize;
+ matrixCols = parentType.matrixCols;
+ matrixRows = parentType.matrixRows;
+ vector1 = false; // TPublicType is only GLSL which so far has no vec1
+ qualifier = parentType.qualifier;
+ sampler = parentType.sampler;
+ if (parentType.arraySizes)
+ newArraySizes(*parentType.arraySizes);
+ if (parentType.userDef) {
+ structure = parentType.userDef->getWritableStruct();
+ setTypeName(parentType.userDef->getTypeName());
+ }
+ }
+
+ virtual void hideMember() { basicType = EbtVoid; vectorSize = 1; }
+ virtual bool hiddenMember() const { return basicType == EbtVoid; }
+
+ virtual void setTypeName(const TString& n) { typeName = NewPoolTString(n.c_str()); }
+ virtual void setFieldName(const TString& n) { fieldName = NewPoolTString(n.c_str()); }
+ virtual const TString& getTypeName() const
+ {
+ assert(typeName);
+ return *typeName;
+ }
+
+ virtual const TString& getFieldName() const
+ {
+ assert(fieldName);
+ return *fieldName;
+ }
+
+ virtual TBasicType getBasicType() const { return basicType; }
+ virtual const TSampler& getSampler() const { return sampler; }
+
+ virtual TQualifier& getQualifier() { return qualifier; }
+ virtual const TQualifier& getQualifier() const { return qualifier; }
+
+ virtual int getVectorSize() const { return vectorSize; } // returns 1 for either scalar or vector of size 1, valid for both
+ virtual int getMatrixCols() const { return matrixCols; }
+ virtual int getMatrixRows() const { return matrixRows; }
+ virtual int getOuterArraySize() const { return arraySizes->getOuterSize(); }
+ virtual TIntermTyped* getOuterArrayNode() const { return arraySizes->getOuterNode(); }
+ virtual int getCumulativeArraySize() const { return arraySizes->getCumulativeSize(); }
+ virtual bool isArrayOfArrays() const { return arraySizes != nullptr && arraySizes->getNumDims() > 1; }
+ virtual int getImplicitArraySize() const { return arraySizes->getImplicitSize(); }
+ virtual const TArraySizes* getArraySizes() const { return arraySizes; }
+ virtual TArraySizes& getArraySizes() { assert(arraySizes != nullptr); return *arraySizes; }
+
+ virtual bool isScalar() const { return ! isVector() && ! isMatrix() && ! isStruct() && ! isArray(); }
+ virtual bool isVector() const { return vectorSize > 1 || vector1; }
+ virtual bool isMatrix() const { return matrixCols ? true : false; }
+ virtual bool isArray() const { return arraySizes != nullptr; }
+ virtual bool isExplicitlySizedArray() const { return isArray() && getOuterArraySize() != UnsizedArraySize; }
+ virtual bool isImplicitlySizedArray() const { return isArray() && getOuterArraySize() == UnsizedArraySize && qualifier.storage != EvqBuffer; }
+ virtual bool isRuntimeSizedArray() const { return isArray() && getOuterArraySize() == UnsizedArraySize && qualifier.storage == EvqBuffer; }
+ virtual bool isStruct() const { return structure != nullptr; }
+ virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble; }
+
+ virtual bool isOpaque() const { return basicType == EbtSampler || basicType == EbtAtomicUint; }
+
+ // "Image" is a superset of "Subpass"
+ virtual bool isImage() const { return basicType == EbtSampler && getSampler().isImage(); }
+ virtual bool isSubpass() const { return basicType == EbtSampler && getSampler().isSubpass(); }
+
+ // Recursively checks if the type contains the given basic type
+ virtual bool containsBasicType(TBasicType checkType) const
+ {
+ if (basicType == checkType)
+ return true;
+ if (! structure)
+ return false;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->containsBasicType(checkType))
+ return true;
+ }
+ return false;
+ }
+
+ // Recursively check the structure for any arrays, needed for some error checks
+ virtual bool containsArray() const
+ {
+ if (isArray())
+ return true;
+ if (structure == nullptr)
+ return false;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->containsArray())
+ return true;
+ }
+ return false;
+ }
+
+ // Check the structure for any structures, needed for some error checks
+ virtual bool containsStructure() const
+ {
+ if (structure == nullptr)
+ return false;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->structure)
+ return true;
+ }
+ return false;
+ }
+
+ // Recursively check the structure for any implicitly-sized arrays, needed for triggering a copyUp().
+ virtual bool containsImplicitlySizedArray() const
+ {
+ if (isImplicitlySizedArray())
+ return true;
+ if (structure == nullptr)
+ return false;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->containsImplicitlySizedArray())
+ return true;
+ }
+ return false;
+ }
+
+ virtual bool containsOpaque() const
+ {
+ if (isOpaque())
+ return true;
+ if (! structure)
+ return false;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->containsOpaque())
+ return true;
+ }
+ return false;
+ }
+
+ virtual bool containsNonOpaque() const
+ {
+ // list all non-opaque types
+ switch (basicType) {
+ case EbtVoid:
+ case EbtFloat:
+ case EbtDouble:
+ case EbtInt:
+ case EbtUint:
+ case EbtInt64:
+ case EbtUint64:
+ case EbtBool:
+ return true;
+ default:
+ break;
+ }
+ if (! structure)
+ return false;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->containsNonOpaque())
+ return true;
+ }
+ return false;
+ }
+
+ virtual bool containsSpecializationSize() const
+ {
+ if (isArray() && arraySizes->containsNode())
+ return true;
+ if (! structure)
+ return false;
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->containsSpecializationSize())
+ return true;
+ }
+ return false;
+ }
+
+ // Array editing methods. Array descriptors can be shared across
+ // type instances. This allows all uses of the same array
+ // to be updated at once. E.g., all nodes can be explicitly sized
+ // by tracking and correcting one implicit size. Or, all nodes
+ // can get the explicit size on a redeclaration that gives size.
+ //
+ // N.B.: Don't share with the shared symbol tables (symbols are
+ // marked as isReadOnly(). Such symbols with arrays that will be
+ // edited need to copyUp() on first use, so that
+ // A) the edits don't effect the shared symbol table, and
+ // B) the edits are shared across all users.
+ void updateArraySizes(const TType& type)
+ {
+ // For when we may already be sharing existing array descriptors,
+ // keeping the pointers the same, just updating the contents.
+ assert(arraySizes != nullptr);
+ assert(type.arraySizes != nullptr);
+ *arraySizes = *type.arraySizes;
+ }
+ void newArraySizes(const TArraySizes& s)
+ {
+ // For setting a fresh new set of array sizes, not yet worrying about sharing.
+ arraySizes = new TArraySizes;
+ *arraySizes = s;
+ }
+ void clearArraySizes()
+ {
+ arraySizes = 0;
+ }
+ void addArrayOuterSizes(const TArraySizes& s)
+ {
+ if (arraySizes == nullptr)
+ newArraySizes(s);
+ else
+ arraySizes->addOuterSizes(s);
+ }
+ void changeOuterArraySize(int s) { arraySizes->changeOuterSize(s); }
+ void setImplicitArraySize(int s) { arraySizes->setImplicitSize(s); }
+
+ // Recursively make the implicit array size the explicit array size, through the type tree.
+ void adoptImplicitArraySizes()
+ {
+ if (isImplicitlySizedArray())
+ changeOuterArraySize(getImplicitArraySize());
+ if (isStruct()) {
+ for (int i = 0; i < (int)structure->size(); ++i)
+ (*structure)[i].type->adoptImplicitArraySizes();
+ }
+ }
+
+ const char* getBasicString() const
+ {
+ return TType::getBasicString(basicType);
+ }
+
+ static const char* getBasicString(TBasicType t)
+ {
+ switch (t) {
+ case EbtVoid: return "void";
+ case EbtFloat: return "float";
+ case EbtDouble: return "double";
+ case EbtInt: return "int";
+ case EbtUint: return "uint";
+ case EbtInt64: return "int64_t";
+ case EbtUint64: return "uint64_t";
+ case EbtBool: return "bool";
+ case EbtAtomicUint: return "atomic_uint";
+ case EbtSampler: return "sampler/image";
+ case EbtStruct: return "structure";
+ case EbtBlock: return "block";
+ default: return "unknown type";
+ }
+ }
+
+ TString getCompleteString() const
+ {
+ const int maxSize = GlslangMaxTypeLength;
+ char buf[maxSize];
+ char* p = &buf[0];
+ char* end = &buf[maxSize];
+
+ if (qualifier.hasLayout()) {
+ // To reduce noise, skip this if the only layout is an xfb_buffer
+ // with no triggering xfb_offset.
+ TQualifier noXfbBuffer = qualifier;
+ noXfbBuffer.layoutXfbBuffer = TQualifier::layoutXfbBufferEnd;
+ if (noXfbBuffer.hasLayout()) {
+ p += snprintf(p, end - p, "layout(");
+ if (qualifier.hasAnyLocation()) {
+ p += snprintf(p, end - p, "location=%d ", qualifier.layoutLocation);
+ if (qualifier.hasComponent())
+ p += snprintf(p, end - p, "component=%d ", qualifier.layoutComponent);
+ if (qualifier.hasIndex())
+ p += snprintf(p, end - p, "index=%d ", qualifier.layoutIndex);
+ }
+ if (qualifier.hasSet())
+ p += snprintf(p, end - p, "set=%d ", qualifier.layoutSet);
+ if (qualifier.hasBinding())
+ p += snprintf(p, end - p, "binding=%d ", qualifier.layoutBinding);
+ if (qualifier.hasStream())
+ p += snprintf(p, end - p, "stream=%d ", qualifier.layoutStream);
+ if (qualifier.hasMatrix())
+ p += snprintf(p, end - p, "%s ", TQualifier::getLayoutMatrixString(qualifier.layoutMatrix));
+ if (qualifier.hasPacking())
+ p += snprintf(p, end - p, "%s ", TQualifier::getLayoutPackingString(qualifier.layoutPacking));
+ if (qualifier.hasOffset())
+ p += snprintf(p, end - p, "offset=%d ", qualifier.layoutOffset);
+ if (qualifier.hasAlign())
+ p += snprintf(p, end - p, "align=%d ", qualifier.layoutAlign);
+ if (qualifier.hasFormat())
+ p += snprintf(p, end - p, "%s ", TQualifier::getLayoutFormatString(qualifier.layoutFormat));
+ if (qualifier.hasXfbBuffer() && qualifier.hasXfbOffset())
+ p += snprintf(p, end - p, "xfb_buffer=%d ", qualifier.layoutXfbBuffer);
+ if (qualifier.hasXfbOffset())
+ p += snprintf(p, end - p, "xfb_offset=%d ", qualifier.layoutXfbOffset);
+ if (qualifier.hasXfbStride())
+ p += snprintf(p, end - p, "xfb_stride=%d ", qualifier.layoutXfbStride);
+ if (qualifier.hasAttachment())
+ p += snprintf(p, end - p, "input_attachment_index=%d ", qualifier.layoutAttachment);
+ if (qualifier.hasSpecConstantId())
+ p += snprintf(p, end - p, "constant_id=%d ", qualifier.layoutSpecConstantId);
+ if (qualifier.layoutPushConstant)
+ p += snprintf(p, end - p, "push_constant ");
+ p += snprintf(p, end - p, ") ");
+ }
+ }
+
+ if (qualifier.invariant)
+ p += snprintf(p, end - p, "invariant ");
+ if (qualifier.noContraction)
+ p += snprintf(p, end - p, "noContraction ");
+ if (qualifier.centroid)
+ p += snprintf(p, end - p, "centroid ");
+ if (qualifier.smooth)
+ p += snprintf(p, end - p, "smooth ");
+ if (qualifier.flat)
+ p += snprintf(p, end - p, "flat ");
+ if (qualifier.nopersp)
+ p += snprintf(p, end - p, "noperspective ");
+ if (qualifier.patch)
+ p += snprintf(p, end - p, "patch ");
+ if (qualifier.sample)
+ p += snprintf(p, end - p, "sample ");
+ if (qualifier.coherent)
+ p += snprintf(p, end - p, "coherent ");
+ if (qualifier.volatil)
+ p += snprintf(p, end - p, "volatile ");
+ if (qualifier.restrict)
+ p += snprintf(p, end - p, "restrict ");
+ if (qualifier.readonly)
+ p += snprintf(p, end - p, "readonly ");
+ if (qualifier.writeonly)
+ p += snprintf(p, end - p, "writeonly ");
+ if (qualifier.specConstant)
+ p += snprintf(p, end - p, "specialization-constant ");
+ p += snprintf(p, end - p, "%s ", getStorageQualifierString());
+ if (isArray()) {
+ for(int i = 0; i < (int)arraySizes->getNumDims(); ++i) {
+ int size = arraySizes->getDimSize(i);
+ if (size == 0)
+ p += snprintf(p, end - p, "implicitly-sized array of ");
+ else
+ p += snprintf(p, end - p, "%d-element array of ", arraySizes->getDimSize(i));
+ }
+ }
+ if (qualifier.precision != EpqNone)
+ p += snprintf(p, end - p, "%s ", getPrecisionQualifierString());
+ if (isMatrix())
+ p += snprintf(p, end - p, "%dX%d matrix of ", matrixCols, matrixRows);
+ else if (isVector())
+ p += snprintf(p, end - p, "%d-component vector of ", vectorSize);
+
+ *p = 0;
+ TString s(buf);
+ s.append(getBasicTypeString());
+
+ if (qualifier.builtIn != EbvNone) {
+ s.append(" ");
+ s.append(getBuiltInVariableString());
+ }
+
+ // Add struct/block members
+ if (structure) {
+ s.append("{");
+ for (size_t i = 0; i < structure->size(); ++i) {
+ if (! (*structure)[i].type->hiddenMember()) {
+ s.append((*structure)[i].type->getCompleteString());
+ s.append(" ");
+ s.append((*structure)[i].type->getFieldName());
+ if (i < structure->size() - 1)
+ s.append(", ");
+ }
+ }
+ s.append("}");
+ }
+
+ return s;
+ }
+
+ TString getBasicTypeString() const
+ {
+ if (basicType == EbtSampler)
+ return sampler.getString();
+ else
+ return getBasicString();
+ }
+
+ const char* getStorageQualifierString() const { return GetStorageQualifierString(qualifier.storage); }
+ const char* getBuiltInVariableString() const { return GetBuiltInVariableString(qualifier.builtIn); }
+ const char* getPrecisionQualifierString() const { return GetPrecisionQualifierString(qualifier.precision); }
+ const TTypeList* getStruct() const { return structure; }
+ TTypeList* getWritableStruct() const { return structure; } // This should only be used when known to not be sharing with other threads
+
+ int computeNumComponents() const
+ {
+ int components = 0;
+
+ if (getBasicType() == EbtStruct || getBasicType() == EbtBlock) {
+ for (TTypeList::const_iterator tl = getStruct()->begin(); tl != getStruct()->end(); tl++)
+ components += ((*tl).type)->computeNumComponents();
+ } else if (matrixCols)
+ components = matrixCols * matrixRows;
+ else
+ components = vectorSize;
+
+ if (arraySizes != nullptr) {
+ components *= arraySizes->getCumulativeSize();
+ }
+
+ return components;
+ }
+
+ // append this type's mangled name to the passed in 'name'
+ void appendMangledName(TString& name)
+ {
+ buildMangledName(name);
+ name += ';' ;
+ }
+
+ // Do two structure types match? They could be declared independently,
+ // in different places, but still might satisfy the definition of matching.
+ // From the spec:
+ //
+ // "Structures must have the same name, sequence of type names, and
+ // type definitions, and member names to be considered the same type.
+ // This rule applies recursively for nested or embedded types."
+ //
+ bool sameStructType(const TType& right) const
+ {
+ // Most commonly, they are both nullptr, or the same pointer to the same actual structure
+ if (structure == right.structure)
+ return true;
+
+ // Both being nullptr was caught above, now they both have to be structures of the same number of elements
+ if (structure == nullptr || right.structure == nullptr ||
+ structure->size() != right.structure->size())
+ return false;
+
+ // Structure names have to match
+ if (*typeName != *right.typeName)
+ return false;
+
+ // Compare the names and types of all the members, which have to match
+ for (unsigned int i = 0; i < structure->size(); ++i) {
+ if ((*structure)[i].type->getFieldName() != (*right.structure)[i].type->getFieldName())
+ return false;
+
+ if (*(*structure)[i].type != *(*right.structure)[i].type)
+ return false;
+ }
+
+ return true;
+ }
+
+ // See if two types match, in all aspects except arrayness
+ bool sameElementType(const TType& right) const
+ {
+ return basicType == right.basicType && sameElementShape(right);
+ }
+
+ // See if two type's arrayness match
+ bool sameArrayness(const TType& right) const
+ {
+ return ((arraySizes == nullptr && right.arraySizes == nullptr) ||
+ (arraySizes != nullptr && right.arraySizes != nullptr && *arraySizes == *right.arraySizes));
+ }
+
+ // See if two type's arrayness match in everything except their outer dimension
+ bool sameInnerArrayness(const TType& right) const
+ {
+ assert(arraySizes != nullptr && right.arraySizes != nullptr);
+ return arraySizes->sameInnerArrayness(*right.arraySizes);
+ }
+
+ // See if two type's elements match in all ways except basic type
+ bool sameElementShape(const TType& right) const
+ {
+ return sampler == right.sampler &&
+ vectorSize == right.vectorSize &&
+ matrixCols == right.matrixCols &&
+ matrixRows == right.matrixRows &&
+ vector1 == right.vector1 &&
+ sameStructType(right);
+ }
+
+ // See if two types match in all ways (just the actual type, not qualification)
+ bool operator==(const TType& right) const
+ {
+ return sameElementType(right) && sameArrayness(right);
+ }
+
+ bool operator!=(const TType& right) const
+ {
+ return ! operator==(right);
+ }
+
+protected:
+ // Require consumer to pick between deep copy and shallow copy.
+ TType(const TType& type);
+ TType& operator=(const TType& type);
+
+ void buildMangledName(TString&);
+
+ TBasicType basicType : 8;
+ int vectorSize : 4; // 1 means either scalar or 1-component vector; see vector1 to disambiguate.
+ int matrixCols : 4;
+ int matrixRows : 4;
+ bool vector1 : 1; // Backward-compatible tracking of a 1-component vector distinguished from a scalar.
+ // GLSL 4.5 never has a 1-component vector; so this will always be false until such
+ // functionality is added.
+ // HLSL does have a 1-component vectors, so this will be true to disambiguate
+ // from a scalar.
+ TSampler sampler;
+ TQualifier qualifier;
+
+ TArraySizes* arraySizes; // nullptr unless an array; can be shared across types
+ TTypeList* structure; // nullptr unless this is a struct; can be shared across types
+ TString *fieldName; // for structure field names
+ TString *typeName; // for structure type name
+};
+
+} // end namespace glslang
+
+#endif // _TYPES_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/arrays.h b/chromium/third_party/glslang/src/glslang/Include/arrays.h
new file mode 100644
index 00000000000..a50088d2dbe
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/arrays.h
@@ -0,0 +1,318 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2012-2013 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Implement types for tracking GLSL arrays, arrays of arrays, etc.
+//
+
+#ifndef _ARRAYS_INCLUDED
+#define _ARRAYS_INCLUDED
+
+namespace glslang {
+
+// This is used to mean there is no size yet (unsized), it is waiting to get a size from somewhere else.
+const int UnsizedArraySize = 0;
+
+class TIntermTyped;
+extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
+
+// Specialization constants need both a nominal size and a node that defines
+// the specialization constant being used. Array types are the same when their
+// size and specialization constant nodes are the same.
+struct TArraySize {
+ unsigned int size;
+ TIntermTyped* node; // nullptr means no specialization constant node
+ bool operator==(const TArraySize& rhs) const
+ {
+ if (size != rhs.size)
+ return false;
+ if (node == nullptr || rhs.node == nullptr)
+ return node == rhs.node;
+
+ return SameSpecializationConstants(node, rhs.node);
+ }
+};
+
+//
+// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
+// It has generic-container semantics, while TArraySizes has array-of-array semantics.
+// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
+//
+struct TSmallArrayVector {
+ //
+ // TODO: memory: TSmallArrayVector is intended to be smaller.
+ // Almost all arrays could be handled by two sizes each fitting
+ // in 16 bits, needing a real vector only in the cases where there
+ // are more than 3 sizes or a size needing more than 16 bits.
+ //
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TSmallArrayVector() : sizes(nullptr) { }
+ virtual ~TSmallArrayVector() { dealloc(); }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TSmallArrayVector& operator=(const TSmallArrayVector& from)
+ {
+ if (from.sizes == nullptr)
+ sizes = nullptr;
+ else {
+ alloc();
+ *sizes = *from.sizes;
+ }
+
+ return *this;
+ }
+
+ int size() const
+ {
+ if (sizes == nullptr)
+ return 0;
+ return (int)sizes->size();
+ }
+
+ unsigned int frontSize() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().size;
+ }
+
+ TIntermTyped* frontNode() const
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ return sizes->front().node;
+ }
+
+ void changeFront(unsigned int s)
+ {
+ assert(sizes != nullptr);
+ // this should only happen for implicitly sized arrays, not specialization constants
+ assert(sizes->front().node == nullptr);
+ sizes->front().size = s;
+ }
+
+ void push_back(unsigned int e, TIntermTyped* n)
+ {
+ alloc();
+ TArraySize pair = { e, n };
+ sizes->push_back(pair);
+ }
+
+ void push_front(const TSmallArrayVector& newDims)
+ {
+ alloc();
+ sizes->insert(sizes->begin(), newDims.sizes->begin(), newDims.sizes->end());
+ }
+
+ void pop_front()
+ {
+ assert(sizes != nullptr && sizes->size() > 0);
+ if (sizes->size() == 1)
+ dealloc();
+ else
+ sizes->erase(sizes->begin());
+ }
+
+ // 'this' should currently not be holding anything, and copyNonFront
+ // will make it hold a copy of all but the first element of rhs.
+ // (This would be useful for making a type that is dereferenced by
+ // one dimension.)
+ void copyNonFront(const TSmallArrayVector& rhs)
+ {
+ assert(sizes == nullptr);
+ if (rhs.size() > 1) {
+ alloc();
+ sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
+ }
+ }
+
+ unsigned int getDimSize(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].size;
+ }
+
+ void setDimSize(int i, unsigned int size) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ assert((*sizes)[i].node == nullptr);
+ (*sizes)[i].size = size;
+ }
+
+ TIntermTyped* getDimNode(int i) const
+ {
+ assert(sizes != nullptr && (int)sizes->size() > i);
+ return (*sizes)[i].node;
+ }
+
+ bool operator==(const TSmallArrayVector& rhs) const
+ {
+ if (sizes == nullptr && rhs.sizes == nullptr)
+ return true;
+ if (sizes == nullptr || rhs.sizes == nullptr)
+ return false;
+ return *sizes == *rhs.sizes;
+ }
+ bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
+
+protected:
+ TSmallArrayVector(const TSmallArrayVector&);
+
+ void alloc()
+ {
+ if (sizes == nullptr)
+ sizes = new TVector<TArraySize>;
+ }
+ void dealloc()
+ {
+ delete sizes;
+ sizes = nullptr;
+ }
+
+ TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
+};
+
+//
+// Represent an array, or array of arrays, to arbitrary depth. This is not
+// done through a hierarchy of types in a type tree, rather all contiguous arrayness
+// in the type hierarchy is localized into this single cumulative object.
+//
+// The arrayness in TType is a pointer, so that it can be non-allocated and zero
+// for the vast majority of types that are non-array types.
+//
+// Order Policy: these are all identical:
+// - left to right order within a contiguous set of ...[..][..][..]... in the source language
+// - index order 0, 1, 2, ... within the 'sizes' member below
+// - outer-most to inner-most
+//
+struct TArraySizes {
+ POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+ TArraySizes() : implicitArraySize(1) { }
+
+ // For breaking into two non-shared copies, independently modifiable.
+ TArraySizes& operator=(const TArraySizes& from)
+ {
+ implicitArraySize = from.implicitArraySize;
+ sizes = from.sizes;
+
+ return *this;
+ }
+
+ // translate from array-of-array semantics to container semantics
+ int getNumDims() const { return sizes.size(); }
+ int getDimSize(int dim) const { return sizes.getDimSize(dim); }
+ TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); }
+ void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); }
+ int getOuterSize() const { return sizes.frontSize(); }
+ TIntermTyped* getOuterNode() const { return sizes.frontNode(); }
+ int getCumulativeSize() const
+ {
+ int size = 1;
+ for (int d = 0; d < sizes.size(); ++d) {
+ // this only makes sense in paths that have a known array size
+ assert(sizes.getDimSize(d) != UnsizedArraySize);
+ size *= sizes.getDimSize(d);
+ }
+ return size;
+ }
+ void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
+ void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
+ void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
+ void addInnerSize(TArraySize pair) { sizes.push_back(pair.size, pair.node); }
+ void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
+ int getImplicitSize() const { return (int)implicitArraySize; }
+ void setImplicitSize(int s) { implicitArraySize = s; }
+ bool isInnerImplicit() const
+ {
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
+ return true;
+ }
+
+ return false;
+ }
+ bool isImplicit() const { return getOuterSize() == UnsizedArraySize || isInnerImplicit(); }
+ void addOuterSizes(const TArraySizes& s) { sizes.push_front(s.sizes); }
+ void dereference() { sizes.pop_front(); }
+ void copyDereferenced(const TArraySizes& rhs)
+ {
+ assert(sizes.size() == 0);
+ if (rhs.sizes.size() > 1)
+ sizes.copyNonFront(rhs.sizes);
+ }
+
+ bool sameInnerArrayness(const TArraySizes& rhs) const
+ {
+ if (sizes.size() != rhs.sizes.size())
+ return false;
+
+ for (int d = 1; d < sizes.size(); ++d) {
+ if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) ||
+ sizes.getDimNode(d) != rhs.sizes.getDimNode(d))
+ return false;
+ }
+
+ return true;
+ }
+
+ // Returns true if any of the dimensions of the array is sized with a node
+ // instead of a front-end compile-time constant.
+ bool containsNode()
+ {
+ for (int d = 0; d < sizes.size(); ++d) {
+ if (sizes.getDimNode(d) != nullptr)
+ return true;
+ }
+
+ return false;
+ }
+
+ bool operator==(const TArraySizes& rhs) { return sizes == rhs.sizes; }
+ bool operator!=(const TArraySizes& rhs) { return sizes != rhs.sizes; }
+
+protected:
+ TSmallArrayVector sizes;
+
+ TArraySizes(const TArraySizes&);
+
+ // for tracking maximum referenced index, before an explicit size is given
+ // applies only to the outer-most dimension
+ int implicitArraySize;
+};
+
+} // end namespace glslang
+
+#endif // _ARRAYS_INCLUDED_
diff --git a/chromium/third_party/glslang/src/glslang/Include/intermediate.h b/chromium/third_party/glslang/src/glslang/Include/intermediate.h
new file mode 100644
index 00000000000..1759c867301
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/intermediate.h
@@ -0,0 +1,1090 @@
+//
+//Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+//Copyright (C) 2012-2016 LunarG, Inc.
+//
+//All rights reserved.
+//
+//Redistribution and use in source and binary forms, with or without
+//modification, are permitted provided that the following conditions
+//are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+//POSSIBILITY OF SUCH DAMAGE.
+//
+
+//
+// Definition of the in-memory high-level intermediate representation
+// of shaders. This is a tree that the parser creates.
+//
+// Nodes in the tree are defined as a hierarchy of classes derived from
+// TIntermNode. Each is a node in a tree. There is no preset branching factor;
+// each node can have its own type of list of children.
+//
+
+#ifndef __INTERMEDIATE_H
+#define __INTERMEDIATE_H
+
+#include "../Include/Common.h"
+#include "../Include/Types.h"
+#include "../Include/ConstantUnion.h"
+
+namespace glslang {
+
+//
+// Operators used by the high-level (parse tree) representation.
+//
+enum TOperator {
+    EOpNull,            // if in a node, should only mean a node is still being built
+    EOpSequence,        // denotes a list of statements, or parameters, etc.
+    EOpLinkerObjects,   // for aggregate node of objects the linker may need, if not referenced by the rest of the AST
+    EOpFunctionCall,
+    EOpFunction,        // For function definition
+    EOpParameters,      // an aggregate listing the parameters to a function
+
+    //
+    // Unary operators
+    //
+
+    EOpNegative,
+    EOpLogicalNot,
+    EOpVectorLogicalNot,
+    EOpBitwiseNot,
+
+    EOpPostIncrement,
+    EOpPostDecrement,
+    EOpPreIncrement,
+    EOpPreDecrement,
+
+    // Implicit/explicit conversions between the basic types.
+    EOpConvIntToBool,
+    EOpConvUintToBool,
+    EOpConvFloatToBool,
+    EOpConvDoubleToBool,
+    EOpConvInt64ToBool,
+    EOpConvUint64ToBool,
+    EOpConvBoolToFloat,
+    EOpConvIntToFloat,
+    EOpConvUintToFloat,
+    EOpConvDoubleToFloat,
+    EOpConvInt64ToFloat,
+    EOpConvUint64ToFloat,
+    EOpConvUintToInt,
+    EOpConvFloatToInt,
+    EOpConvBoolToInt,
+    EOpConvDoubleToInt,
+    EOpConvInt64ToInt,
+    EOpConvUint64ToInt,
+    EOpConvIntToUint,
+    EOpConvFloatToUint,
+    EOpConvBoolToUint,
+    EOpConvDoubleToUint,
+    EOpConvInt64ToUint,
+    EOpConvUint64ToUint,
+    EOpConvIntToDouble,
+    EOpConvUintToDouble,
+    EOpConvFloatToDouble,
+    EOpConvBoolToDouble,
+    EOpConvInt64ToDouble,
+    EOpConvUint64ToDouble,
+    EOpConvBoolToInt64,
+    EOpConvIntToInt64,
+    EOpConvUintToInt64,
+    EOpConvFloatToInt64,
+    EOpConvDoubleToInt64,
+    EOpConvUint64ToInt64,
+    EOpConvBoolToUint64,
+    EOpConvIntToUint64,
+    EOpConvUintToUint64,
+    EOpConvFloatToUint64,
+    EOpConvDoubleToUint64,
+    EOpConvInt64ToUint64,
+
+    //
+    // binary operations
+    //
+
+    EOpAdd,
+    EOpSub,
+    EOpMul,
+    EOpDiv,
+    EOpMod,
+    EOpRightShift,
+    EOpLeftShift,
+    EOpAnd,
+    EOpInclusiveOr,
+    EOpExclusiveOr,
+    EOpEqual,
+    EOpNotEqual,
+    EOpVectorEqual,
+    EOpVectorNotEqual,
+    EOpLessThan,
+    EOpGreaterThan,
+    EOpLessThanEqual,
+    EOpGreaterThanEqual,
+    EOpComma,
+
+    EOpVectorTimesScalar,
+    EOpVectorTimesMatrix,
+    EOpMatrixTimesVector,
+    EOpMatrixTimesScalar,
+
+    EOpLogicalOr,
+    EOpLogicalXor,
+    EOpLogicalAnd,
+
+    EOpIndexDirect,
+    EOpIndexIndirect,
+    EOpIndexDirectStruct,
+
+    EOpVectorSwizzle,
+
+    EOpMethod,
+
+    //
+    // Built-in functions mapped to operators
+    //
+
+    EOpRadians,
+    EOpDegrees,
+    EOpSin,
+    EOpCos,
+    EOpTan,
+    EOpAsin,
+    EOpAcos,
+    EOpAtan,
+    EOpSinh,
+    EOpCosh,
+    EOpTanh,
+    EOpAsinh,
+    EOpAcosh,
+    EOpAtanh,
+
+    EOpPow,
+    EOpExp,
+    EOpLog,
+    EOpExp2,
+    EOpLog2,
+    EOpSqrt,
+    EOpInverseSqrt,
+
+    EOpAbs,
+    EOpSign,
+    EOpFloor,
+    EOpTrunc,
+    EOpRound,
+    EOpRoundEven,
+    EOpCeil,
+    EOpFract,
+    EOpModf,
+    EOpMin,
+    EOpMax,
+    EOpClamp,
+    EOpMix,
+    EOpStep,
+    EOpSmoothStep,
+
+    EOpIsNan,
+    EOpIsInf,
+
+    EOpFma,
+
+    EOpFrexp,
+    EOpLdexp,
+
+    EOpFloatBitsToInt,
+    EOpFloatBitsToUint,
+    EOpIntBitsToFloat,
+    EOpUintBitsToFloat,
+    EOpDoubleBitsToInt64,
+    EOpDoubleBitsToUint64,
+    EOpInt64BitsToDouble,
+    EOpUint64BitsToDouble,
+    EOpPackSnorm2x16,
+    EOpUnpackSnorm2x16,
+    EOpPackUnorm2x16,
+    EOpUnpackUnorm2x16,
+    EOpPackSnorm4x8,
+    EOpUnpackSnorm4x8,
+    EOpPackUnorm4x8,
+    EOpUnpackUnorm4x8,
+    EOpPackHalf2x16,
+    EOpUnpackHalf2x16,
+    EOpPackDouble2x32,
+    EOpUnpackDouble2x32,
+    EOpPackInt2x32,
+    EOpUnpackInt2x32,
+    EOpPackUint2x32,
+    EOpUnpackUint2x32,
+
+    EOpLength,
+    EOpDistance,
+    EOpDot,
+    EOpCross,
+    EOpNormalize,
+    EOpFaceForward,
+    EOpReflect,
+    EOpRefract,
+
+    EOpDPdx,            // Fragment only
+    EOpDPdy,            // Fragment only
+    EOpFwidth,          // Fragment only
+    EOpDPdxFine,        // Fragment only
+    EOpDPdyFine,        // Fragment only
+    EOpFwidthFine,      // Fragment only
+    EOpDPdxCoarse,      // Fragment only
+    EOpDPdyCoarse,      // Fragment only
+    EOpFwidthCoarse,    // Fragment only
+
+    EOpInterpolateAtCentroid, // Fragment only
+    EOpInterpolateAtSample,   // Fragment only
+    EOpInterpolateAtOffset,   // Fragment only
+
+    EOpMatrixTimesMatrix,
+    EOpOuterProduct,
+    EOpDeterminant,
+    EOpMatrixInverse,
+    EOpTranspose,
+
+    EOpFtransform,
+
+    EOpNoise,
+
+    EOpEmitVertex,           // geometry only
+    EOpEndPrimitive,         // geometry only
+    EOpEmitStreamVertex,     // geometry only
+    EOpEndStreamPrimitive,   // geometry only
+
+    EOpBarrier,
+    EOpMemoryBarrier,
+    EOpMemoryBarrierAtomicCounter,
+    EOpMemoryBarrierBuffer,
+    EOpMemoryBarrierImage,
+    EOpMemoryBarrierShared,  // compute only
+    EOpGroupMemoryBarrier,   // compute only
+
+    EOpBallot,
+    EOpReadInvocation,
+    EOpReadFirstInvocation,
+
+    EOpAnyInvocation,
+    EOpAllInvocations,
+    EOpAllInvocationsEqual,
+
+    EOpAtomicAdd,
+    EOpAtomicMin,
+    EOpAtomicMax,
+    EOpAtomicAnd,
+    EOpAtomicOr,
+    EOpAtomicXor,
+    EOpAtomicExchange,
+    EOpAtomicCompSwap,
+
+    EOpAtomicCounterIncrement,
+    EOpAtomicCounterDecrement,
+    EOpAtomicCounter,
+
+    EOpAny,
+    EOpAll,
+
+    //
+    // Branch
+    //
+
+    EOpKill,            // Fragment only
+    EOpReturn,
+    EOpBreak,
+    EOpContinue,
+    EOpCase,
+    EOpDefault,
+
+    //
+    // Constructors
+    //
+
+    EOpConstructGuardStart,
+    EOpConstructInt,          // these first scalar forms also identify what implicit conversion is needed
+    EOpConstructUint,
+    EOpConstructInt64,
+    EOpConstructUint64,
+    EOpConstructBool,
+    EOpConstructFloat,
+    EOpConstructDouble,
+    EOpConstructVec2,
+    EOpConstructVec3,
+    EOpConstructVec4,
+    EOpConstructDVec2,
+    EOpConstructDVec3,
+    EOpConstructDVec4,
+    EOpConstructBVec2,
+    EOpConstructBVec3,
+    EOpConstructBVec4,
+    EOpConstructIVec2,
+    EOpConstructIVec3,
+    EOpConstructIVec4,
+    EOpConstructUVec2,
+    EOpConstructUVec3,
+    EOpConstructUVec4,
+    EOpConstructI64Vec2,
+    EOpConstructI64Vec3,
+    EOpConstructI64Vec4,
+    EOpConstructU64Vec2,
+    EOpConstructU64Vec3,
+    EOpConstructU64Vec4,
+    EOpConstructMat2x2,
+    EOpConstructMat2x3,
+    EOpConstructMat2x4,
+    EOpConstructMat3x2,
+    EOpConstructMat3x3,
+    EOpConstructMat3x4,
+    EOpConstructMat4x2,
+    EOpConstructMat4x3,
+    EOpConstructMat4x4,
+    EOpConstructDMat2x2,
+    EOpConstructDMat2x3,
+    EOpConstructDMat2x4,
+    EOpConstructDMat3x2,
+    EOpConstructDMat3x3,
+    EOpConstructDMat3x4,
+    EOpConstructDMat4x2,
+    EOpConstructDMat4x3,
+    EOpConstructDMat4x4,
+    EOpConstructStruct,
+    EOpConstructTextureSampler,
+    EOpConstructGuardEnd,
+
+    //
+    // moves
+    //
+
+    EOpAssign,
+    EOpAddAssign,
+    EOpSubAssign,
+    EOpMulAssign,
+    EOpVectorTimesMatrixAssign,
+    EOpVectorTimesScalarAssign,
+    EOpMatrixTimesScalarAssign,
+    EOpMatrixTimesMatrixAssign,
+    EOpDivAssign,
+    EOpModAssign,
+    EOpAndAssign,
+    EOpInclusiveOrAssign,
+    EOpExclusiveOrAssign,
+    EOpLeftShiftAssign,
+    EOpRightShiftAssign,
+
+    //
+    // Array operators
+    //
+
+    EOpArrayLength,      // "Array" distinguishes from length(v) built-in function, but it applies to vectors and matrices as well.
+
+    //
+    // Image operations
+    // (bracketed by the Begin/End guards so isImage() can do a range test)
+    //
+
+    EOpImageGuardBegin,
+
+    EOpImageQuerySize,
+    EOpImageQuerySamples,
+    EOpImageLoad,
+    EOpImageStore,
+    EOpImageAtomicAdd,
+    EOpImageAtomicMin,
+    EOpImageAtomicMax,
+    EOpImageAtomicAnd,
+    EOpImageAtomicOr,
+    EOpImageAtomicXor,
+    EOpImageAtomicExchange,
+    EOpImageAtomicCompSwap,
+
+    EOpSubpassLoad,
+    EOpSubpassLoadMS,
+    EOpSparseImageLoad,
+
+    EOpImageGuardEnd,
+
+    //
+    // Texture operations
+    // (bracketed by the Begin/End guards so isTexture()/isSparseTexture()
+    // can do range tests)
+    //
+
+    EOpTextureGuardBegin,
+
+    EOpTextureQuerySize,
+    EOpTextureQueryLod,
+    EOpTextureQueryLevels,
+    EOpTextureQuerySamples,
+    EOpTexture,
+    EOpTextureProj,
+    EOpTextureLod,
+    EOpTextureOffset,
+    EOpTextureFetch,
+    EOpTextureFetchOffset,
+    EOpTextureProjOffset,
+    EOpTextureLodOffset,
+    EOpTextureProjLod,
+    EOpTextureProjLodOffset,
+    EOpTextureGrad,
+    EOpTextureGradOffset,
+    EOpTextureProjGrad,
+    EOpTextureProjGradOffset,
+    EOpTextureGather,
+    EOpTextureGatherOffset,
+    EOpTextureGatherOffsets,
+    EOpTextureClamp,
+    EOpTextureOffsetClamp,
+    EOpTextureGradClamp,
+    EOpTextureGradOffsetClamp,
+
+    EOpSparseTextureGuardBegin,
+
+    EOpSparseTexture,
+    EOpSparseTextureLod,
+    EOpSparseTextureOffset,
+    EOpSparseTextureFetch,
+    EOpSparseTextureFetchOffset,
+    EOpSparseTextureLodOffset,
+    EOpSparseTextureGrad,
+    EOpSparseTextureGradOffset,
+    EOpSparseTextureGather,
+    EOpSparseTextureGatherOffset,
+    EOpSparseTextureGatherOffsets,
+    EOpSparseTexelsResident,
+    EOpSparseTextureClamp,
+    EOpSparseTextureOffsetClamp,
+    EOpSparseTextureGradClamp,
+    EOpSparseTextureGradOffsetClamp,
+
+    EOpSparseTextureGuardEnd,
+
+    EOpTextureGuardEnd,
+
+    //
+    // Integer operations
+    //
+
+    EOpAddCarry,
+    EOpSubBorrow,
+    EOpUMulExtended,
+    EOpIMulExtended,
+    EOpBitfieldExtract,
+    EOpBitfieldInsert,
+    EOpBitFieldReverse,
+    EOpBitCount,
+    EOpFindLSB,
+    EOpFindMSB,
+
+    //
+    // HLSL operations
+    //
+
+    EOpClip,
+    EOpIsFinite,
+    EOpLog10,
+    EOpRcp,
+    EOpSaturate,
+    EOpSinCos,
+    EOpGenMul,           // mul(x,y) on any of mat/vec/scalars
+    EOpDst,
+};
+
+// Forward declarations of the node class hierarchy and traverser,
+// so TIntermNode (below) can declare its down-cast interface.
+class TIntermTraverser;
+class TIntermOperator;
+class TIntermAggregate;
+class TIntermUnary;
+class TIntermBinary;
+class TIntermConstantUnion;
+class TIntermSelection;
+class TIntermSwitch;
+class TIntermBranch;
+class TIntermTyped;
+class TIntermMethod;
+class TIntermSymbol;
+
+} // end namespace glslang
+
+//
+// Base class for the tree nodes
+//
+// (Put outside the glslang namespace, as it's used as part of the external interface.)
+//
+class TIntermNode {
+public:
+    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+
+    TIntermNode() { loc.init(); }
+    virtual const glslang::TSourceLoc& getLoc() const { return loc; }
+    virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; }
+    virtual void traverse(glslang::TIntermTraverser*) = 0;
+
+    // Safe down-casts: each returns the node as the requested subclass,
+    // or 0 if the node is not of that kind; subclasses override the one
+    // that applies to them.
+    virtual glslang::TIntermTyped* getAsTyped() { return 0; }
+    virtual glslang::TIntermOperator* getAsOperator() { return 0; }
+    virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; }
+    virtual glslang::TIntermAggregate* getAsAggregate() { return 0; }
+    virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; }
+    virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; }
+    virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; }
+    virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; }
+    virtual glslang::TIntermMethod* getAsMethodNode() { return 0; }
+    virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; }
+    virtual glslang::TIntermBranch* getAsBranchNode() { return 0; }
+
+    // const versions of the same down-casts.
+    virtual const glslang::TIntermTyped* getAsTyped() const { return 0; }
+    virtual const glslang::TIntermOperator* getAsOperator() const { return 0; }
+    virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; }
+    virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; }
+    virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; }
+    virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; }
+    virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; }
+    virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; }
+    virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; }
+    virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; }
+    virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; }
+    virtual ~TIntermNode() { }
+protected:
+    glslang::TSourceLoc loc;  // source location the node was created from
+};
+
+namespace glslang {
+
+//
+// This is just to help yacc.
+//
+struct TIntermNodePair {
+    TIntermNode* node1;  // first of the pair handed back to the grammar
+    TIntermNode* node2;  // second of the pair
+};
+
+//
+// Intermediate class for nodes that have a type.
+//
+class TIntermTyped : public TIntermNode {
+public:
+    // The node's type is shallow-copied from the argument (see TType::shallowCopy).
+    TIntermTyped(const TType& t) { type.shallowCopy(t); }
+    TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); }
+    virtual TIntermTyped* getAsTyped() { return this; }
+    virtual const TIntermTyped* getAsTyped() const { return this; }
+    virtual void setType(const TType& t) { type.shallowCopy(t); }
+    virtual const TType& getType() const { return type; }
+    virtual TType& getWritableType() { return type; }
+
+    // Convenience pass-throughs to the node's type.
+    virtual TBasicType getBasicType() const { return type.getBasicType(); }
+    virtual TQualifier& getQualifier() { return type.getQualifier(); }
+    virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
+    virtual void propagatePrecision(TPrecisionQualifier);
+    virtual int getVectorSize() const { return type.getVectorSize(); }
+    virtual int getMatrixCols() const { return type.getMatrixCols(); }
+    virtual int getMatrixRows() const { return type.getMatrixRows(); }
+    virtual bool isMatrix() const { return type.isMatrix(); }
+    virtual bool isArray() const { return type.isArray(); }
+    virtual bool isVector() const { return type.isVector(); }
+    virtual bool isScalar() const { return type.isScalar(); }
+    virtual bool isStruct() const { return type.isStruct(); }
+    TString getCompleteString() const { return type.getCompleteString(); }
+
+protected:
+    TType type;  // the type of the value this node produces
+};
+
+//
+// Handle for, do-while, and while loops.
+//
+class TIntermLoop : public TIntermNode {
+public:
+    TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) :
+        body(aBody),
+        test(aTest),
+        terminal(aTerminal),
+        first(testFirst) { }
+    virtual void traverse(TIntermTraverser*);
+    TIntermNode* getBody() const { return body; }
+    TIntermTyped* getTest() const { return test; }
+    TIntermTyped* getTerminal() const { return terminal; }
+    // True when the test runs before the body (while/for); false for do-while.
+    bool testFirst() const { return first; }
+protected:
+    TIntermNode* body;       // code to loop over
+    TIntermTyped* test;      // exit condition associated with loop, could be 0 for 'for' loops
+    TIntermTyped* terminal;  // exists for for-loops
+    bool first;              // true for while and for, not for do-while
+};
+
+//
+// Handle case, break, continue, return, and kill.
+//
+class TIntermBranch : public TIntermNode {
+public:
+    TIntermBranch(TOperator op, TIntermTyped* e) :
+        flowOp(op),
+        expression(e) { }
+    virtual TIntermBranch* getAsBranchNode() { return this; }
+    virtual const TIntermBranch* getAsBranchNode() const { return this; }
+    virtual void traverse(TIntermTraverser*);
+    TOperator getFlowOp() const { return flowOp; }
+    TIntermTyped* getExpression() const { return expression; }
+protected:
+    TOperator flowOp;          // which branch: EOpCase, EOpBreak, EOpContinue, EOpReturn, EOpKill, ...
+    TIntermTyped* expression;  // value attached to the branch (e.g., return value), if any
+};
+
+//
+// Represent method names before seeing their calling signature
+// or resolving them to operations. Just an expression as the base object
+// and a textual name.
+//
+class TIntermMethod : public TIntermTyped {
+public:
+    TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { }
+    virtual TIntermMethod* getAsMethodNode() { return this; }
+    virtual const TIntermMethod* getAsMethodNode() const { return this; }
+    virtual const TString& getMethodName() const { return method; }
+    virtual TIntermTyped* getObject() const { return object; }
+    virtual void traverse(TIntermTraverser*);
+protected:
+    TIntermTyped* object;  // the expression the method is invoked on
+    TString method;        // the method's name, unresolved at this point
+};
+
+//
+// Nodes that correspond to symbols or constants in the source code.
+//
+class TIntermSymbol : public TIntermTyped {
+public:
+    // if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes from
+    // per process threadPoolAllocator, then it causes increased memory usage per compile
+    // it is essential to use "symbol = sym" to assign to symbol
+    TIntermSymbol(int i, const TString& n, const TType& t)
+        : TIntermTyped(t), id(i), constSubtree(nullptr)
+    { name = n; }
+    virtual int getId() const { return id; }
+    virtual const TString& getName() const { return name; }
+    virtual void traverse(TIntermTraverser*);
+    virtual TIntermSymbol* getAsSymbolNode() { return this; }
+    virtual const TIntermSymbol* getAsSymbolNode() const { return this; }
+    void setConstArray(const TConstUnionArray& c) { constArray = c; }
+    const TConstUnionArray& getConstArray() const { return constArray; }
+    void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
+    TIntermTyped* getConstSubtree() const { return constSubtree; }
+protected:
+    int id;                      // the unique id of the symbol this node represents
+    TString name;                // the name of the symbol this node represents
+    TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
+    TIntermTyped* constSubtree;  // expression tree giving the symbol's value, when set via setConstSubtree()
+};
+
+// Node holding a front-end compile-time constant value.
+class TIntermConstantUnion : public TIntermTyped {
+public:
+    TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { }
+    const TConstUnionArray& getConstArray() const { return constArray; }
+    virtual TIntermConstantUnion* getAsConstantUnion() { return this; }
+    virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; }
+    virtual void traverse(TIntermTraverser*);
+    // Constant folding: apply an operator to this constant (with the given
+    // right-hand constant, or converting to the given type).
+    virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const;
+    virtual TIntermTyped* fold(TOperator, const TType&) const;
+    void setLiteral() { literal = true; }
+    void setExpression() { literal = false; }
+    bool isLiteral() const { return literal; }
+protected:
+    const TConstUnionArray constArray;  // the constant's value(s)
+    bool literal;                       // true if node represents a literal in the source code
+};
+
+// Represent the independent aspects of a texturing TOperator
+// Represent the independent aspects of a texturing TOperator
+// (flags are filled in by TIntermOperator::crackTexture()).
+struct TCrackedTextureOp {
+    bool query;     // a query operation (size, lod, levels, samples, residency)
+    bool proj;      // projective form
+    bool lod;       // explicit level of detail
+    bool fetch;     // texel fetch
+    bool offset;    // has a single offset argument
+    bool offsets;   // has per-texel offsets (gather-offsets forms)
+    bool gather;    // texture gather
+    bool grad;      // explicit gradients
+    bool subpass;   // subpass-input load
+    bool lodClamp;  // has an LOD clamp (the *Clamp forms)
+};
+
+//
+// Intermediate class for node types that hold operators.
+//
+class TIntermOperator : public TIntermTyped {
+public:
+    virtual TIntermOperator* getAsOperator() { return this; }
+    virtual const TIntermOperator* getAsOperator() const { return this; }
+    TOperator getOp() const { return op; }
+    virtual bool promote() { return true; }
+    bool modifiesState() const;
+    bool isConstructor() const;
+    // Range tests against the guard enumerators in TOperator.
+    bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
+    bool isImage()   const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
+    bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
+    bool isSparseImage()   const { return op == EOpSparseImageLoad; }
+
+    // Crack the op into the individual dimensions of texturing operation.
+    void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const
+    {
+        // Start with everything off, then turn on the aspects this op has.
+        cracked.query = false;
+        cracked.proj = false;
+        cracked.lod = false;
+        cracked.fetch = false;
+        cracked.offset = false;
+        cracked.offsets = false;
+        cracked.gather = false;
+        cracked.grad = false;
+        cracked.subpass = false;
+        cracked.lodClamp = false;
+
+        switch (op) {
+        case EOpImageQuerySize:
+        case EOpImageQuerySamples:
+        case EOpTextureQuerySize:
+        case EOpTextureQueryLod:
+        case EOpTextureQueryLevels:
+        case EOpTextureQuerySamples:
+        case EOpSparseTexelsResident:
+            cracked.query = true;
+            break;
+        case EOpTexture:
+        case EOpSparseTexture:
+            break;
+        case EOpTextureClamp:
+        case EOpSparseTextureClamp:
+            cracked.lodClamp = true;
+            break;
+        case EOpTextureProj:
+            cracked.proj = true;
+            break;
+        case EOpTextureLod:
+        case EOpSparseTextureLod:
+            cracked.lod = true;
+            break;
+        case EOpTextureOffset:
+        case EOpSparseTextureOffset:
+            cracked.offset = true;
+            break;
+        case EOpTextureOffsetClamp:
+        case EOpSparseTextureOffsetClamp:
+            cracked.offset = true;
+            cracked.lodClamp = true;
+            break;
+        case EOpTextureFetch:
+        case EOpSparseTextureFetch:
+            cracked.fetch = true;
+            // these sampler dimensions take an explicit LOD argument
+            if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+                cracked.lod = true;
+            break;
+        case EOpTextureFetchOffset:
+        case EOpSparseTextureFetchOffset:
+            cracked.fetch = true;
+            cracked.offset = true;
+            if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
+                cracked.lod = true;
+            break;
+        case EOpTextureProjOffset:
+            cracked.offset = true;
+            cracked.proj = true;
+            break;
+        case EOpTextureLodOffset:
+        case EOpSparseTextureLodOffset:
+            cracked.offset = true;
+            cracked.lod = true;
+            break;
+        case EOpTextureProjLod:
+            cracked.lod = true;
+            cracked.proj = true;
+            break;
+        case EOpTextureProjLodOffset:
+            cracked.offset = true;
+            cracked.lod = true;
+            cracked.proj = true;
+            break;
+        case EOpTextureGrad:
+        case EOpSparseTextureGrad:
+            cracked.grad = true;
+            break;
+        case EOpTextureGradClamp:
+        case EOpSparseTextureGradClamp:
+            cracked.grad = true;
+            cracked.lodClamp = true;
+            break;
+        case EOpTextureGradOffset:
+        case EOpSparseTextureGradOffset:
+            cracked.grad = true;
+            cracked.offset = true;
+            break;
+        case EOpTextureProjGrad:
+            cracked.grad = true;
+            cracked.proj = true;
+            break;
+        case EOpTextureProjGradOffset:
+            cracked.grad = true;
+            cracked.offset = true;
+            cracked.proj = true;
+            break;
+        case EOpTextureGradOffsetClamp:
+        case EOpSparseTextureGradOffsetClamp:
+            cracked.grad = true;
+            cracked.offset = true;
+            cracked.lodClamp = true;
+            break;
+        case EOpTextureGather:
+        case EOpSparseTextureGather:
+            cracked.gather = true;
+            break;
+        case EOpTextureGatherOffset:
+        case EOpSparseTextureGatherOffset:
+            cracked.gather = true;
+            cracked.offset = true;
+            break;
+        case EOpTextureGatherOffsets:
+        case EOpSparseTextureGatherOffsets:
+            cracked.gather = true;
+            cracked.offsets = true;
+            break;
+        case EOpSubpassLoad:
+        case EOpSubpassLoadMS:
+            cracked.subpass = true;
+            break;
+        default:
+            break;
+        }
+    }
+
+protected:
+    // NOTE(review): the first form defaults the node's type to float;
+    // presumably callers setType() later when the real type is known.
+    TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o) {}
+    TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o) {}
+    TOperator op;
+};
+
+//
+// Nodes for all the basic binary math operators.
+//
+class TIntermBinary : public TIntermOperator {
+public:
+    // Operands start out null and are filled in by the parser via
+    // setLeft()/setRight(). (Previously they were left uninitialized, so
+    // reading either before both setters ran was undefined behavior.)
+    TIntermBinary(TOperator o) : TIntermOperator(o), left(nullptr), right(nullptr) {}
+    virtual void traverse(TIntermTraverser*);
+    virtual void setLeft(TIntermTyped* n) { left = n; }
+    virtual void setRight(TIntermTyped* n) { right = n; }
+    virtual TIntermTyped* getLeft() const { return left; }
+    virtual TIntermTyped* getRight() const { return right; }
+    virtual TIntermBinary* getAsBinaryNode() { return this; }
+    virtual const TIntermBinary* getAsBinaryNode() const { return this; }
+    virtual bool promote();
+    virtual void updatePrecision();
+protected:
+    TIntermTyped* left;   // left-hand operand
+    TIntermTyped* right;  // right-hand operand
+};
+
+//
+// Nodes for unary math operators.
+//
+class TIntermUnary : public TIntermOperator {
+public:
+    // The operand starts null and is filled in via setOperand().
+    TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {}
+    TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {}
+    virtual void traverse(TIntermTraverser*);
+    virtual void setOperand(TIntermTyped* o) { operand = o; }
+    virtual TIntermTyped* getOperand() { return operand; }
+    virtual const TIntermTyped* getOperand() const { return operand; }
+    virtual TIntermUnary* getAsUnaryNode() { return this; }
+    virtual const TIntermUnary* getAsUnaryNode() const { return this; }
+    virtual bool promote();
+    virtual void updatePrecision();
+protected:
+    TIntermTyped* operand;  // the single child expression
+};
+
+typedef TVector<TIntermNode*> TIntermSequence;
+typedef TVector<int> TQualifierList;
+//
+// Nodes that operate on an arbitrary sized set of children.
+//
+class TIntermAggregate : public TIntermOperator {
+public:
+    // Both constructors now initialize every flag. Previously 'optimize' and
+    // 'debug' were never initialized, and the TOperator constructor also left
+    // 'userDefined' uninitialized, so getOptimize()/getDebug()/isUserDefined()
+    // read indeterminate values until the setters were called.
+    TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), optimize(false), debug(false), pragmaTable(0) { }
+    TIntermAggregate(TOperator o) : TIntermOperator(o), userDefined(false), optimize(false), debug(false), pragmaTable(0) { }
+    ~TIntermAggregate() { delete pragmaTable; }
+    virtual TIntermAggregate* getAsAggregate() { return this; }
+    virtual const TIntermAggregate* getAsAggregate() const { return this; }
+    virtual void setOperator(TOperator o) { op = o; }
+    virtual TIntermSequence& getSequence() { return sequence; }
+    virtual const TIntermSequence& getSequence() const { return sequence; }
+    virtual void setName(const TString& n) { name = n; }
+    virtual const TString& getName() const { return name; }
+    virtual void traverse(TIntermTraverser*);
+    virtual void setUserDefined() { userDefined = true; }
+    virtual bool isUserDefined() { return userDefined; }
+    virtual TQualifierList& getQualifierList() { return qualifier; }
+    virtual const TQualifierList& getQualifierList() const { return qualifier; }
+    void setOptimize(bool o) { optimize = o; }
+    void setDebug(bool d) { debug = d; }
+    bool getOptimize() const { return optimize; }
+    bool getDebug() const { return debug; }
+    void addToPragmaTable(const TPragmaTable& pTable);
+    // NOTE(review): dereferences pragmaTable without a null check; only call
+    // after addToPragmaTable() has populated it.
+    const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
+protected:
+    TIntermAggregate(const TIntermAggregate&);            // disallow copy constructor
+    TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
+    TIntermSequence sequence;  // the child nodes
+    TQualifierList qualifier;
+    TString name;
+    bool userDefined;          // used for user defined function names
+    bool optimize;
+    bool debug;
+    TPragmaTable* pragmaTable; // owned; deleted in the destructor
+};
+
+//
+// For if tests.
+//
+class TIntermSelection : public TIntermTyped {
+public:
+    // Statement form: no value, so the node's type is void.
+    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) :
+        TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB) {}
+    // Expression form (e.g., ?:), carrying the result type.
+    TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) :
+        TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB) {}
+    virtual void traverse(TIntermTraverser*);
+    virtual TIntermTyped* getCondition() const { return condition; }
+    virtual TIntermNode* getTrueBlock() const { return trueBlock; }
+    virtual TIntermNode* getFalseBlock() const { return falseBlock; }
+    virtual TIntermSelection* getAsSelectionNode() { return this; }
+    virtual const TIntermSelection* getAsSelectionNode() const { return this; }
+protected:
+    TIntermTyped* condition;  // the test expression
+    TIntermNode* trueBlock;   // taken when condition is true
+    TIntermNode* falseBlock;  // taken when condition is false
+};
+
+//
+// For switch statements. Designed use is that a switch will have sequence of nodes
+// that are either case/default nodes or a *single* node that represents all the code
+// in between (if any) consecutive case/defaults. So, a traversal need only deal with
+// 0 or 1 nodes per case/default statement.
+//
+class TIntermSwitch : public TIntermNode {
+public:
+    TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b) { }
+    virtual void traverse(TIntermTraverser*);
+    virtual TIntermNode* getCondition() const { return condition; }
+    virtual TIntermAggregate* getBody() const { return body; }
+    virtual TIntermSwitch* getAsSwitchNode() { return this; }
+    virtual const TIntermSwitch* getAsSwitchNode() const { return this; }
+protected:
+    TIntermTyped* condition;  // the switched-on expression
+    TIntermAggregate* body;   // sequence of case/default nodes and their code (see comment above)
+};
+
+// When during traversal a visit method is being called (see TIntermTraverser).
+enum TVisit
+{
+    EvPreVisit,   // before a node's children are visited
+    EvInVisit,    // between children (in-order)
+    EvPostVisit   // after a node's children are visited
+};
+
+//
+// For traversing the tree. User should derive from this,
+// put their traversal specific data in it, and then pass
+// it to a Traverse method.
+//
+// When using this, just fill in the methods for nodes you want visited.
+// Return false from a pre-visit to skip visiting that node's subtree.
+//
+// Explicitly set postVisit to true if you want post visiting, otherwise,
+// filled in methods will only be called at pre-visit time (before processing
+// the subtree). Similarly for inVisit for in-order visiting of nodes with
+// multiple children.
+//
+// If you only want post-visits, explicitly turn off preVisit (and inVisit)
+// and turn on postVisit.
+//
+// In general, for the visit*() methods, return true from interior nodes
+// to have the traversal continue on to children.
+//
+// If you process children yourself, or don't want them processed, return false.
+//
+class TIntermTraverser {
+public:
+    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
+    TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) :
+        preVisit(preVisit),
+        inVisit(inVisit),
+        postVisit(postVisit),
+        rightToLeft(rightToLeft),
+        depth(0),
+        maxDepth(0) { }
+    virtual ~TIntermTraverser() { }
+
+    // Override the visits you care about; defaults visit everything and
+    // continue into children (see class comment above).
+    virtual void visitSymbol(TIntermSymbol*)               { }
+    virtual void visitConstantUnion(TIntermConstantUnion*) { }
+    virtual bool visitBinary(TVisit, TIntermBinary*)       { return true; }
+    virtual bool visitUnary(TVisit, TIntermUnary*)         { return true; }
+    virtual bool visitSelection(TVisit, TIntermSelection*) { return true; }
+    virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; }
+    virtual bool visitLoop(TVisit, TIntermLoop*)           { return true; }
+    virtual bool visitBranch(TVisit, TIntermBranch*)       { return true; }
+    virtual bool visitSwitch(TVisit, TIntermSwitch*)       { return true; }
+
+    // Deepest nesting level reached so far during traversal.
+    int getMaxDepth() const { return maxDepth; }
+
+    void incrementDepth(TIntermNode *current)
+    {
+        depth++;
+        // parenthesized call avoids clashing with a max() macro (e.g. windows.h)
+        maxDepth = (std::max)(maxDepth, depth);
+        path.push_back(current);
+    }
+
+    void decrementDepth()
+    {
+        depth--;
+        path.pop_back();
+    }
+
+    // The current node's parent, or NULL at the root.
+    TIntermNode *getParentNode()
+    {
+        return path.size() == 0 ? NULL : path.back();
+    }
+
+    const bool preVisit;
+    const bool inVisit;
+    const bool postVisit;
+    const bool rightToLeft;
+
+protected:
+    TIntermTraverser& operator=(TIntermTraverser&);
+
+    int depth;
+    int maxDepth;
+
+    // All the nodes from root to the current node's parent during traversing.
+    TVector<TIntermNode *> path;
+};
+
+// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if
+// sized with the same symbol, involving no operations"
+inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2)
+{
+    // Both sides must be bare symbols (no operations) ...
+    const TIntermSymbol* symbol1 = node1->getAsSymbolNode();
+    const TIntermSymbol* symbol2 = node2->getAsSymbolNode();
+    if (symbol1 == nullptr || symbol2 == nullptr)
+        return false;
+
+    // ... and must be the very same symbol.
+    return symbol1->getId() == symbol2->getId();
+}
+
+} // end namespace glslang
+
+#endif // __INTERMEDIATE_H
diff --git a/chromium/third_party/glslang/src/glslang/Include/revision.h b/chromium/third_party/glslang/src/glslang/Include/revision.h
new file mode 100644
index 00000000000..e338849d452
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/revision.h
@@ -0,0 +1,6 @@
+// This header is generated by the make-revision script.
+// For the version, it uses the latest git tag followed by the number of commits.
+// For the date, it uses the current date (when the script is run).
+
+#define GLSLANG_REVISION "SPIRV99.947"
+#define GLSLANG_DATE "15-Feb-2016"
diff --git a/chromium/third_party/glslang/src/glslang/Include/revision.template b/chromium/third_party/glslang/src/glslang/Include/revision.template
new file mode 100644
index 00000000000..1d189e9372f
--- /dev/null
+++ b/chromium/third_party/glslang/src/glslang/Include/revision.template
@@ -0,0 +1,13 @@
+// The file revision.h should be updated to the latest version, somehow, on
+// check-in, if glslang has changed.
+//
+// revision.template is the source for revision.h when using SubWCRev as the
+// method of updating revision.h. You don't have to do it this way, the
+// requirement is only that revision.h gets updated.
+//
+// revision.h is under source control so that not all consumers of glslang
+// source have to figure out how to create revision.h just to get a build
+// going. However, if it is not updated, it can be a version behind.
+
+#define GLSLANG_REVISION "$WCREV$"
+#define GLSLANG_DATE "$WCDATE$"