summaryrefslogtreecommitdiffstats
path: root/UnknownVersion/include
diff options
context:
space:
mode:
Diffstat (limited to 'UnknownVersion/include')
-rw-r--r--UnknownVersion/include/EABase/config/eacompiler.h512
-rw-r--r--UnknownVersion/include/EABase/config/eacompilertraits.h1034
-rw-r--r--UnknownVersion/include/EABase/config/eaplatform.h571
-rw-r--r--UnknownVersion/include/EABase/eabase.h884
-rw-r--r--UnknownVersion/include/EABase/earesult.h78
-rw-r--r--UnknownVersion/include/EASTL/algorithm.h2981
-rw-r--r--UnknownVersion/include/EASTL/allocator.h344
-rw-r--r--UnknownVersion/include/EASTL/bitset.h1777
-rw-r--r--UnknownVersion/include/EASTL/bonus/sort_extra.h482
-rw-r--r--UnknownVersion/include/EASTL/core_allocator_adapter.h296
-rw-r--r--UnknownVersion/include/EASTL/fixed_allocator.h467
-rw-r--r--UnknownVersion/include/EASTL/fixed_hash_map.h419
-rw-r--r--UnknownVersion/include/EASTL/fixed_hash_set.h422
-rw-r--r--UnknownVersion/include/EASTL/fixed_list.h342
-rw-r--r--UnknownVersion/include/EASTL/fixed_map.h358
-rw-r--r--UnknownVersion/include/EASTL/fixed_set.h360
-rw-r--r--UnknownVersion/include/EASTL/fixed_string.h539
-rw-r--r--UnknownVersion/include/EASTL/fixed_substring.h300
-rw-r--r--UnknownVersion/include/EASTL/fixed_vector.h333
-rw-r--r--UnknownVersion/include/EASTL/functional.h936
-rw-r--r--UnknownVersion/include/EASTL/hash_map.h337
-rw-r--r--UnknownVersion/include/EASTL/hash_set.h273
-rw-r--r--UnknownVersion/include/EASTL/heap.h592
-rw-r--r--UnknownVersion/include/EASTL/internal/config.h1205
-rw-r--r--UnknownVersion/include/EASTL/internal/eastl_rw.h47
-rw-r--r--UnknownVersion/include/EASTL/internal/fixed_pool.h1397
-rw-r--r--UnknownVersion/include/EASTL/internal/generic_iterator.h242
-rw-r--r--UnknownVersion/include/EASTL/internal/hashtable.h2261
-rw-r--r--UnknownVersion/include/EASTL/internal/red_black_tree.h1917
-rw-r--r--UnknownVersion/include/EASTL/internal/type_compound.h485
-rw-r--r--UnknownVersion/include/EASTL/internal/type_fundamental.h187
-rw-r--r--UnknownVersion/include/EASTL/internal/type_pod.h306
-rw-r--r--UnknownVersion/include/EASTL/internal/type_properties.h283
-rw-r--r--UnknownVersion/include/EASTL/internal/type_transformations.h244
-rw-r--r--UnknownVersion/include/EASTL/iterator.h621
-rw-r--r--UnknownVersion/include/EASTL/list.h1863
-rw-r--r--UnknownVersion/include/EASTL/map.h520
-rw-r--r--UnknownVersion/include/EASTL/memory.h698
-rw-r--r--UnknownVersion/include/EASTL/set.h567
-rw-r--r--UnknownVersion/include/EASTL/sort.h912
-rw-r--r--UnknownVersion/include/EASTL/string.h3498
-rw-r--r--UnknownVersion/include/EASTL/type_traits.h359
-rw-r--r--UnknownVersion/include/EASTL/utility.h312
-rw-r--r--UnknownVersion/include/EASTL/vector.h1649
-rw-r--r--UnknownVersion/include/EASTL/vector_map.h248
45 files changed, 34458 insertions, 0 deletions
diff --git a/UnknownVersion/include/EABase/config/eacompiler.h b/UnknownVersion/include/EABase/config/eacompiler.h
new file mode 100644
index 0000000..97b65ef
--- /dev/null
+++ b/UnknownVersion/include/EABase/config/eacompiler.h
@@ -0,0 +1,512 @@
+/*
+Copyright (C) 2009 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*-----------------------------------------------------------------------------
+ * config/eacompiler.h
+ *
+ * Copyright (c) 2002 - 2005 Electronic Arts Inc. All rights reserved.
+ * Maintained by Paul Pedriana, Maxis
+ *
+ * Modified to support Clang++ (v2.8) by Austin Seipp, 2010.
+ *
+ *-----------------------------------------------------------------------------
+ * Currently supported defines include:
+ * EA_COMPILER_GNUC
+ * EA_COMPILER_CLANG
+ * EA_COMPILER_ARM
+ * EA_COMPILER_EDG
+ * EA_COMPILER_SN
+ * EA_COMPILER_MSVC
+ * EA_COMPILER_METROWERKS
+ * EA_COMPILER_INTEL
+ * EA_COMPILER_BORLANDC
+ * EA_COMPILER_IBM
+ *
+ * EA_COMPILER_VERSION = <integer>
+ * EA_COMPILER_NAME = <string>
+ * EA_COMPILER_STRING = <string>
+ *
+ * EA_COMPILER_NO_STATIC_CONSTANTS
+ * EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ * EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ * EA_COMPILER_NO_MEMBER_TEMPLATES
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ * EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ * EA_COMPILER_NO_VOID_RETURNS
+ * EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ * EA_COMPILER_NO_DEDUCED_TYPENAME
+ * EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ * EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ * EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+ * EA_COMPILER_NO_RTTI
+ * EA_COMPILER_NO_EXCEPTIONS
+ * EA_COMPILER_NO_UNWIND
+ * EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ * EA_COMPILER_NO_STATIC_VARIABLE_INIT
+ * EA_COMPILER_NO_STATIC_FUNCTION_INIT
+ *
+ *-----------------------------------------------------------------------------
+ *
+ * Documentation
+ * EA_COMPILER_NO_STATIC_CONSTANTS
+ * Code such as this is legal, but some compilers fail to compile it:
+ * struct A{ static const a = 1; };
+ *
+ * EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+ * Some compilers fail to allow template specialization, such as with this:
+ * template<class U> void DoSomething(U u);
+ * void DoSomething(int x);
+ *
+ * EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+ * Some compilers fail to allow partial template specialization, such as with this:
+ * template <class T, class Allocator> class vector{ }; // Primary templated class.
+ * template <class Allocator> class vector<bool, Allocator>{ }; // Partially specialized version.
+ *
+ * EA_COMPILER_NO_MEMBER_TEMPLATES
+ * Some compilers fail to allow member template functions such as this:
+ * struct A{ template<class U> void DoSomething(U u); };
+ *
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+ * Some compilers fail to allow member template specialization, such as with this:
+ * struct A{
+ * template<class U> void DoSomething(U u);
+ * void DoSomething(int x);
+ * };
+ *
+ * EA_COMPILER_NO_TEMPLATE_TEMPLATES
+ * Code such as this is legal:
+ * template<typename T, template<typename> class U>
+ * U<T> SomeFunction(const U<T> x) { return x.DoSomething(); }
+ *
+ * EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+ * Some compilers fail to compile templated friends, as with this:
+ * struct A{ template<class U> friend class SomeFriend; };
+ * This is described in the C++ Standard at 14.5.3.
+ *
+ * EA_COMPILER_NO_VOID_RETURNS
+ * This is legal C++:
+ * void DoNothing1(){ };
+ * void DoNothing2(){ return DoNothing1(); }
+ *
+ * EA_COMPILER_NO_COVARIANT_RETURN_TYPE
+ * See the C++ standard sec 10.3,p5.
+ *
+ * EA_COMPILER_NO_DEDUCED_TYPENAME
+ * Some compilers don't support the use of 'typename' for
+ * dependent types in deduced contexts, as with this:
+ * template <class T> void Function(T, typename T::type);
+ *
+ * EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP
+ * Also known as Koenig lookup. Basically, if you have a function
+ *        that is in a namespace and you call that function without prefixing
+ * it with the namespace the compiler should look at any arguments
+ * you pass to that function call and search their namespace *first*
+ * to see if the given function exists there.
+ *
+ * EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+ * <exception> is in namespace std. Some std libraries fail to
+ * put the contents of <exception> in namespace std. The following
+ * code should normally be legal:
+ * void Function(){ std::terminate(); }
+ *
+ * EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+ * Some compilers fail to execute DoSomething() properly, though they
+ * succeed in compiling it, as with this:
+ * template <int i>
+ * bool DoSomething(int j){ return i == j; };
+ * DoSomething<1>(2);
+ *
+ * EA_COMPILER_NO_EXCEPTIONS
+ * The compiler is configured to disallow the use of try/throw/catch
+ * syntax (often to improve performance). Use of such syntax in this
+ * case will cause a compilation error.
+ *
+ * EA_COMPILER_NO_UNWIND
+ * The compiler is configured to allow the use of try/throw/catch
+ * syntax and behaviour but disables the generation of stack unwinding
+ * code for responding to exceptions (often to improve performance).
+ *
+ *---------------------------------------------------------------------------*/
+
+#ifndef INCLUDED_eacompiler_H
+#define INCLUDED_eacompiler_H
+
+#ifndef INCLUDED_eaplatform_H
+# include "EABase/config/eaplatform.h"
+#endif
+
+ // Note: This is used to generate the EA_COMPILER_STRING macros
+#ifndef INTERNAL_STRINGIZE
+# define INTERNAL_STRINGIZE(x) INTERNAL_PRIMITIVE_STRINGIZE(x)
+#endif
+#ifndef INTERNAL_PRIMITIVE_STRINGIZE
+# define INTERNAL_PRIMITIVE_STRINGIZE(x) #x
+#endif
+
+ // Note: this is for compatibility with non-clang compilers
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+ // Note: this is for compatibility with non-clang compilers
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+ // Note: this is for compatibility with non-clang compilers
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+ // EDG (EDG compiler front-end, used by other compilers such as SN)
+#if defined(__EDG_VERSION__)
+# define EA_COMPILER_EDG
+#endif
+
+
+ // SN
+#if defined(__SNC__) // SN Systems compiler
+ // Note that there are two versions of the SN compiler, one that is
+ // GNUC-based and a newer one which is based on an EDG (Edison Design
+ // Group) front-end with a back-end code generator made by SN.
+ // The EDG-based SN compiler uses "GCC compatibility mode" and thus
+ // defines __GNUC__ but isn't really GNUC. Also, as of this writing
+ // it appears that the SN compiler may arrive with MSVC-compatibility
+ // mode in addition as well. Thus, we define EA_COMPILER_SN
+	// separately from other EA_COMPILER defines; it is possible that both
+ // may be defined at the same time. Note that while the newer EDG-based
+ // SN compiler may emulate other compilers, it doesn't act exactly
+ // the same.
+# define EA_COMPILER_SN
+#endif
+
+
+ // Airplay SDK (third party mobile middleware compiler)
+#if defined(__S3E__)
+# define EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE
+#endif
+
+
+ // SNC (SN Systems)
+#if defined(__SNC__)
+# define EA_COMPILER_NAME "SNC"
+
+# ifdef __GNUC__ // If SN is using GCC-compatibility mode (which it usually is)...
+# define EA_COMPILER_GNUC
+# define EA_COMPILER_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) // We intentionally report the GCC version here. SN
+# define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, GCC version " INTERNAL_STRINGIZE( __GNUC__ ) "." INTERNAL_STRINGIZE( __GNUC_MINOR__ ) ", SNC version " INTERNAL_STRINGIZE( __SN_VER__ ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ )
+# else
+# define EA_COMPILER_VERSION __SN_VER__
+# define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( EA_COMPILER_VERSION ) ", EDG version " INTERNAL_STRINGIZE( __EDG_VERSION__ )
+# endif
+
+	// Clang (quite compatible with GCC)
+	// NOTE: checked BEFORE __GNUC__, because Clang also defines __GNUC__ for GCC
+	// compatibility; with the GCC branch first, EA_COMPILER_CLANG could never be set.
+#elif defined(__clang__)
+#	define EA_COMPILER_CLANG
+#	define EA_COMPILER_VERSION (__clang_major__ * 1000 + __clang_minor__)
+#	define EA_COMPILER_NAME "Clang++"
+#	define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( __clang_version__ )
+
+	// GCC (a.k.a. GNUC)
+#elif defined(__GNUC__) // GCC compilers exist for many platforms.
+#	define EA_COMPILER_GNUC
+#	define EA_COMPILER_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#	define EA_COMPILER_NAME "GCC"
+#	define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE( __GNUC__ ) "." INTERNAL_STRINGIZE( __GNUC_MINOR__ )
+#	if (__GNUC__ == 2) && (__GNUC_MINOR__ < 95) // If GCC < 2.95...
+#		define EA_COMPILER_NO_MEMBER_TEMPLATES
+#	endif
+#	if (__GNUC__ == 2) && (__GNUC_MINOR__ <= 97) // If GCC <= 2.97...
+#		define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+#	endif
+#	if (__GNUC__ == 3) && ((__GNUC_MINOR__ == 1) || (__GNUC_MINOR__ == 2)) // If GCC 3.1 or 3.2 (but not pre 3.1 or post 3.2)...
+#		define EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS
+#	endif
+
+ // Borland C++
+#elif defined(__BORLANDC__)
+# define EA_COMPILER_BORLANDC
+# define EA_COMPILER_VERSION __BORLANDC__
+# define EA_COMPILER_NAME "Borland C"
+ //#define EA_COMPILER_STRING (defined below)
+
+# if (__BORLANDC__ <= 0x0550) // If Borland C++ Builder 4 and 5...
+# define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+# endif
+# if (__BORLANDC__ >= 0x561) && (__BORLANDC__ < 0x600)
+# define EA_COMPILER_NO_MEMBER_FUNCTION_SPECIALIZATION
+# endif
+
+
+ // Intel C++ (via EDG front-end)
+#elif defined(__ICL) || defined(__ICC)
+# define EA_COMPILER_INTEL
+# if defined(__ICL)
+# define EA_COMPILER_VERSION __ICL
+# elif defined(__ICC)
+# define EA_COMPILER_VERSION __ICC
+# endif
+# define EA_COMPILER_NAME "Intel C++"
+ //#define EA_COMPILER_STRING (defined below)
+
+	// Intel is based on the EDG (Edison Design Group) front end and
+ // all recent versions are very compliant to the C++ standard.
+
+
+ // Metrowerks
+#elif defined(__MWERKS__) || defined(__CWCC__) // Metrowerks compilers exist for many platforms.
+# define EA_COMPILER_METROWERKS
+# ifdef __MWERKS__
+# define EA_COMPILER_VERSION __MWERKS__
+# else
+# define EA_COMPILER_VERSION __CWCC__
+# endif
+# define EA_COMPILER_NAME "Metrowerks"
+ //#define EA_COMPILER_STRING (defined below)
+
+# if (__MWERKS__ <= 0x2407) // If less than v7.x...
+# define EA_COMPILER_NO_MEMBER_FUNCTION_SPECIALIZATION
+# endif
+# if (__MWERKS__ <= 0x3003) // If less than v8.x...
+# define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS
+# endif
+
+
+ // Microsoft VC++
+#elif defined(_MSC_VER) && !(defined(__S3E__) && defined(__arm__) && defined(__aarch64__)) // S3E is a mobile SDK which mistakenly masquerades as VC++ on ARM.
+# define EA_COMPILER_MSVC
+# define EA_COMPILER_VERSION _MSC_VER
+# define EA_COMPILER_NAME "Microsoft Visual C++"
+ //#define EA_COMPILER_STRING (defined below)
+
+# if (_MSC_VER <= 1200) // If VC6.x and earlier...
+# if (_MSC_VER < 1200)
+# define EA_COMPILER_MSVCOLD
+# else
+# define EA_COMPILER_MSVC6
+# endif
+
+# if (_MSC_VER < 1200) // If VC5.x or earlier...
+# define EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+# endif
+# define EA_COMPILER_NO_EXPLICIT_FUNCTION_TEMPLATE_ARGUMENTS // The compiler compiles this OK, but executes it wrong. Fixed in VC7.0
+# define EA_COMPILER_NO_VOID_RETURNS // The compiler fails to compile such cases. Fixed in VC7.0
+# define EA_COMPILER_NO_EXCEPTION_STD_NAMESPACE // The compiler fails to compile such cases. Fixed in VC7.0
+# define EA_COMPILER_NO_DEDUCED_TYPENAME // The compiler fails to compile such cases. Fixed in VC7.0
+# define EA_COMPILER_NO_STATIC_CONSTANTS // The compiler fails to compile such cases. Fixed in VC7.0
+# define EA_COMPILER_NO_COVARIANT_RETURN_TYPE // The compiler fails to compile such cases. Fixed in VC7.1
+# define EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP // The compiler compiles this OK, but executes it wrong. Fixed in VC7.1
+# define EA_COMPILER_NO_TEMPLATE_TEMPLATES // The compiler fails to compile such cases. Fixed in VC7.1
+# define EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION // The compiler fails to compile such cases. Fixed in VC7.1
+# define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS // The compiler fails to compile such cases. Fixed in VC7.1
+ //#define EA_COMPILER_NO_MEMBER_TEMPLATES // VC6.x supports member templates properly 95% of the time. So do we flag the remaining 5%?
+ //#define EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION // VC6.x supports member templates properly 95% of the time. So do we flag the remaining 5%?
+
+# elif (_MSC_VER <= 1300) // If VC7.0 and earlier...
+# define EA_COMPILER_MSVC7
+
+# define EA_COMPILER_NO_COVARIANT_RETURN_TYPE // The compiler fails to compile such cases. Fixed in VC7.1
+# define EA_COMPILER_NO_ARGUMENT_DEPENDENT_LOOKUP // The compiler compiles this OK, but executes it wrong. Fixed in VC7.1
+# define EA_COMPILER_NO_TEMPLATE_TEMPLATES // The compiler fails to compile such cases. Fixed in VC7.1
+# define EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION // The compiler fails to compile such cases. Fixed in VC7.1
+# define EA_COMPILER_NO_MEMBER_TEMPLATE_FRIENDS // The compiler fails to compile such cases. Fixed in VC7.1
+# define EA_COMPILER_NO_MEMBER_FUNCTION_SPECIALIZATION // This is the case only for VC7.0 and not VC6 or VC7.1+. Fixed in VC7.1
+ //#define EA_COMPILER_NO_MEMBER_TEMPLATES // VC7.0 supports member templates properly 95% of the time. So do we flag the remaining 5%?
+
+# elif (_MSC_VER < 1400) // If VC7.1 ...
+ // The VC7.1 and later compiler is fairly close to the C++ standard
+ // and thus has no compiler limitations that we are concerned about.
+# define EA_COMPILER_MSVC7_2003
+# define EA_COMPILER_MSVC7_1
+
+# else // _MSC_VER of 1400 means VC8 (VS2005), 1500 means VC9 (VS2008)
+# define EA_COMPILER_MSVC8_2005
+# define EA_COMPILER_MSVC8_0
+
+# endif
+
+
+	// IBM
+#elif defined(__xlC__)
+#	define EA_COMPILER_IBM
+#	define EA_COMPILER_NAME "IBM XL C"
+#	define EA_COMPILER_VERSION __xlC__
+#	define EA_COMPILER_STRING "IBM XL C compiler, version " INTERNAL_STRINGIZE( __xlC__ )
+
+
+	// ARM compiler
+	// NOTE: this was previously nested inside the __xlC__ branch above, which made it
+	// unreachable unless the IBM compiler was also detected; it is a distinct branch.
+#elif defined(__ARMCC_VERSION)
+	// Note that this refers to the ARM compiler (armcc or armcpp), but there
+	// are other compilers that target ARM processors, such as GCC and Microsoft VC++.
+	// If you want to detect compiling for the ARM processor, check for EA_PROCESSOR_ARM
+	// being defined.
+#	define EA_COMPILER_ARM
+#	define EA_COMPILER_VERSION __ARMCC_VERSION
+#	define EA_COMPILER_NAME "ARM" // Must be a string literal: EA_COMPILER_STRING below concatenates it with other literals (the former __CC_ARM is an integer macro and would not compile there).
+	//#define EA_COMPILER_STRING (defined below)
+
+
+	// Green Hills
+#elif defined(__ghs__)
+#	define EA_COMPILER_GHS
+#	define EA_COMPILER_VERSION __GHS_VERSION_NUMBER
+#	define EA_COMPILER_NAME "Green Hills"
+	//#define EA_COMPILER_STRING (defined below)
+
+
+ // Unknown
+#else // Else the compiler is unknown
+
+# define EA_COMPILER_VERSION 0
+# define EA_COMPILER_NAME "Unknown"
+
+#endif
+
+#ifndef EA_COMPILER_STRING
+# define EA_COMPILER_STRING EA_COMPILER_NAME " compiler, version " INTERNAL_STRINGIZE(EA_COMPILER_VERSION)
+#endif
+
+
+ // Deprecated definitions
+ // For backwards compatibility, should be supported for at least the life of EABase v2.0.x.
+#ifndef EA_COMPILER_NO_TEMPLATE_PARTIAL_SPECIALIZATION
+# define EA_COMPILER_PARTIAL_TEMPLATE_SPECIALIZATION
+#endif
+#ifndef EA_COMPILER_NO_TEMPLATE_SPECIALIZATION
+# define EA_COMPILER_TEMPLATE_SPECIALIZATION
+#endif
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATES
+# define EA_COMPILER_MEMBER_TEMPLATES
+#endif
+#ifndef EA_COMPILER_NO_MEMBER_TEMPLATE_SPECIALIZATION
+# define EA_COMPILER_MEMBER_TEMPLATE_SPECIALIZATION
+#endif
+
+
+
+ // EA_COMPILER_NO_RTTI
+ //
+ // If EA_COMPILER_NO_RTTI is defined, then RTTI (run-time type information)
+ // is not available (possibly due to being disabled by the user).
+ //
+#if defined(__SNC__) && !defined(__RTTI)
+# define EA_COMPILER_NO_RTTI
+#elif defined(__GXX_ABI_VERSION) && !defined(__GXX_RTTI)
+# define EA_COMPILER_NO_RTTI
+#elif defined(__clang__) && !__has_feature(cxx_rtti)
+# define EA_COMPILER_NO_RTTI
+#elif defined(_MSC_VER) && !defined(_CPPRTTI)
+# define EA_COMPILER_NO_RTTI
+#elif defined(__MWERKS__)
+# if !__option(RTTI)
+# define EA_COMPILER_NO_RTTI
+# endif
+#endif
+
+
+
+ // EA_COMPILER_NO_EXCEPTIONS / EA_COMPILER_NO_UNWIND
+ //
+ // If EA_COMPILER_NO_EXCEPTIONS is defined, then the compiler is
+ // configured to not recognize C++ exception-handling statements
+ // such as try/catch/throw. Thus, when EA_COMPILER_NO_EXCEPTIONS is
+ // defined, code that attempts to use exception handling statements
+	// will usually cause a compilation error. It is often desirable
+ // for projects to disable exception handling because exception
+ // handling causes extra code and/or data generation which might
+ // not be needed, especially if it is known that exceptions won't
+ // be happening. When writing code that is to be portable between
+ // systems of which some enable exception handling while others
+ // don't, check for EA_COMPILER_NO_EXCEPTIONS being defined.
+ //
+#if defined(EA_COMPILER_GNUC) && defined(_NO_EX) // GCC on some platforms (e.g. PS3) defines _NO_EX when exceptions are disabled.
+#	define EA_COMPILER_NO_EXCEPTIONS
+
+#elif (defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_INTEL) || defined(EA_COMPILER_SN)) && !defined(__EXCEPTIONS) // GCC and most EDG-based compilers define __EXCEPTIONS when exception handling is enabled.
+#	define EA_COMPILER_NO_EXCEPTIONS
+
+#elif defined(EA_COMPILER_CLANG) && !__has_feature(cxx_exceptions)
+#	define EA_COMPILER_NO_EXCEPTIONS
+
+#elif defined(EA_COMPILER_METROWERKS)
+#	if !__option(exceptions)
+#		define EA_COMPILER_NO_EXCEPTIONS
+#	endif
+
+	// Borland and Microsoft use the _CPPUNWIND define to denote that
+	// exception stack unwinding code generation is disabled. The result
+	// is that you can call try/catch/throw and that exceptions will be
+	// caught and handled, but that no automatic object destruction will
+	// happen between a throw and the resulting catch. We thus don't
+	// want to define EA_COMPILER_NO_EXCEPTIONS, but perhaps users might
+	// be interested in knowing that unwinding is disabled.
+#elif (defined(EA_COMPILER_BORLANDC) || defined(EA_COMPILER_MSVC)) && !defined(_CPPUNWIND) // EA_COMPILER_BORLANDC (not EA_COMPILER_BORLAND) is the macro this file defines for Borland above.
+#	define EA_COMPILER_NO_UNWIND
+
+#endif // EA_COMPILER_NO_EXCEPTIONS / EA_COMPILER_NO_UNWIND
+
+
+
+ // EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+ //
+ // If defined, then the compiler doesn't provide a Standard C++ library.
+ //
+#if defined(EA_PLATFORM_ANDROID)
+# define EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+#endif
+
+
+ // EA_COMPILER_NO_STATIC_VARIABLE_INIT
+ //
+	// If defined, it means that global or static C++ variables will NOT be
+	// automatically constructed. Not all compiler/platform combinations support this.
+ // User code that needs to be portable must avoid having C++ variables
+ // that construct before main.
+ //
+ //#if defined(EA_PLATFORM_MOBILE)
+ // #define EA_COMPILER_NO_STATIC_VARIABLE_INIT
+ //#endif
+
+
+
+ // EA_COMPILER_NO_STATIC_FUNCTION_INIT
+ //
+	// If defined, it means that functions marked as startup functions
+	// (e.g. __attribute__((constructor)) in GCC) are NOT supported. Some
+	// compiler/platform combinations don't support this.
+ //
+ //#if defined(XXX) // So far, all compiler/platforms we use support this.
+	//	#define EA_COMPILER_NO_STATIC_FUNCTION_INIT
+ //#endif
+
+
+
+#endif // INCLUDED_eacompiler_H
diff --git a/UnknownVersion/include/EABase/config/eacompilertraits.h b/UnknownVersion/include/EABase/config/eacompilertraits.h
new file mode 100644
index 0000000..f2aa3f7
--- /dev/null
+++ b/UnknownVersion/include/EABase/config/eacompilertraits.h
@@ -0,0 +1,1034 @@
+/*
+Copyright (C) 2009 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*-----------------------------------------------------------------------------
+ * config/eacompilertraits.h
+ *
+ * Copyright (c) 2002 - 2005 Electronic Arts Inc. All rights reserved.
+ * Maintained by Paul Pedriana, Maxis
+ *
+ *-----------------------------------------------------------------------------
+ * Currently supported defines include:
+ * EA_COMPILER_HAS_C99_TYPES
+ * EA_COMPILER_HAS_CHAR_16_32
+ * EA_COMPILER_IS_ANSIC
+ * EA_COMPILER_IS_C99
+ * EA_COMPILER_IS_CPLUSPLUS
+ * EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+ * EA_COMPILER_HAS_MOVE_SEMANTICS
+ * EA_COMPILER_MANAGED_CPP
+ *
+ * EA_ALIGN_OF()
+ * EA_ALIGN() / EA_PREFIX_ALIGN() / EA_POSTFIX_ALIGN()
+ * EA_ALIGNED()
+ * EA_PACKED()
+ *
+ * EA_LIKELY()
+ * EA_UNLIKELY()
+ * EA_INIT_PRIORITY()
+ * EA_MAY_ALIAS()
+ * EA_ASSUME()
+ * EA_PURE
+ * EA_WEAK
+ *
+ * EA_WCHAR_T_NON_NATIVE
+ * EA_WCHAR_SIZE = <n bytes>
+ *
+ * EA_RESTRICT
+ * EA_DEPRECATED / EA_PREFIX_DEPRECATED / EA_POSTFIX_DEPRECATED
+ * EA_FORCE_INLINE / EA_PREFIX_FORCE_INLINE / EA_POSTFIX_FORCE_INLINE
+ * EA_NO_INLINE / EA_PREFIX_NO_INLINE / EA_POSTFIX_NO_INLINE
+ * EA_NO_VTABLE / EA_CLASS_NO_VTABLE / EA_STRUCT_NO_VTABLE
+ * EA_PASCAL
+ * EA_PASCAL_FUNC()
+ * EA_SSE = [0 | 1]
+ * EA_IMPORT
+ * EA_EXPORT
+ * EA_PRAGMA_ONCE_SUPPORTED
+ * EA_OVERRIDE
+ * EA_SEALED
+ * EA_ABSTRACT
+ *
+ * Todo:
+ * Find a way to reliably detect wchar_t size at preprocessor time and
+ * implement it below for EA_WCHAR_SIZE.
+ *
+ * Todo:
+ * Find out how to support EA_PASCAL and EA_PASCAL_FUNC for systems in
+ * which it hasn't yet been found out for.
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eacompilertraits_H
+#define INCLUDED_eacompilertraits_H
+
+#ifndef INCLUDED_eaplatform_H
+# include "EABase/config/eaplatform.h"
+#endif
+
+#ifndef INCLUDED_eacompiler_H
+# include "EABase/config/eacompiler.h"
+#endif
+
+ // Metrowerks uses #defines in its core C header files to define
+ // the kind of information we need below (e.g. C99 compatibility)
+#if defined(__MWERKS__)
+ // Defining the following causes C99 compilers to enable the macros
+ // associated with the defines. The C99 standard specifies that you
+ // should define these as such.
+# ifndef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS
+# endif
+
+# ifndef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS
+# endif
+
+# include <stddef.h>
+#endif
+
+#if defined(__SNC__) || defined(EA_PLATFORM_PS3) || defined(__S3E__)
+# ifndef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS
+# endif
+
+# ifndef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS
+# endif
+
+# include <stdint.h>
+
+# if !defined(EA_COMPILER_HAS_INTTYPES)
+# if !defined(__S3E__)
+# define EA_COMPILER_HAS_INTTYPES
+# endif
+# endif
+#endif
+
+ // Determine if this compiler is ANSI C compliant and if it is C99 compliant.
+#if defined(__STDC__)
+# define EA_COMPILER_IS_ANSIC // The compiler claims to be ANSI C
+
+ // Is the compiler a C99 compiler or equivalent?
+ // From ISO/IEC 9899:1999:
+ // 6.10.8 Predefined macro names
+ // __STDC_VERSION__ The integer constant 199901L. (150)
+ //
+ // 150) This macro was not specified in ISO/IEC 9899:1990 and was
+ // specified as 199409L in ISO/IEC 9899/AMD1:1995. The intention
+ // is that this will remain an integer constant of type long int
+ // that is increased with each revision of this International Standard.
+ //
+# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
+# define EA_COMPILER_IS_C99
+# endif
+#endif
+
+ // Some compilers (e.g. GCC) define __USE_ISOC99 if they are not
+ // strictly C99 compilers (or are simply C++ compilers) but are set
+ // to use C99 functionality. Metrowerks defines _MSL_C99 as 1 in
+ // this case, but 0 otherwise.
+#if (defined(__USE_ISOC99) || (defined(_MSL_C99) && (_MSL_C99 == 1))) && !defined(EA_COMPILER_IS_C99)
+# define EA_COMPILER_IS_C99
+#endif
+
+	// Metrowerks defines C99 types (e.g. intptr_t) intrinsically when in C99 mode (-lang C99 on the command line).
+#if (defined(_MSL_C99) && (_MSL_C99 == 1))
+# define EA_COMPILER_HAS_C99_TYPES
+#endif
+
+#if defined(__GNUC__)
+# if (((__GNUC__ * 100) + __GNUC_MINOR__) >= 302) // Also, GCC defines _HAS_C9X.
+# define EA_COMPILER_HAS_C99_TYPES // The compiler is not necessarily a C99 compiler, but it defines C99 types.
+# ifndef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS
+# endif
+# ifndef __STDC_CONSTANT_MACROS
+# define __STDC_CONSTANT_MACROS // This tells the GCC compiler that we want it to use its native C99 types.
+# endif
+# endif
+# if (defined(__GXX_EXPERIMENTAL_CXX0X__) && !defined(EA_COMPILER_IS_CPLUSPLUS_11_ENABLED))
+# define EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+# define EA_COMPILER_HAS_MOVE_SEMANTICS
+# endif
+
+# define EASTL_GNU_VERSION_NUM (__GNUC__ * 100) + __GNUC_MINOR__
+# ifdef EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+# if EASTL_GNU_VERSION_NUM >= 404
+# define EA_COMPILER_HAS_CHAR_16_32
+# define EA_COMPILER_HAS_VARIADIC_TEMPLATES
+# endif
+# endif
+#endif
+
+#if defined(__clang__)
+# if (defined(__GXX_EXPERIMENTAL_CXX0X__) && !defined(EA_COMPILER_IS_CPLUSPLUS_11_ENABLED))
+# define EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+# define EA_COMPILER_HAS_MOVE_SEMANTICS
+# endif
+
+# define EASTL_CLANG_VERSION_NUM (__clang_major__ * 10) + __clang_minor__
+# if EASTL_CLANG_VERSION_NUM >= 29
+# define EA_COMPILER_HAS_C99_TYPES
+# endif
+# ifdef EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+# if EASTL_CLANG_VERSION_NUM >= 29
+# define EA_COMPILER_HAS_CHAR_16_32
+# define EA_COMPILER_HAS_VARIADIC_TEMPLATES
+# endif
+# endif
+
+# ifndef __STDC_LIMIT_MACROS
+# define __STDC_LIMIT_MACROS
+# endif
+# ifndef __STDC_CONSTANT_MACROS
+#	define __STDC_CONSTANT_MACROS // This tells the compiler that we want it to use its native C99 types.
+# endif
+#endif
+
+#if defined(_MSC_VER)
+# if (_MSC_VER >= 1600)
+# define EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+# define EA_COMPILER_HAS_MOVE_SEMANTICS
+# endif
+#endif
+
+#if defined(__ghs__)
+# if __GHS_VERSION_NUMBER >= 201505
+# define EA_COMPILER_HAS_CHAR_16_32
+# define EA_COMPILER_HAS_VARIADIC_TEMPLATES
+# define EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+# endif
+#endif
+
+#ifdef __cplusplus
+# define EA_COMPILER_IS_CPLUSPLUS
+# if (__cplusplus > 199711L)
+# define EA_COMPILER_IS_CPLUSPLUS_11_ENABLED
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_COMPILER_MANAGED_CPP
+ // Defined if this is being compiled with Managed C++ extensions
+#ifdef EA_COMPILER_MSVC
+# if EA_COMPILER_VERSION >= 1300
+# ifdef _MANAGED
+# define EA_COMPILER_MANAGED_CPP
+# endif
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // alignment expressions
+ //
+ // Here we define
+ // EA_ALIGN_OF(type) // Returns size_t.
+ // EA_ALIGN(n) // Used as a prefix. n is byte alignment, which must be a power of two. Most of the time you can use this and avoid using EA_PREFIX_ALIGN/EA_POSTFIX_ALIGN.
+ // EA_PREFIX_ALIGN(n) // n is byte alignment, which must be a power of two. You should need this only for unusual compilers.
+ // EA_POSTFIX_ALIGN(n) // Valid values for n are 1, 2, 4, 8, etc. You should need this only for unusual compilers.
+ // EA_ALIGNED(t, v, n) // Type, variable, alignment. Used to align an instance. You should need this only for unusual compilers.
+ // EA_PACKED // Specifies that the given structure be packed (and not have its members aligned).
+ //
+ // Example usage:
+ // size_t x = EA_ALIGN_OF(int); Non-aligned equivalents. Meaning
+ // EA_PREFIX_ALIGN(8) int x = 5; int x = 5; Align x on 8 for compilers that require prefix attributes. Can just use EA_ALIGN instead.
+ // EA_ALIGN(8) int x; int x; Align x on 8 for compilers that allow prefix attributes.
+ // int x EA_POSTFIX_ALIGN(8); int x; Align x on 8 for compilers that require postfix attributes.
+ // int x EA_POSTFIX_ALIGN(8) = 5; int x = 5; Align x on 8 for compilers that require postfix attributes.
+ // int x EA_POSTFIX_ALIGN(8)(5); int x(5); Align x on 8 for compilers that require postfix attributes.
+ // struct EA_PREFIX_ALIGN(8) X { int x; } EA_POSTFIX_ALIGN(8); struct X { int x; }; Define X as a struct which is aligned on 8 when used.
+ // EA_ALIGNED(int, x, 8) = 5; int x = 5; Align x on 8.
+ // EA_ALIGNED(int, x, 16)(5); int x(5); Align x on 16.
+ // EA_ALIGNED(int, x[3], 16); int x[3]; Align x array on 16.
+ // EA_ALIGNED(int, x[3], 16) = { 1, 2, 3 }; int x[3] = { 1, 2, 3 }; Align x array on 16.
+ // int x[3] EA_PACKED; int x[3]; Pack the 3 ints of the x array. GCC doesn't seem to support packing of int arrays.
+ // struct EA_ALIGN(32) X { int x; int y; }; struct X { int x; }; Define X as a struct which is aligned on 32 when used.
+ // EA_ALIGN(32) struct X { int x; int y; } Z; struct X { int x; } Z; Define X as a struct, and align the instance Z on 32.
+ // struct X { int x EA_PACKED; int y EA_PACKED; }; struct X { int x; int y; }; Pack the x and y members of struct X.
+ // struct X { int x; int y; } EA_PACKED; struct X { int x; int y; }; Pack the members of struct X.
+ // typedef EA_ALIGNED(int, int16, 16); int16 n16; typedef int int16; int16 n16; Define int16 as an int which is aligned on 16.
+ // typedef EA_ALIGNED(X, X16, 16); X16 x16; typedef X X16; X16 x16; Define X16 as an X which is aligned on 16.
+
+ // SNC (EDG) intends to be compatible with GCC but has a bug whereby it
+ // fails to support calling a constructor in an aligned declaration when
+ // using postfix alignment attributes. Prefix works for alignment, but does not align
+ // the size like postfix does. Prefix also fails on templates. So GCC-style postfix
+ // is still used, but the user will need to use EA_POSTFIX_ALIGN before the constructor parameters.
+ // this note by Paul and Frank
+#if defined(EA_COMPILER_SN) && defined(__GNUC__) // If using the SN compiler in GCC compatibility mode...
+# define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+# define EA_ALIGN(n) __attribute__((aligned(n)))
+# define EA_PREFIX_ALIGN(n)
+# define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+# define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+# define EA_PACKED __attribute__((packed))
+
+ // GCC 2.x doesn't support prefix attributes.
+#elif defined(__GNUC__) && (__GNUC__ < 3)
+# define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+# define EA_ALIGN(n)
+# define EA_PREFIX_ALIGN(n)
+# define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+# define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+# define EA_PACKED __attribute__((packed))
+
+ // GCC 3.x+ and IBM C support prefix attributes.
+#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__xlC__)
+# define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+# define EA_ALIGN(n) __attribute__((aligned(n)))
+# define EA_PREFIX_ALIGN(n)
+# define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+# define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+# define EA_PACKED __attribute__((packed))
+
+#elif defined(EA_COMPILER_CLANG)
+ // Checks for clang-supported attributes
+# if __has_attribute(aligned)
+# define CLANG_ALIGNED(n) __attribute__((aligned(n)))
+# else
+# define CLANG_ALIGNED(n)
+# endif
+
+# if __has_attribute(packed)
+# define CLANG_PACKED __attribute__((packed))
+# else
+# define CLANG_PACKED
+# endif
+
+ // Now we define the alignment stuff
+# define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+# define EA_ALIGN(n) CLANG_ALIGNED(n)
+# define EA_PREFIX_ALIGN(n)
+# define EA_POSTFIX_ALIGN(n) CLANG_ALIGNED(n)
+# define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+# define EA_PACKED CLANG_PACKED
+
+ // Make sure we get no macro naming conflicts
+# undef CLANG_ALIGNED
+# undef CLANG_PACKED
+
+ // Metrowerks supports prefix attributes.
+ // Metrowerks does not support packed alignment attributes.
+#elif defined(EA_COMPILER_METROWERKS)
+# define EA_ALIGN_OF(type) ((size_t)__alignof__(type))
+# define EA_ALIGN(n) __attribute__((aligned(n)))
+# define EA_PREFIX_ALIGN(n)
+# define EA_POSTFIX_ALIGN(n) __attribute__((aligned(n)))
+# define EA_ALIGNED(variable_type, variable, n) variable_type variable __attribute__((aligned(n)))
+# define EA_PACKED
+
+ // Microsoft supports prefix alignment via __declspec, but the alignment value must be a literal number, not just a constant expression.
+ // Contrary to VC7.x and earlier documentation, __declspec(align) works on stack variables. VC8+ (VS2005+) documents correctly.
+ // Microsoft does not support packed alignment attributes; you must use #pragma pack.
+#elif defined(EA_COMPILER_INTEL) || defined(EA_PLATFORM_XBOX) || (defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1300))
+# define EA_ALIGN_OF(type) ((size_t)__alignof(type))
+# define EA_ALIGN(n) __declspec(align(n))
+# define EA_PREFIX_ALIGN(n) __declspec(align(n))
+# define EA_POSTFIX_ALIGN(n)
+# define EA_ALIGNED(variable_type, variable, n) __declspec(align(n)) variable_type variable
+# define EA_PACKED
+
+ // Arm brand compiler
+#elif defined(__ARMCC_VERSION)
+# define EA_ALIGN_OF(type) ((size_t)__ALIGNOF__(type))
+# define EA_ALIGN(n) __align(n)
+# define EA_PREFIX_ALIGN(n) __align(n)
+# define EA_POSTFIX_ALIGN(n)
+# define EA_ALIGNED(variable_type, variable, n) __align(n) variable_type variable
+# define EA_PACKED __packed
+
+#else // Unusual compilers
+ // There is nothing we can do about some of these. This is not as bad a problem as it seems.
+ // If the given platform/compiler doesn't support alignment specifications, then it's somewhat
+ // likely that alignment doesn't matter for that platform. Otherwise they would have defined
+ // functionality to manipulate alignment.
+# define EA_ALIGN(n)
+# define EA_PREFIX_ALIGN(n)
+# define EA_POSTFIX_ALIGN(n)
+# define EA_ALIGNED(variable_type, variable, n) variable_type variable
+# define EA_PACKED
+
+# ifdef __cplusplus
+ template <typename T> struct EAAlignOf1 { enum { s = sizeof (T), value = s ^ (s & (s - 1)) }; };
+ template <typename T> struct EAAlignOf2;
+ template <int size_diff> struct helper { template <typename T> struct Val { enum { value = size_diff }; }; };
+ template <> struct helper<0> { template <typename T> struct Val { enum { value = EAAlignOf2<T>::value }; }; };
+ template <typename T> struct EAAlignOf2 { struct Big { T x; char c; };
+ enum { diff = sizeof (Big) - sizeof (T), value = helper<diff>::template Val<Big>::value }; };
+ template <typename T> struct EAAlignof3 { enum { x = EAAlignOf2<T>::value, y = EAAlignOf1<T>::value, value = x < y ? x : y }; };
+# define EA_ALIGN_OF(type) ((size_t)EAAlignof3<type>::value)
+
+# else
+ // C implementation of EA_ALIGN_OF
+ // This implementation works for most cases, but doesn't directly work
+ // for types such as function pointer declarations. To work with those
+ // types you need to typedef the type and then use the typedef in EA_ALIGN_OF.
+# define EA_ALIGN_OF(type) ((size_t)offsetof(struct { char c; type m; }, m))
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_LIKELY / EA_UNLIKELY
+ //
+ // Defined as a macro which gives a hint to the compiler for branch
+ // prediction. GCC gives you the ability to manually give a hint to
+ // the compiler about the result of a comparison, though it's often
+ // best to compile shipping code with profiling feedback under both
+ // GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+ // are times when you feel very sure that a boolean expression will
+ // usually evaluate to either true or false and can help the compiler
+ // by using an explicit directive...
+ //
+ // Example usage:
+ // if(EA_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+ // { ... }
+ //
+ // Example usage:
+ // if(EA_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+ // { ... }
+ //
+#ifndef EA_LIKELY
+# if (defined(__GNUC__) && (__GNUC__ >= 3)) || \
+ (defined(__clang__) && __has_builtin(__builtin_expect)) || \
+ defined(__MWERKS__) // Metrowerks supports __builtin_expect, but with some platforms (e.g. Wii) it appears to ignore it.
+
+# if defined(__cplusplus)
+# define EA_LIKELY(x) __builtin_expect(!!(x), true)
+# define EA_UNLIKELY(x) __builtin_expect(!!(x), false)
+# else
+# define EA_LIKELY(x) __builtin_expect(!!(x), 1)
+# define EA_UNLIKELY(x) __builtin_expect(!!(x), 0)
+# endif
+# else
+# define EA_LIKELY(x) (x)
+# define EA_UNLIKELY(x) (x)
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_INIT_PRIORITY
+ //
+ // This is simply a wrapper for the GCC init_priority attribute that allows
+ // multiplatform code to be easier to read. This attribute doesn't apply
+ // to VC++ because VC++ uses file-level pragmas to control init ordering.
+ //
+ // Example usage:
+ // SomeClass gSomeClass EA_INIT_PRIORITY(2000);
+ //
+#if !defined(EA_INIT_PRIORITY)
+# if defined(__GNUC__)
+# define EA_INIT_PRIORITY(x) __attribute__ ((init_priority (x)))
+# else
+# define EA_INIT_PRIORITY(x)
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_MAY_ALIAS
+ //
+ // Defined as a macro that wraps the GCC may_alias attribute. This attribute
+ // has no significance for VC++ because VC++ doesn't support the concept of
+ // strict aliasing. Users should avoid writing code that breaks strict
+ // aliasing rules; EA_MAY_ALIAS is for cases with no alternative.
+ //
+ // Example usage:
+ // void* EA_MAY_ALIAS gPtr = NULL;
+ //
+ // Example usage:
+ // typedef void* EA_MAY_ALIAS pvoid_may_alias;
+ // pvoid_may_alias gPtr = NULL;
+ //
+#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303)
+# define EA_MAY_ALIAS __attribute__((__may_alias__))
+#elif defined(EA_COMPILER_CLANG) && __has_attribute(__may_alias__)
+# define EA_MAY_ALIAS __attribute__((__may_alias__))
+#else
+# define EA_MAY_ALIAS
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_ASSUME
+ //
+ // This acts the same as the VC++ __assume directive and is implemented
+ // simply as a wrapper around it to allow portable usage of it and to take
+ // advantage of it if and when it appears in other compilers.
+ //
+ // Example usage:
+ // void Function(int a) {
+ // switch(a) {
+ // case 1:
+ // DoSomething(1);
+ // break;
+ // case 2:
+ // DoSomething(-1);
+ // break;
+ // default:
+ // EA_ASSUME(0); // This tells the optimizer that the default cannot be reached.
+ // }
+ // }
+ //
+#ifndef EA_ASSUME
+# if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later (including XBox, and XBox 360)...
+# define EA_ASSUME(x) __assume(x)
+# else
+# define EA_ASSUME(x)
+# endif
+#endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_PURE
+ //
+ // This acts the same as the GCC __attribute__ ((pure)) directive and is
+ // implemented simply as a wrapper around it to allow portable usage of
+ // it and to take advantage of it if and when it appears in other compilers.
+ //
+ // A "pure" function is one that has no effects except its return value and
+ // its return value is a function of only the function's parameters or
+ // non-volatile global variables. Any parameter or global variable access
+ // must be read-only. Loop optimization and subexpression elimination can be
+ // applied to such functions. A common example is strlen(): Given identical
+ // inputs, the function's return value (its only effect) is invariant across
+ // multiple invocations and thus can be pulled out of a loop and called but once.
+ //
+ // Example usage:
+ // EA_PURE void Function();
+ //
+#ifndef EA_PURE
+# if defined(EA_COMPILER_GNUC)
+# define EA_PURE __attribute__((pure))
+# elif defined(EA_COMPILER_CLANG) && __has_attribute(pure)
+# define EA_PURE __attribute__((pure))
+# elif defined(__ARMCC_VERSION) // Arm brand compiler for ARM CPU
+# define EA_PURE __pure
+# else
+# define EA_PURE
+# endif
+#endif
+
+
+
+ // ------------------------------------------------------------------------
+ // EA_WEAK
+ // EA_WEAK_SUPPORTED -- defined as 0 or 1.
+ //
+ // GCC
+ // The weak attribute causes the declaration to be emitted as a weak
+ // symbol rather than a global. This is primarily useful in defining
+ // library functions which can be overridden in user code, though it
+ // can also be used with non-function declarations.
+ //
+ // VC++
+ // At link time, if multiple definitions of a COMDAT are seen, the linker
+ // picks one and discards the rest. If the linker option /OPT:REF
+ // is selected, then COMDAT elimination will occur to remove all the
+ // unreferenced data items in the linker output.
+ //
+ // Example usage:
+ // EA_WEAK void Function();
+ //
+#ifndef EA_WEAK
+# if defined(_MSC_VER) && (_MSC_VER >= 1300) // If VC7.0 and later (including XBox)...
+# define EA_WEAK __declspec(selectany)
+# define EA_WEAK_SUPPORTED 1
+# elif defined(_MSC_VER) || (defined(__GNUC__) && defined(__CYGWIN__))
+# define EA_WEAK
+# define EA_WEAK_SUPPORTED 0
+# elif defined(__ARMCC_VERSION) // Arm brand compiler for ARM CPU
+# define EA_WEAK __weak
+# define EA_WEAK_SUPPORTED 1
+# else // GCC and IBM compilers, others.
+# define EA_WEAK __attribute__((weak))
+# define EA_WEAK_SUPPORTED 1
+# endif
+#endif
+
+
+
+ // ------------------------------------------------------------------------
+ // wchar_t
+ // Here we define:
+ // EA_WCHAR_T_NON_NATIVE
+ // EA_WCHAR_SIZE = <sizeof(wchar_t)>
+ //
+#ifndef EA_WCHAR_T_NON_NATIVE
+ // Compilers that always implement wchar_t as native include:
+ // COMEAU, new SN, and other EDG-based compilers.
+ // GCC
+ // Borland
+ // SunPro
+ // IBM Visual Age
+# if defined(EA_COMPILER_INTEL)
+# if (EA_COMPILER_VERSION < 700)
+# define EA_WCHAR_T_NON_NATIVE 1
+# else
+# if (!defined(_WCHAR_T_DEFINED) && !defined(_WCHAR_T))
+# define EA_WCHAR_T_NON_NATIVE 1
+# endif
+# endif
+# elif defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_BORLAND)
+# ifndef _NATIVE_WCHAR_T_DEFINED
+# define EA_WCHAR_T_NON_NATIVE 1
+# endif
+# elif defined(EA_COMPILER_METROWERKS)
+# if !__option(wchar_type)
+# define EA_WCHAR_T_NON_NATIVE 1
+# endif
+# elif defined(__SNC__) && !defined(__cplusplus) // If compiling C under SNC...
+# define EA_WCHAR_T_NON_NATIVE 1
+# endif
+#endif
+
+#ifndef EA_WCHAR_SIZE // If the user hasn't specified that it is a given size...
+# if defined(__WCHAR_MAX__) // GCC defines this for most platforms.
+# if (__WCHAR_MAX__ == 2147483647) || (__WCHAR_MAX__ == 4294967295)
+# define EA_WCHAR_SIZE 4
+# elif (__WCHAR_MAX__ == 32767) || (__WCHAR_MAX__ == 65535)
+# define EA_WCHAR_SIZE 2
+# elif (__WCHAR_MAX__ == 127) || (__WCHAR_MAX__ == 255)
+# define EA_WCHAR_SIZE 1
+# else
+# define EA_WCHAR_SIZE 4
+# endif
+# elif defined(WCHAR_MAX) // The SN and Arm compilers define this.
+# if (WCHAR_MAX == 2147483647) || (WCHAR_MAX == 4294967295)
+# define EA_WCHAR_SIZE 4
+# elif (WCHAR_MAX == 32767) || (WCHAR_MAX == 65535)
+# define EA_WCHAR_SIZE 2
+# elif (WCHAR_MAX == 127) || (WCHAR_MAX == 255)
+# define EA_WCHAR_SIZE 1
+# else
+# define EA_WCHAR_SIZE 4
+# endif
+# elif defined(_WCMAX) // The SN and Arm compilers define this.
+# if (_WCMAX == 2147483647) || (_WCMAX == 4294967295)
+# define EA_WCHAR_SIZE 4
+# elif (_WCMAX == 32767) || (_WCMAX == 65535)
+# define EA_WCHAR_SIZE 2
+# elif (_WCMAX == 127) || (_WCMAX == 255)
+# define EA_WCHAR_SIZE 1
+# else
+# define EA_WCHAR_SIZE 4
+# endif
+# elif defined(EA_PLATFORM_UNIX) || defined(EA_PLATFORM_PS3) || defined(EA_PLATFORM_PS3_SPU)
+ // It is standard on Unix to have wchar_t be int32_t or uint32_t.
+ // All versions of GNUC default to a 32 bit wchar_t, but has been used
+ // with the -fshort-wchar GCC command line option to force it to 16 bit.
+ // If you know that the compiler is set to use a wchar_t of other than
+ // the default, you need to manually define EA_WCHAR_SIZE for the build.
+# define EA_WCHAR_SIZE 4
+# else
+ // It is standard on Windows to have wchar_t be uint16_t.
+ // Metrowerks and the new EDG-based SN compilers define wchar_t
+ // as uint16_t. Given that there is currently no known way to tell at preprocessor
+ // time what the size of wchar_t is, we declare it to be 2.
+ // If you have EA_WCHAR_SIZE != sizeof(wchar_t), then your
+ // code might not be broken, but it also won't work with wchar libraries
+ // and data from other parts of EA. Under GCC, you can force wchar_t
+ // to two bytes with the -fshort-wchar compiler argument.
+# define EA_WCHAR_SIZE 2
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_RESTRICT
+ //
+ // The C99 standard defines a new keyword, restrict, which allows for the
+ // improvement of code generation regarding memory usage. Compilers can
+ // generate significantly faster code when you are able to use restrict.
+ //
+ // Example usage:
+ // void DoSomething(char* EA_RESTRICT p1, char* EA_RESTRICT p2);
+ //
+#ifndef EA_RESTRICT
+# if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+# define EA_RESTRICT __restrict
+# elif defined(EA_COMPILER_GNUC)
+# define EA_RESTRICT __restrict // GCC defines 'restrict' (as opposed to __restrict) in C99 mode only.
+# elif defined(EA_COMPILER_CLANG)
+# define EA_RESTRICT __restrict
+# elif defined(EA_COMPILER_GHS)
+# define EA_RESTRICT restrict
+# elif defined(__ARMCC_VERSION)
+# define EA_RESTRICT __restrict
+# elif defined(__MWERKS__)
+# if __option(c99)
+# define EA_RESTRICT restrict
+# else
+# define EA_RESTRICT
+# endif
+# elif defined(EA_COMPILER_IS_C99)
+# define EA_RESTRICT restrict
+# else
+ // If the compiler didn't support restricted pointers, defining EA_RESTRICT
+ // away would result in compiling and running fine but you just wouldn't get
+ // the same level of optimization. On the other hand, all the major compilers
+ // support restricted pointers.
+# define EA_RESTRICT
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_DEPRECATED // Used as a prefix.
+ // EA_PREFIX_DEPRECATED // You should need this only for unusual compilers.
+ // EA_POSTFIX_DEPRECATED // You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // EA_DEPRECATED void Function();
+ //
+ // or for maximum portability:
+ // EA_PREFIX_DEPRECATED void Function() EA_POSTFIX_DEPRECATED;
+ //
+#ifndef EA_DEPRECATED
+# if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION > 1300) // If VC7 (VS2003) or later...
+# define EA_DEPRECATED __declspec(deprecated)
+# elif defined(EA_COMPILER_MSVC)
+# define EA_DEPRECATED
+# else
+# define EA_DEPRECATED __attribute__((deprecated))
+# endif
+#endif
+
+#ifndef EA_PREFIX_DEPRECATED
+# if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION > 1300) // If VC7 (VS2003) or later...
+# define EA_PREFIX_DEPRECATED __declspec(deprecated)
+# define EA_POSTFIX_DEPRECATED
+# elif defined(EA_COMPILER_MSVC)
+# define EA_PREFIX_DEPRECATED
+# define EA_POSTFIX_DEPRECATED
+# else
+# define EA_PREFIX_DEPRECATED
+# define EA_POSTFIX_DEPRECATED __attribute__((deprecated))
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_FORCE_INLINE // Used as a prefix.
+ // EA_PREFIX_FORCE_INLINE // You should need this only for unusual compilers.
+ // EA_POSTFIX_FORCE_INLINE // You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // EA_FORCE_INLINE void Foo(); // Implementation elsewhere.
+ // EA_PREFIX_FORCE_INLINE void Foo() EA_POSTFIX_FORCE_INLINE; // Implementation elsewhere.
+ //
+ // Note that when the prefix version of this function is used, it replaces
+ // the regular C++ 'inline' statement. Thus you should not use both the
+ // C++ inline statement and this macro with the same function declaration.
+ //
+ // To force inline usage under GCC 3.1+, you use this:
+ // inline void Foo() __attribute__((always_inline));
+ // or
+ // inline __attribute__((always_inline)) void Foo();
+ //
+ // The CodeWarrior compiler doesn't have the concept of forcing inlining per function.
+ //
+#ifndef EA_FORCE_INLINE
+# if defined(EA_COMPILER_MSVC)
+# define EA_FORCE_INLINE __forceinline
+# elif defined(EA_COMPILER_GNUC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 301)
+# if defined(__cplusplus)
+# define EA_FORCE_INLINE inline __attribute__((always_inline))
+# else
+# define EA_FORCE_INLINE __inline__ __attribute__((always_inline))
+# endif
+# elif defined(EA_COMPILER_CLANG) && __has_attribute(always_inline)
+# if defined(__cplusplus)
+# define EA_FORCE_INLINE inline __attribute__((always_inline))
+# else
+# define EA_FORCE_INLINE __inline__ __attribute__((always_inline))
+# endif
+# else
+# if defined(__cplusplus)
+# define EA_FORCE_INLINE inline
+# else
+# define EA_FORCE_INLINE __inline
+# endif
+# endif
+#endif
+
+#if defined(EA_COMPILER_SN) && defined(EA_PLATFORM_PS3) // SN's implementation of always_inline is broken and sometimes fails to link the function.
+# define EA_PREFIX_FORCE_INLINE inline
+# define EA_POSTFIX_FORCE_INLINE
+#elif defined(EA_COMPILER_GNUC) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 301)
+# define EA_PREFIX_FORCE_INLINE inline
+# define EA_POSTFIX_FORCE_INLINE __attribute__((always_inline))
+#else
+# define EA_PREFIX_FORCE_INLINE inline
+# define EA_POSTFIX_FORCE_INLINE
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_INLINE // Used as a prefix.
+ // EA_PREFIX_NO_INLINE // You should need this only for unusual compilers.
+ // EA_POSTFIX_NO_INLINE // You should need this only for unusual compilers.
+ //
+ // Example usage:
+ // EA_NO_INLINE void Foo(); // Implementation elsewhere.
+ // EA_PREFIX_NO_INLINE void Foo() EA_POSTFIX_NO_INLINE; // Implementation elsewhere.
+ //
+ // Note that this declaration is incompatible with C++ 'inline' and any
+ // variant of EA_FORCE_INLINE.
+ //
+ // To disable inline usage under VC++ prior to VS2005, you need to use this:
+ // #pragma inline_depth(0) // Disable inlining.
+ // void Foo() { ... }
+ // #pragma inline_depth() // Restore to default.
+ //
+ // Since there is no easy way to disable inlining on a function-by-function
+ // basis in VC++ prior to VS2005, the best strategy is to write platform-specific
+ // #ifdefs in the code or to disable inlining for a given module and enable
+ // functions individually with EA_FORCE_INLINE.
+ //
+#ifndef EA_NO_INLINE
+# if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+# define EA_NO_INLINE __declspec(noinline)
+# elif defined(EA_COMPILER_MSVC)
+# define EA_NO_INLINE
+# else
+# define EA_NO_INLINE __attribute__((noinline))
+# endif
+#endif
+
+#if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // If VC8 (VS2005) or later...
+# define EA_PREFIX_NO_INLINE __declspec(noinline)
+# define EA_POSTFIX_NO_INLINE
+#elif defined(EA_COMPILER_MSVC)
+# define EA_PREFIX_NO_INLINE
+# define EA_POSTFIX_NO_INLINE
+#else
+# define EA_PREFIX_NO_INLINE
+# define EA_POSTFIX_NO_INLINE __attribute__((noinline))
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_NO_VTABLE
+ //
+ // Example usage:
+ // class EA_NO_VTABLE X {
+ // virtual void InterfaceFunction();
+ // };
+ //
+ // EA_CLASS_NO_VTABLE(X) {
+ // virtual void InterfaceFunction();
+ // };
+ //
+#ifdef EA_COMPILER_MSVC
+# define EA_NO_VTABLE __declspec(novtable)
+# define EA_CLASS_NO_VTABLE(x) class __declspec(novtable) x
+# define EA_STRUCT_NO_VTABLE(x) struct __declspec(novtable) x
+#else
+# define EA_NO_VTABLE
+# define EA_CLASS_NO_VTABLE(x) class x
+# define EA_STRUCT_NO_VTABLE(x) struct x
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PASCAL
+ //
+ // Also known on PC platforms as stdcall.
+ // This convention causes the compiler to assume that the called function
+ // will pop off the stack space used to pass arguments, unless it takes a
+ // variable number of arguments.
+ //
+ // Example usage:
+ // this:
+ // void DoNothing(int x);
+ // void DoNothing(int x){}
+ // would be written as this:
+ // void EA_PASCAL_FUNC(DoNothing(int x));
+ // void EA_PASCAL_FUNC(DoNothing(int x)){}
+ //
+#ifndef EA_PASCAL
+# if defined(EA_COMPILER_MSVC)
+# define EA_PASCAL __stdcall
+# elif defined(EA_COMPILER_GNUC) && defined(EA_PROCESSOR_X86)
+# define EA_PASCAL __attribute__((stdcall))
+# elif defined(EA_COMPILER_METROWERKS) && defined(EA_PLATFORM_WINDOWS)
+ // You need to make sure you have the Metrowerks "ANSI keywords only"
+ // compilation option disabled for the pascal keyword to work.
+# define EA_PASCAL pascal
+# else
+ // Some compilers simply don't support pascal calling convention.
+ // As a result, there isn't an issue here, since the specification of
+ // pascal calling convention is for the purpose of disambiguating the
+ // calling convention that is applied.
+# define EA_PASCAL
+# endif
+#endif
+
+#ifndef EA_PASCAL_FUNC
+# if defined(EA_COMPILER_MSVC)
+# define EA_PASCAL_FUNC(funcname_and_paramlist) __stdcall funcname_and_paramlist
+# elif defined(EA_COMPILER_GNUC) && defined(EA_PROCESSOR_X86)
+# define EA_PASCAL_FUNC(funcname_and_paramlist) __attribute__((stdcall)) funcname_and_paramlist
+# elif defined(EA_COMPILER_METROWERKS) && defined(EA_PLATFORM_WINDOWS)
+# define EA_PASCAL_FUNC(funcname_and_paramlist) pascal funcname_and_paramlist
+# else
+# define EA_PASCAL_FUNC(funcname_and_paramlist) funcname_and_paramlist
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_SSE
+ // Visual C Processor Packs define _MSC_FULL_VER and are needed for SSE
+ // Intel C also has SSE support.
+ // EA_SSE is used to select FPU or SSE versions in hw_select.inl
+#ifndef EA_SSE
+# if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_CLANG)
+# if defined(__SSE2__)
+# define EA_SSE 2
+# elif defined(__SSE__) && __SSE__
+# define EA_SSE 1
+# else
+# define EA_SSE 0
+# endif
+# elif defined(EA_PROCESSOR_X86) && defined(_MSC_FULL_VER) && !defined(__NOSSE__) && defined(_M_IX86_FP)
+# define EA_SSE _M_IX86_FP
+# elif defined(EA_PROCESSOR_X86) && defined(EA_COMPILER_INTEL) && !defined(__NOSSE__)
+# define EA_SSE 1
+# else
+# define EA_SSE 0
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_IMPORT
+ // import declaration specification
+ // specifies that the declared symbol is imported from another dynamic library.
+#ifndef EA_IMPORT
+# if defined(EA_COMPILER_MSVC)
+# define EA_IMPORT __declspec(dllimport)
+# else
+# define EA_IMPORT
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_EXPORT
+ // export declaration specification
+ // specifies that the declared symbol is exported from the current dynamic library.
+ // this is not the same as the C++ export keyword.
+#ifndef EA_EXPORT
+# if defined(EA_COMPILER_MSVC)
+# define EA_EXPORT __declspec(dllexport)
+# else
+# define EA_EXPORT
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_PRAGMA_ONCE_SUPPORTED
+ //
+ // This is a wrapper for the #pragma once preprocessor directive.
+ // It allows for some compilers (in particular VC++) to implement significantly
+ // faster include file preprocessing. #pragma once can be used to replace
+ // header include guards or to augment them. However, #pragma once isn't
+ // necessarily supported by all compilers and isn't guaranteed to be so in
+ // the future, so using #pragma once to replace traditional include guards
+ // is not strictly portable. Note that a direct #define for #pragma once is
+ // impossible with VC++, due to limitations, but can be done with other
+ // compilers/preprocessors via _Pragma("once").
+ //
+ // Example usage (which includes traditional header guards for portability):
+ // #ifndef SOMEPACKAGE_SOMEHEADER_H
+ // #define SOMEPACKAGE_SOMEHEADER_H
+ //
+ // #if defined(EA_PRAGMA_ONCE_SUPPORTED)
+ // #pragma once
+ // #endif
+ //
+ // <user code>
+ //
+ // #endif
+ //
+#if defined(_MSC_VER) || defined(__MWERKS__) || defined(__GNUC__) || defined(__SNC__) || defined(__ICC) || defined(__ICL) || defined(__clang__)
+# define EA_PRAGMA_ONCE_SUPPORTED 1
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_OVERRIDE
+ //
+ // See http://msdn.microsoft.com/en-us/library/41w3sh1c.aspx for more information.
+ //
+#ifndef EA_OVERRIDE
+# if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // VS2005 (VC8) and later
+# define EA_OVERRIDE override
+# else
+# define EA_OVERRIDE
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_SEALED
+ //
+ // See http://msdn.microsoft.com/en-us/library/49k3w2fx%28VS.71%29.aspx for more information.
+ //
+#ifndef EA_SEALED
+# if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // VS2005 (VC8) and later
+# define EA_SEALED sealed
+# else
+# define EA_SEALED
+# endif
+#endif
+
+
+ // ------------------------------------------------------------------------
+ // EA_ABSTRACT
+ //
+ // See http://msdn.microsoft.com/en-us/library/49k3w2fx%28VS.71%29.aspx for more information.
+ //
+#ifndef EA_ABSTRACT
+# if defined(EA_COMPILER_MSVC) && (EA_COMPILER_VERSION >= 1400) // VS2005 (VC8) and later
+# define EA_ABSTRACT abstract
+# else
+# define EA_ABSTRACT
+# endif
+#endif
+
+
+#endif // Header include guard
diff --git a/UnknownVersion/include/EABase/config/eaplatform.h b/UnknownVersion/include/EABase/config/eaplatform.h
new file mode 100644
index 0000000..c79e2ad
--- /dev/null
+++ b/UnknownVersion/include/EABase/config/eaplatform.h
@@ -0,0 +1,571 @@
+/*
+Copyright (C) 2009 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*-----------------------------------------------------------------------------
+ * config/eaplatform.h
+ *
+ * Copyright (c) 2002 - 2005 Electronic Arts Inc. All rights reserved.
+ * Maintained by Paul Pedriana, Maxis
+ *
+ *-----------------------------------------------------------------------------
+ * Currently supported platform identification defines include:
+ * EA_PLATFORM_PS3
+ * EA_PLATFORM_PS3_PPU
+ * EA_PLATFORM_PS3_SPU
+ * EA_PLATFORM_XENON (a.k.a. XBox2)
+ * EA_PLATFORM_MAC
+ * EA_PLATFORM_OSX
+ * EA_PLATFORM_LINUX
+ * EA_PLATFORM_WINDOWS
+ * EA_PLATFORM_WIN32
+ * EA_PLATFORM_WIN64
+ * EA_PLATFORM_HPUX
+ * EA_PLATFORM_SUN
+ * EA_PLATFORM_LRB (Larrabee)
+ * EA_PLATFORM_UNIX (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX)
+ * EA_PLATFORM_CYGWIN (pseudo-platform; may be defined along with another platform like EA_PLATFORM_LINUX)
+ * EA_PLATFORM_MINGW (pseudo-platform; may be defined along with another platform like EA_PLATFORM_WINDOWS)
+ * EA_PLATFORM_MICROSOFT (pseudo-platform; may be defined along with another platform like EA_PLATFORM_WINDOWS)
+ *
+ * Other definitions emanating from this file include:
+ * EA_PLATFORM_NAME = <string>
+ * EA_PLATFORM_DESCRIPTION = <string>
+ * EA_PROCESSOR_XXX
+ * EA_SYSTEM_LITTLE_ENDIAN | EA_SYSTEM_BIG_ENDIAN
+ * EA_ASM_STYLE_ATT | EA_ASM_STYLE_INTEL | EA_ASM_STYLE_MOTOROLA
+ * EA_PLATFORM_PTR_SIZE = <integer size in bytes>
+ * EA_PLATFORM_WORD_SIZE = <integer size in bytes>
+ *
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eaplatform_H
+#define INCLUDED_eaplatform_H
+
+
+// Cygwin
+// This is a pseudo-platform which will be defined along with EA_PLATFORM_LINUX when
+// using the Cygwin build environment.
+#if defined(__CYGWIN__)
+ #define EA_PLATFORM_CYGWIN
+ #define EA_PLATFORM_DESKTOP
+#endif
+
+// MinGW
+// This is a pseudo-platform which will be defined along with EA_PLATFORM_WINDOWS when
+// using the MinGW Windows build environment.
+#if defined(__MINGW32__) || defined(__MINGW64__)
+ #define EA_PLATFORM_MINGW
+ #define EA_PLATFORM_DESKTOP
+#endif
+
+// PlayStation 3 PPU (Primary Processing Unit)
+#if defined(EA_PLATFORM_PS3_PPU) || defined(EA_PLATFORM_PS3) || defined(__PU__) || defined(__PPU__)
+ #undef EA_PLATFORM_PS3_PPU
+ #define EA_PLATFORM_PS3_PPU 1
+ #undef EA_PLATFORM_PS3
+ #define EA_PLATFORM_PS3 1
+ #define EA_PLATFORM_NAME "PS3"
+ #define EA_PROCESSOR_POWERPC
+ #define EA_PROCESSOR_POWERPC_64
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "PS3 on PowerPC"
+ #define EA_PLATFORM_CONSOLE
+
+// PlayStation 3 SPU (Synergistic Processing Unit)
+#elif defined(EA_PLATFORM_PS3_SPU) || defined(__SPU__)
+ #undef EA_PLATFORM_PS3_SPU
+ #define EA_PLATFORM_PS3_SPU 1
+ #define EA_PLATFORM_NAME "PS3 SPU"
+ #define EA_PROCESSOR_SPU
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "PS3 SPU on SPU"
+ #define EA_PLATFORM_CONSOLE
+
+// XBox
+// _XBOX is defined by the VC++ project, not the compiler. There is no way
+// to tell if the compiler is compiling for XBox unless _XBOX is #defined
+// in the project files or otherwise. _M_IX86 is the VC++ way of detecting
+// an x86 target, which would mean XBox and not Xenon (a.k.a. XBox2).
+#elif defined(EA_PLATFORM_XBOX) || (defined(_XBOX) && defined(_M_IX86))
+ #undef EA_PLATFORM_XBOX
+ #define EA_PLATFORM_XBOX 1
+ #define EA_PLATFORM_NAME "XBox"
+ #define EA_PROCESSOR_X86
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "XBox on X86"
+ #if defined(_MSC_VER) || defined(__ICL)
+ #define EA_ASM_STYLE_INTEL
+ #endif
+ #define EA_PLATFORM_CONSOLE
+
+// Xenon (XBox 360)
+// The Xenon compiler doesn't define anything in particular to indicate that the
+// target is the Xenon platform. The Xenon SDK, however, expects that XBOX and
+// _XBOX are #defined, so the project build file must make sure these are defined.
+// Since the Xenon compiler in fact defines _M_PPC, we can use this information
+// to infer that Xenon is the target if neither _XENON nor _XBOX2 are specifically
+// defined by the project build file.
+#elif defined(EA_PLATFORM_XENON) || defined(_XENON) || defined(_XBOX2) || ((defined(_XBOX) || defined(XBOX)) && defined(_M_PPC))
+ #undef EA_PLATFORM_XENON
+ #define EA_PLATFORM_XENON 1
+ #define EA_PLATFORM_NAME "Xenon"
+ #define EA_PROCESSOR_POWERPC
+ #define EA_PROCESSOR_POWERPC_64
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Xenon on PowerPC"
+ #if defined(_MSC_VER) || defined(__ICL)
+ #define EA_ASM_STYLE_INTEL
+ #endif
+ #define EA_PLATFORM_CONSOLE
+ #define EA_PLATFORM_MICROSOFT 1
+
+// Larrabee // This part to be removed once __LRB__ is supported by the Larrabee compiler in 2009.
+#elif defined(EA_PLATFORM_LRB) || defined(__LRB__) || (defined(__EDG__) && defined(__ICC) && defined(__x86_64__))
+ #undef EA_PLATFORM_LRB
+ #define EA_PLATFORM_LRB 1
+ #define EA_PLATFORM_NAME "Larrabee"
+ #define EA_PLATFORM_DESCRIPTION "Larrabee on LRB1"
+ #define EA_PROCESSOR_X86_64
+ #if defined(BYTE_ORDER) && (BYTE_ORDER == 4321)
+ #define EA_SYSTEM_BIG_ENDIAN
+ #else
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #endif
+ #define EA_PROCESSOR_LRB
+ #define EA_PROCESSOR_LRB1 // Larrabee version 1
+ #define EA_ASM_STYLE_ATT // Both types of asm style
+ #define EA_ASM_STYLE_INTEL // are supported.
+ #define EA_PLATFORM_DESKTOP
+
+// Android (Google phone OS)
+#elif defined(EA_PLATFORM_ANDROID) || defined(__ANDROID__)
+ #undef EA_PLATFORM_ANDROID
+ #define EA_PLATFORM_ANDROID 1
+ #define EA_PLATFORM_LINUX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_NAME "Android"
+ #define EA_ASM_STYLE_ATT
+ #if defined(__arm__) || defined(__aarch64__)
+ #define EA_PROCESSOR_ARM
+ #define EA_PLATFORM_DESCRIPTION "Android on ARM"
+ #else
+ #error Unknown processor
+ #endif
+ #if !defined(EA_SYSTEM_BIG_ENDIAN) && !defined(EA_SYSTEM_LITTLE_ENDIAN)
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #endif
+ #define EA_PLATFORM_MOBILE
+
+// Palm OS for Mobile (Linux variant)
+#elif defined(EA_PLATFORM_PALM)
+ #undef EA_PLATFORM_PALM
+ #define EA_PLATFORM_PALM 1
+ #define EA_PLATFORM_LINUX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_NAME "Palm"
+ #define EA_POSIX_THREADS_AVAILABLE 1
+ #define EA_ASM_STYLE_ATT
+ #if defined(__arm__)
+ #define EA_PROCESSOR_ARM
+ #define EA_PLATFORM_DESCRIPTION "Palm on ARM"
+ #else
+ #error Unknown processor
+ #endif
+ #if !defined(EA_SYSTEM_BIG_ENDIAN) && !defined(EA_SYSTEM_LITTLE_ENDIAN)
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #endif
+ #define EA_PLATFORM_MOBILE
+
+// Airplay
+#elif defined(EA_PLATFORM_AIRPLAY) || defined(__S3E__)
+ #undef EA_PLATFORM_AIRPLAY
+ #define EA_PLATFORM_AIRPLAY
+ #define EA_PLATFORM_NAME "Airplay"
+ #if defined(__arm__)
+ #define EA_PROCESSOR_ARM
+ #define EA_PLATFORM_DESCRIPTION "Airplay on ARM"
+ #define EA_ASM_STYLE_ATT
+ #undef _MSC_VER
+ #elif defined(_M_IX86)
+ #define EA_PROCESSOR_X86
+ #define EA_PLATFORM_DESCRIPTION "Airplay on x86"
+ #define EA_ASM_STYLE_INTEL
+ #else
+ #error Unknown processor
+ #endif
+ #if !defined(EA_SYSTEM_BIG_ENDIAN) && !defined(EA_SYSTEM_LITTLE_ENDIAN)
+ #if defined(HAVE_BIG_ENDIAN) || (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && (__BYTE_ORDER == __BIG_ENDIAN)))
+ #define EA_SYSTEM_BIG_ENDIAN
+ #else
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #endif
+ #endif
+ #define EA_PLATFORM_MOBILE
+
+// Samsung Bada OS for Mobile (Linux variant)
+#elif defined(EA_PLATFORM_BADA)
+ #undef EA_PLATFORM_BADA
+ #define EA_PLATFORM_BADA 1
+ //#define EA_PLATFORM_LINUX 1 // The underlying OS is Linux, but the app mostly doesn't see this.
+ //#define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_NAME "bada"
+ #define EA_ASM_STYLE_ATT
+ #if defined(__arm__)
+ #define EA_PROCESSOR_ARM
+ #define EA_PLATFORM_DESCRIPTION "bada on ARM"
+ #elif defined(__i386__)
+ #define EA_PLATFORM_BADA_SIMULATOR
+ #define EA_PROCESSOR_X86
+ #define EA_PLATFORM_DESCRIPTION "bada simulator on x86"
+ #else
+ #error Unknown processor
+ #endif
+ #if !defined(EA_SYSTEM_BIG_ENDIAN) && !defined(EA_SYSTEM_LITTLE_ENDIAN)
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #endif
+ #define EA_PLATFORM_MOBILE
+
+#elif defined(__APPLE__) && __APPLE__
+ #include <TargetConditionals.h>
+
+ // Apple family of operating systems.
+ #define EA_PLATFORM_APPLE
+
+ // iPhone
+ // TARGET_OS_IPHONE will be undefined on an unknown compiler, and will be defined on gcc.
+ #if defined(EA_PLATFORM_IPHONE) || defined(__IPHONE__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || (defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR)
+ #undef EA_PLATFORM_IPHONE
+ #define EA_PLATFORM_IPHONE 1
+ #define EA_PLATFORM_NAME "iPhone"
+ #define EA_ASM_STYLE_ATT
+ #define EA_POSIX_THREADS_AVAILABLE 1
+ #if defined(__arm__)
+ #define EA_PROCESSOR_ARM
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "iPhone on ARM"
+ #elif defined(__i386__)
+ #define EA_PLATFORM_IPHONE_SIMULATOR
+ #define EA_PROCESSOR_X86
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "iPhone simulator on x86"
+ #else
+ #error Unknown processor
+ #endif
+ #define EA_PLATFORM_MOBILE
+
+ // Macintosh OSX
+ // TARGET_OS_MAC is defined by the Metrowerks and older AppleC compilers.
+    // However, TARGET_OS_MAC is defined to be 1 in all cases.
+ // __i386__ and __intel__ are defined by the GCC compiler.
+ // __dest_os is defined by the Metrowerks compiler.
+ // __MACH__ is defined by the Metrowerks and GCC compilers.
+ // powerc and __powerc are defined by the Metrowerks and GCC compilers.
+ #elif defined(EA_PLATFORM_OSX) || defined(__MACH__) || (defined(__MSL__) && (__dest_os == __mac_os_x))
+ #undef EA_PLATFORM_OSX
+ #define EA_PLATFORM_OSX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_NAME "OSX"
+ #if defined(__i386__) || defined(__intel__)
+ #define EA_PROCESSOR_X86
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "OSX on x86"
+ #elif defined(__x86_64) || defined(__amd64)
+ #define EA_PROCESSOR_X86_64
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "OSX on x86-64"
+ #elif defined(__arm__)
+ #define EA_PROCESSOR_ARM
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "OSX on ARM"
+ #elif defined(__POWERPC64__) || defined(__powerpc64__)
+ #define EA_PROCESSOR_POWERPC
+ #define EA_PROCESSOR_POWERPC_64
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "OSX on PowerPC 64"
+ #elif defined(__POWERPC__) || defined(__powerpc__)
+ #define EA_PROCESSOR_POWERPC
+ #define EA_PROCESSOR_POWERPC_32
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "OSX on PowerPC"
+ #else
+ #error Unknown processor
+ #endif
+ #if defined(__GNUC__)
+ #define EA_ASM_STYLE_ATT
+ #else
+ #define EA_ASM_STYLE_MOTOROLA
+ #endif
+ #define EA_PLATFORM_DESKTOP
+
+ #else
+ #error Unknown Apple Platform
+ #endif
+
+// Linux
+// __linux and __linux__ are defined by the GCC and Borland compiler.
+// __i386__ and __intel__ are defined by the GCC compiler.
+// __i386__ is defined by the Metrowerks compiler.
+// _M_IX86 is defined by the Borland compiler.
+// __sparc__ is defined by the GCC compiler.
+// __powerpc__ is defined by the GCC compiler.
+#elif defined(EA_PLATFORM_LINUX) || (defined(__linux) || defined(__linux__))
+ #undef EA_PLATFORM_LINUX
+ #define EA_PLATFORM_LINUX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_NAME "Linux"
+ #if defined(__i386__) || defined(__intel__) || defined(_M_IX86)
+ #define EA_PROCESSOR_X86
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Linux on x86"
+ #elif defined(__x86_64__)
+ #define EA_PROCESSOR_X86_64
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Linux on x86-64"
+ #elif defined(__powerpc64__)
+ #define EA_PROCESSOR_POWERPC
+ #define EA_PROCESSOR_POWERPC_64
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Linux on PowerPC 64"
+ #elif defined(__powerpc__)
+ #define EA_PROCESSOR_POWERPC
+ #define EA_PROCESSOR_POWERPC_32
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Linux on PowerPC"
+ #elif defined(__arm__) || defined(__aarch64__)
+ #define EA_PROCESSOR_ARM
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Linux on ARM"
+ #else
+ #error Unknown processor
+ #error Unknown endianness
+ #endif
+ #if defined(__GNUC__)
+ #define EA_ASM_STYLE_ATT
+ #endif
+ #define EA_PLATFORM_DESKTOP
+
+// Win CE (Windows mobile)
+#elif defined(EA_PLATFORM_WINCE) || defined(_WIN32_WCE)
+ #undef EA_PLATFORM_WINCE
+ #define EA_PLATFORM_WINCE 1
+ #define EA_PLATFORM_NAME "WinCE"
+ #define EA_ASM_STYLE_INTEL
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #if defined(_M_ARM) // Also there is _M_ARMT
+ #define EA_PROCESSOR_ARM
+ #define EA_PLATFORM_DESCRIPTION "Windows CE on ARM"
+ #elif defined(_M_IX86)
+ #define EA_PROCESSOR_X86
+ #define EA_PLATFORM_DESCRIPTION "Windows CE on X86"
+ #else //Possibly other Windows CE variants
+ #error Unknown processor
+ #endif
+ #define EA_PLATFORM_MOBILE
+
+// Windows
+// _WIN32 is defined by the VC++, Intel and GCC compilers.
+// _WIN64 is defined by the VC++, Intel and GCC compilers.
+// __WIN32__ is defined by the Borland compiler.
+// __INTEL__ is defined by the Metrowerks compiler.
+// _M_IX86, _M_AMD64 and _M_IA64 are defined by the VC++, Intel, and Borland compilers.
+// _X86_, _AMD64_, and _IA64_ are defined by the Metrowerks compiler.
+// _M_ARM is defined by the VC++ compiler.
+#elif (defined(EA_PLATFORM_WINDOWS) || (defined(_WIN32) || defined(__WIN32__) || defined(_WIN64) || (defined(__MWERKS__) && defined(_X86_)))) && !defined(_XBOX)
+ #undef EA_PLATFORM_WINDOWS
+ #define EA_PLATFORM_WINDOWS 1
+ #define EA_PLATFORM_NAME "Windows"
+ #ifdef _WIN64 // VC++ defines both _WIN32 and _WIN64 when compiling for Win64.
+ #define EA_PLATFORM_WIN64
+ #else
+ #define EA_PLATFORM_WIN32
+ #endif
+ #if defined(_M_AMD64) || defined(_AMD64_) || defined(__x86_64__)
+ #define EA_PROCESSOR_X86_64
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Windows on X86-64"
+ #elif defined(_M_IX86) || defined(_X86_)
+ #define EA_PROCESSOR_X86
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Windows on X86"
+ #elif defined(_M_IA64) || defined(_IA64_)
+ #define EA_PROCESSOR_IA64
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Windows on IA-64"
+ #elif defined(_M_ARM)
+ #define EA_PROCESSOR_ARM
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "Windows CE on ARM"
+ #else //Possibly other Windows CE variants
+ #error Unknown processor
+ #error Unknown endianness
+ #endif
+ #if defined(__GNUC__)
+ #define EA_ASM_STYLE_ATT
+ #elif defined(_MSC_VER) || defined(__BORLANDC__) || defined(__ICL)
+ #define EA_ASM_STYLE_INTEL
+ #endif
+ #define EA_PLATFORM_DESKTOP
+ #define EA_PLATFORM_MICROSOFT 1
+
+// Sun (Solaris)
+// __SUNPRO_CC is defined by the Sun compiler.
+// __sun is defined by the GCC compiler.
+// __i386 is defined by the Sun and GCC compilers.
+// __sparc is defined by the Sun and GCC compilers.
+#elif defined(EA_PLATFORM_SUN) || (defined(__SUNPRO_CC) || defined(__sun))
+ #undef EA_PLATFORM_SUN
+ #define EA_PLATFORM_SUN 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_NAME "SUN"
+ #if defined(__i386)
+ #define EA_PROCESSOR_X86
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "SUN on x86"
+ #elif defined(__sparc)
+ #define EA_PROCESSOR_SPARC
+ #define EA_SYSTEM_BIG_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "SUN on Sparc"
+ #else
+ #error Unknown processor
+ #error Unknown endianness
+ #endif
+ #define EA_PLATFORM_DESKTOP
+
+#elif defined(__ghs__)
+ #undef EA_PLATFORM_INTEGRITY
+ #define EA_PLATFORM_INTEGRITY 1
+ #undef EA_PLATFORM_LINUX
+ #define EA_PLATFORM_LINUX 1
+ #define EA_PLATFORM_UNIX 1
+ #define EA_PLATFORM_NAME "INTEGRITY"
+ #define EA_PROCESSOR_ARM
+ #define EA_SYSTEM_LITTLE_ENDIAN
+ #define EA_PLATFORM_DESCRIPTION "INTEGRITY on ARM"
+ #define EA_PLATFORM_DESKTOP
+
+#else
+ #error Unknown platform
+ #error Unknown processor
+ #error Unknown endianness
+#endif
+
+
+
+// EA_PLATFORM_PTR_SIZE
+// Platform pointer size; same as sizeof(void*).
+// This is not the same as sizeof(int), as int is usually 32 bits on
+// even 64 bit platforms.
+//
+// _WIN64 is defined by Win64 compilers, such as VC++.
+// _M_IA64 is defined by VC++ and Intel compilers for IA64 processors.
+// __LP64__ is defined by HP compilers for the LP64 standard.
+// _LP64 is defined by the GCC and Sun compilers for the LP64 standard.
+// __ia64__ is defined by the GCC compiler for IA64 processors.
+// __arch64__ is defined by the Sparc compiler for 64 bit processors.
+// __mips64__ is defined by the GCC compiler for MIPS processors.
+// __powerpc64__ is defined by the GCC compiler for PowerPC processors.
+// __64BIT__ is defined by the AIX compiler for 64 bit processors.
+// __sizeof_ptr is defined by the ARM compiler (armcc, armcpp).
+//
+#ifndef EA_PLATFORM_PTR_SIZE
+ #if defined(__WORDSIZE) // Defined by some variations of GCC.
+ #define EA_PLATFORM_PTR_SIZE ((__WORDSIZE) / 8)
+ #elif defined(_WIN64) || defined(__LP64__) || defined(_LP64) || defined(_M_IA64) || defined(__ia64__) || defined(__arch64__) || defined(__mips64__) || defined(__64BIT__)
+ #define EA_PLATFORM_PTR_SIZE 8
+ #elif defined(__CC_ARM) && (__sizeof_ptr == 8)
+ #define EA_PLATFORM_PTR_SIZE 8
+ #else
+ #define EA_PLATFORM_PTR_SIZE 4
+ #endif
+#endif
+
+
+
+// EA_PLATFORM_WORD_SIZE
+// This defines the size of a machine word. This will be the same as
+// the size of registers on the machine but not necessarily the same
+// as the size of pointers on the machine. A number of 64 bit platforms
+// have 64 bit registers but 32 bit pointers.
+//
+#ifndef EA_PLATFORM_WORD_SIZE
+ #if defined(EA_PLATFORM_XENON) || defined(EA_PLATFORM_PS3)
+ #define EA_PLATFORM_WORD_SIZE 8
+ #else
+ #define EA_PLATFORM_WORD_SIZE EA_PLATFORM_PTR_SIZE
+ #endif
+#endif
+
+
+
+// Disabled until and unless deemed useful:
+//
+// Platform integer types
+// These definitions allow us to define other things properly, such as
+// sized integer types. In order to bring some order to this chaos,
+// we follow a variation of the standard LP64 conventions defined at:
+// http://www.opengroup.org/public/tech/aspen/lp64_wp.htm
+//
+// #if defined(EA_PLATFORM_LINUX) || defined(EA_PLATFORM_OSX) || defined(EA_PLATFORM_XBOX) || defined(EA_PLATFORM_XENON)
+// #define EA_PLATFORM_ILP32_LL64 // int, long, ptr = 32 bits; long long = 64 bits.
+//
+// #elif defined(EA_PLATFORM_SUN) || defined(EA_PLATFORM_SGI)
+// #if (EA_PLATFORM_WORD_SIZE == 32)
+// #define ILP32_LL64 // int, long, ptr = 32 bits; long long = 64 bits.
+// #else // 64 bit platform
+// #define EA_PLATFORM_I32_LLLP64 // int = 32 bits; long, long long, ptr = 64 bits.
+// #endif
+//
+// #elif defined(EA_PLATFORM_WINDOWS)
+// #if (EA_PLATFORM_WORD_SIZE == 32)
+// #define ILP32_LL64 // int, long, ptr = 32 bits; long long = 64 bits.
+// #else // 64 bit platform
+// #if defined(__MWERKS__) || defined(__GNUC__)
+// #define EA_PLATFORM_I32_LLLP64 // int = 32 bits; long, long long, ptr = 64 bits.
+// #else // MSVC
+// #define EA_PLATFORM_IL32_LLP64 // int, long = 32 bits; long long, ptr = 64 bits.
+// #endif
+// #endif
+// #endif
+
+
+#endif // INCLUDED_eaplatform_H
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EABase/eabase.h b/UnknownVersion/include/EABase/eabase.h
new file mode 100644
index 0000000..5ba11cc
--- /dev/null
+++ b/UnknownVersion/include/EABase/eabase.h
@@ -0,0 +1,884 @@
+/*
+Copyright (C) 2009 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*-----------------------------------------------------------------------------
+ * eabase.h
+ *
+ * Copyright (c) 2002 - 2005 Electronic Arts Inc. All rights reserved.
+ * Maintained by Paul Pedriana, Maxis
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_eabase_H
+#define INCLUDED_eabase_H
+
+
+// Identify the compiler and declare the EA_COMPILER_xxxx defines
+#ifndef INCLUDED_eacompiler_H
+# include "EABase/config/eacompiler.h"
+#endif
+
+// Identify traits which this compiler supports, or does not support
+#ifndef INCLUDED_eacompilertraits_H
+# include "EABase/config/eacompilertraits.h"
+#endif
+
+// Identify the platform and declare the EA_xxxx defines
+#ifndef INCLUDED_eaplatform_H
+# include "EABase/config/eaplatform.h"
+#endif
+
+# if defined(_MSC_VER) && (_MSC_VER >= 1600)
+#include <yvals.h>
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EABASE_VERSION
+//
+// We more or less follow the conventional EA packaging approach to versioning
+// here. A primary distinction here is that minor versions are defined as two
+// digit entities (e.g. ".03") instead of minimal digit entities (e.g. ".3"). The logic
+// here is that the value is a counter and not a floating point fraction.
+// Note that the major version doesn't have leading zeros.
+//
+// Example version strings:
+// "0.91.00" // Major version 0, minor version 91, patch version 0.
+// "1.00.00" // Major version 1, minor and patch version 0.
+// "3.10.02" // Major version 3, minor version 10, patch version 02.
+//     "12.03.01"     // Major version 12, minor version 03, patch version 01.
+//
+// Example usage:
+// printf("EABASE version: %s", EABASE_VERSION);
+// printf("EABASE version: %d.%d.%d", EABASE_VERSION_N / 10000 % 100, EABASE_VERSION_N / 100 % 100, EABASE_VERSION_N % 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EABASE_VERSION
+# define EABASE_VERSION "2.00.22"
+# define EABASE_VERSION_N 20022
+#endif
+
+
+
+// ------------------------------------------------------------------------
+// The C++ standard defines size_t as a built-in type. Some compilers are
+// not standards-compliant in this respect, so we need an additional include.
+// The case is similar with wchar_t under C++.
+
+#if defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_MSVC) || defined(EA_WCHAR_T_NON_NATIVE)
+# include <stddef.h>
+#endif
+
+
+// ------------------------------------------------------------------------
+// Ensure this header file is only processed once (with certain compilers)
+// GCC doesn't need such a pragma because it has special recognition for
+// include guards (such as that above) and effectively implements the same
+// thing without having to resort to non-portable pragmas. It is possible
+// that the decision to use pragma once here is ill-advised, perhaps because
+// some compilers masquerade as MSVC but don't implement all features.
+#if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_METROWERKS)
+# pragma once
+#endif
+
+
+// ------------------------------------------------------------------------
+// By default, GCC on certain platforms defines NULL as ((void*)0), which is the
+// C definition. This causes all sort of problems for C++ code, so it is
+// worked around by undefining NULL.
+
+#if defined(NULL)
+# undef NULL
+#endif
+
+
+// ------------------------------------------------------------------------
+// Define the NULL pointer. This is normally defined in <stddef.h>, but we
+// don't want to force a global dependency on that header, so the definition
+// is duplicated here.
+
+#if defined(__cplusplus)
+# define NULL 0
+#else
+# define NULL ((void*)0)
+#endif
+
+
+// ------------------------------------------------------------------------
+// C98/99 Standard typedefs. From the ANSI ISO/IEC 9899 standards document
+// Most recent versions of the gcc-compiler come with these defined in
+// inttypes.h or stddef.h. Determining if they are predefined can be
+// tricky, so we expect some problems on non-standard compilers
+
+// ------------------------------------------------------------------------
+// We need to test this after we potentially include stddef.h, otherwise we
+// would have put this into the compilertraits header.
+#if !defined(EA_COMPILER_HAS_INTTYPES) && (!defined(_MSC_VER) || (_MSC_VER > 1500)) && (defined(EA_COMPILER_IS_C99) || defined(INT8_MIN) || defined(EA_COMPILER_HAS_C99_TYPES) || defined(_SN_STDINT_H))
+# define EA_COMPILER_HAS_INTTYPES
+#endif
+
+
+#ifdef EA_COMPILER_HAS_INTTYPES // If the compiler supports inttypes...
+ // ------------------------------------------------------------------------
+ // Include the stdint header to define and derive the required types.
+ // Additionally include inttypes.h as many compilers, including variations
+ // of GCC define things in inttypes.h that the C99 standard says goes
+ // in stdint.h.
+ //
+ // The C99 standard specifies that inttypes.h only define printf/scanf
+ // format macros if __STDC_FORMAT_MACROS is defined before #including
+ // inttypes.h. For consistency, we do that here.
+# ifndef __STDC_FORMAT_MACROS
+# define __STDC_FORMAT_MACROS
+# endif
+# if !defined(__psp__) && defined(__GNUC__) // The GCC compiler defines standard int types (e.g. uint32_t) but not PRId8, etc.
+# include <inttypes.h> // PRId8, SCNd8, etc.
+# endif
+# include <stdint.h> // int32_t, INT64_C, UINT8_MAX, etc.
+# include <math.h> // float_t, double_t, etc.
+# include <float.h> // FLT_EVAL_METHOD.
+
+# if !defined(FLT_EVAL_METHOD) && (defined(__FLT_EVAL_METHOD__) || defined(_FEVAL)) // GCC 3.x defines __FLT_EVAL_METHOD__ instead of the C99 standard FLT_EVAL_METHOD.
+# ifdef __FLT_EVAL_METHOD__
+# define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+# else
+# define FLT_EVAL_METHOD _FEVAL
+# endif
+# endif
+
+ // MinGW GCC (up to at least v4.3.0-20080502) mistakenly neglects to define float_t and double_t.
+ // This appears to be an acknowledged bug as of March 2008 and is scheduled to be fixed.
+ // Similarly, Android uses a mix of custom standard library headers which don't define float_t and double_t.
+# if defined(__MINGW32__) || defined(EA_PLATFORM_ANDROID)
+# if defined(__FLT_EVAL_METHOD__)
+# if(__FLT_EVAL_METHOD__== 0)
+ typedef float float_t;
+ typedef double double_t;
+# elif(__FLT_EVAL_METHOD__ == 1)
+ typedef double float_t;
+ typedef double double_t;
+# elif(__FLT_EVAL_METHOD__ == 2)
+ typedef long double float_t;
+ typedef long double double_t;
+# endif
+# else
+ typedef float float_t;
+ typedef double double_t;
+# endif
+# endif
+
+ // Airplay's pretty broken for these types (at least as of 4.1)
+# if defined __S3E__
+
+ typedef float float_t;
+ typedef double double_t;
+
+# undef INT32_C
+# undef UINT32_C
+# undef INT64_C
+# undef UINT64_C
+# define INT32_C(x) x##L
+# define UINT32_C(x) x##UL
+# define INT64_C(x) x##LL
+# define UINT64_C(x) x##ULL
+
+# define EA_PRI_64_LENGTH_SPECIFIER "ll"
+# define EA_SCN_64_LENGTH_SPECIFIER "ll"
+
+# define SCNd16 "hd"
+# define SCNi16 "hi"
+# define SCNo16 "ho"
+# define SCNu16 "hu"
+# define SCNx16 "hx"
+
+# define SCNd32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions.
+# define SCNi32 "i"
+# define SCNo32 "o"
+# define SCNu32 "u"
+# define SCNx32 "x"
+
+# define SCNd64 EA_SCN_64_LENGTH_SPECIFIER "d"
+# define SCNi64 EA_SCN_64_LENGTH_SPECIFIER "i"
+# define SCNo64 EA_SCN_64_LENGTH_SPECIFIER "o"
+# define SCNu64 EA_SCN_64_LENGTH_SPECIFIER "u"
+# define SCNx64 EA_SCN_64_LENGTH_SPECIFIER "x"
+
+# define PRIdPTR PRId32 // Usage of pointer values will generate warnings with
+# define PRIiPTR PRIi32 // some compilers because they are defined in terms of
+# define PRIoPTR PRIo32 // integers. However, you can't simply use "p" because
+# define PRIuPTR PRIu32 // 'p' is interpreted in a specific and often different
+# define PRIxPTR PRIx32 // way by the library.
+# define PRIXPTR PRIX32
+
+# define PRId8 "hhd"
+# define PRIi8 "hhi"
+# define PRIo8 "hho"
+# define PRIu8 "hhu"
+# define PRIx8 "hhx"
+# define PRIX8 "hhX"
+
+# define PRId16 "hd"
+# define PRIi16 "hi"
+# define PRIo16 "ho"
+# define PRIu16 "hu"
+# define PRIx16 "hx"
+# define PRIX16 "hX"
+
+# define PRId32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions.
+# define PRIi32 "i"
+# define PRIo32 "o"
+# define PRIu32 "u"
+# define PRIx32 "x"
+# define PRIX32 "X"
+
+# define PRId64 EA_PRI_64_LENGTH_SPECIFIER "d"
+# define PRIi64 EA_PRI_64_LENGTH_SPECIFIER "i"
+# define PRIo64 EA_PRI_64_LENGTH_SPECIFIER "o"
+# define PRIu64 EA_PRI_64_LENGTH_SPECIFIER "u"
+# define PRIx64 EA_PRI_64_LENGTH_SPECIFIER "x"
+# define PRIX64 EA_PRI_64_LENGTH_SPECIFIER "X"
+# endif
+
+ // The CodeSourcery definitions of PRIxPTR and SCNxPTR are broken for 32 bit systems.
+# if defined(__SIZEOF_SIZE_T__) && (__SIZEOF_SIZE_T__ == 4) && (defined(__have_long64) || defined(__have_longlong64) || defined(__S3E__))
+# undef PRIdPTR
+# define PRIdPTR "d"
+# undef PRIiPTR
+# define PRIiPTR "i"
+# undef PRIoPTR
+# define PRIoPTR "o"
+# undef PRIuPTR
+# define PRIuPTR "u"
+# undef PRIxPTR
+# define PRIxPTR "x"
+# undef PRIXPTR
+# define PRIXPTR "X"
+
+# undef SCNdPTR
+# define SCNdPTR "d"
+# undef SCNiPTR
+# define SCNiPTR "i"
+# undef SCNoPTR
+# define SCNoPTR "o"
+# undef SCNuPTR
+# define SCNuPTR "u"
+# undef SCNxPTR
+# define SCNxPTR "x"
+# endif
+#else // else we must implement types ourselves.
+
+# if !defined(__S3E__)
+# if !defined(__BIT_TYPES_DEFINED__) && !defined(__int8_t_defined)
+ typedef signed char int8_t; //< 8 bit signed integer
+# endif
+# if !defined( __int8_t_defined )
+ typedef signed short int16_t; //< 16 bit signed integer
+ typedef signed int int32_t; //< 32 bit signed integer. This works for both 32 bit and 64 bit platforms, as we assume the LP64 is followed.
+# define __int8_t_defined
+# endif
+ typedef unsigned char uint8_t; //< 8 bit unsigned integer
+ typedef unsigned short uint16_t; //< 16 bit unsigned integer
+# if !defined( __uint32_t_defined )
+ typedef unsigned int uint32_t; //< 32 bit unsigned integer. This works for both 32 bit and 64 bit platforms, as we assume the LP64 is followed.
+# define __uint32_t_defined
+# endif
+# endif
+
+ // According to the C98/99 standard, FLT_EVAL_METHOD defines control the
+ // width used for floating point _t types.
+# if defined(__MWERKS__) && ((defined(_MSL_C99) && (_MSL_C99 == 1)) || (__MWERKS__ < 0x4000))
+ // Metrowerks defines FLT_EVAL_METHOD and
+ // float_t/double_t under this condition.
+# elif defined(FLT_EVAL_METHOD)
+# if (FLT_EVAL_METHOD == 0)
+ typedef float float_t;
+ typedef double double_t;
+# elif (FLT_EVAL_METHOD == 1)
+ typedef double float_t;
+ typedef double double_t;
+# elif (FLT_EVAL_METHOD == 2)
+ typedef long double float_t;
+ typedef long double double_t;
+# endif
+# else
+# define FLT_EVAL_METHOD 0
+ typedef float float_t;
+ typedef double double_t;
+# endif
+
+# if defined(EA_PLATFORM_LINUX) || defined(EA_PLATFORM_PS3) || defined(EA_PLATFORM_PS3_SPU)
+ typedef signed long long int64_t;
+ typedef unsigned long long uint64_t;
+
+# elif defined(EA_PLATFORM_SUN) || defined(EA_PLATFORM_SGI)
+# if (EA_PLATFORM_PTR_SIZE == 4)
+ typedef signed long long int64_t;
+ typedef unsigned long long uint64_t;
+# else
+ typedef signed long int64_t;
+ typedef unsigned long uint64_t;
+# endif
+
+# elif defined(EA_PLATFORM_WINDOWS) || defined(EA_PLATFORM_XBOX) || defined(EA_PLATFORM_XENON) || defined(EA_PLATFORM_MAC)
+# if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_BORLAND) || defined(EA_COMPILER_INTEL)
+ typedef signed __int64 int64_t;
+ typedef unsigned __int64 uint64_t;
+# else // GCC, Metrowerks, etc.
+ typedef long long int64_t;
+ typedef unsigned long long uint64_t;
+# endif
+# elif defined(EA_PLATFORM_AIRPLAY)
+# else
+ typedef signed long long int64_t;
+ typedef unsigned long long uint64_t;
+# endif
+
+
+ // ------------------------------------------------------------------------
+ // macros for declaring constants in a portable way.
+ //
+ // e.g. int64_t x = INT64_C(1234567812345678);
+ // e.g. int64_t x = INT64_C(0x1111111122222222);
+ // e.g. uint64_t x = UINT64_C(0x1111111122222222);
+
+# ifndef INT8_C_DEFINED // If the user hasn't already defined these...
+# define INT8_C_DEFINED
+
+ // VC++ 7.0 and earlier don't handle the LL suffix.
+# if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_BORLAND)
+# ifndef INT8_C
+#           define INT8_C(x)      int8_t(x)  // x##i8 doesn't work satisfactorily because -128i8 generates an out of range warning.
+# endif
+# ifndef UINT8_C
+# define UINT8_C(x) uint8_t(x)
+# endif
+# ifndef INT16_C
+#           define INT16_C(x)     int16_t(x) // x##i16 doesn't work satisfactorily because -32768i8 generates an out of range warning.
+# endif
+# ifndef UINT16_C
+# define UINT16_C(x) uint16_t(x)
+# endif
+# ifndef INT32_C
+# define INT32_C(x) x##i32
+# endif
+# ifndef UINT32_C
+# define UINT32_C(x) x##ui32
+# endif
+# ifndef INT64_C
+# define INT64_C(x) x##i64
+# endif
+# ifndef UINT64_C
+# define UINT64_C(x) x##ui64
+# endif
+
+# elif !defined(__STDC_CONSTANT_MACROS) // __STDC_CONSTANT_MACROS is defined by GCC 3 and later when INT8_C(), etc. are defined.
+# define INT8_C(x) int8_t(x) // For the majority of compilers and platforms, long is 32 bits and long long is 64 bits.
+# define UINT8_C(x) uint8_t(x)
+# define INT16_C(x) int16_t(x)
+# define UINT16_C(x) uint16_t(x) // Possibly we should make this be uint16_t(x##u). Let's see how compilers react before changing this.
+# if defined(EA_PLATFORM_PS3) // PS3 defines long as 64 bit, so we cannot use any size suffix.
+# define INT32_C(x) int32_t(x)
+# define UINT32_C(x) uint32_t(x)
+# else // Else we are working on a platform whereby sizeof(long) == sizeof(int32_t).
+# define INT32_C(x) x##L
+# define UINT32_C(x) x##UL
+# endif
+# define INT64_C(x) x##LL // The way to deal with this is to compare ULONG_MAX to 0xffffffff and if not equal, then remove the L.
+# define UINT64_C(x) x##ULL // We need to follow a similar approach for LL.
+# endif
+# endif
+
+ // ------------------------------------------------------------------------
+ // type sizes
+# ifndef INT8_MAX_DEFINED // If the user hasn't already defined these...
+# define INT8_MAX_DEFINED
+
+ // The value must be 2^(n-1)-1
+# ifndef INT8_MAX
+# define INT8_MAX 127
+# endif
+# ifndef INT16_MAX
+# define INT16_MAX 32767
+# endif
+# ifndef INT32_MAX
+# define INT32_MAX 2147483647
+# endif
+# ifndef INT64_MAX
+# define INT64_MAX INT64_C(9223372036854775807)
+# endif
+
+        // The value must be either -2^(n-1) or 1-2^(n-1).
+# ifndef INT8_MIN
+# define INT8_MIN -128
+# endif
+# ifndef INT16_MIN
+# define INT16_MIN -32768
+# endif
+# ifndef INT32_MIN
+# define INT32_MIN (-INT32_MAX - 1) // -2147483648
+# endif
+# ifndef INT64_MIN
+# define INT64_MIN (-INT64_MAX - 1) // -9223372036854775808
+# endif
+
+ // The value must be 2^n-1
+# ifndef UINT8_MAX
+# define UINT8_MAX 0xffU // 255
+# endif
+# ifndef UINT16_MAX
+# define UINT16_MAX 0xffffU // 65535
+# endif
+# ifndef UINT32_MAX
+# define UINT32_MAX UINT32_C(0xffffffff) // 4294967295
+# endif
+# ifndef UINT64_MAX
+# define UINT64_MAX UINT64_C(0xffffffffffffffff) // 18446744073709551615
+# endif
+# endif
+
+ // ------------------------------------------------------------------------
+ // sized printf and scanf format specifiers
+ // See the C99 standard, section 7.8.1 -- Macros for format specifiers.
+ //
+ // The C99 standard specifies that inttypes.h only define printf/scanf
+ // format macros if __STDC_FORMAT_MACROS is defined before #including
+ // inttypes.h. For consistency, we define both __STDC_FORMAT_MACROS and
+ // the printf format specifiers here. We also skip the "least/most"
+ // variations of these specifiers, as we've decided to do so with
+ // basic types.
+ //
+ // For 64 bit systems, we assume the LP64 standard is followed
+ // (as opposed to ILP64, etc.) For 32 bit systems, we assume the
+ // ILP32 standard is followed. See:
+ // http://www.opengroup.org/public/tech/aspen/lp64_wp.htm
+ // for information about this. Thus, on both 32 and 64 bit platforms,
+ // %l refers to 32 bit data while %ll refers to 64 bit data.
+
+# ifndef __STDC_FORMAT_MACROS
+# define __STDC_FORMAT_MACROS
+# endif
+
+# if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_BORLAND) // VC++ 7.1+ understands long long as a data type but doesn't accept %ll as a printf specifier.
+# define EA_PRI_64_LENGTH_SPECIFIER "I64"
+# define EA_SCN_64_LENGTH_SPECIFIER "I64"
+# else
+# define EA_PRI_64_LENGTH_SPECIFIER "ll"
+# define EA_SCN_64_LENGTH_SPECIFIER "ll"
+# endif // It turns out that some platforms use %q to represent a 64 bit value, but these are not relevant to us at this time.
+
+ // Printf format specifiers
+# if defined(EA_COMPILER_IS_C99) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_METROWERKS) // || defined(EA_COMPILER_INTEL) ?
+# define PRId8 "hhd"
+# define PRIi8 "hhi"
+# define PRIo8 "hho"
+# define PRIu8 "hhu"
+# define PRIx8 "hhx"
+# define PRIX8 "hhX"
+# else // VC++, Borland, etc. which have no way to specify 8 bit values other than %c.
+# define PRId8 "c" // This may not work properly but it at least will not crash. Try using 16 bit versions instead.
+# define PRIi8 "c" // "
+# define PRIo8 "o" // "
+# define PRIu8 "u" // "
+# define PRIx8 "x" // "
+# define PRIX8 "X" // "
+# endif
+
+# define PRId16 "hd"
+# define PRIi16 "hi"
+# define PRIo16 "ho"
+# define PRIu16 "hu"
+# define PRIx16 "hx"
+# define PRIX16 "hX"
+
+# define PRId32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions.
+# define PRIi32 "i"
+# define PRIo32 "o"
+# define PRIu32 "u"
+# define PRIx32 "x"
+# define PRIX32 "X"
+
+# define PRId64 EA_PRI_64_LENGTH_SPECIFIER "d"
+# define PRIi64 EA_PRI_64_LENGTH_SPECIFIER "i"
+# define PRIo64 EA_PRI_64_LENGTH_SPECIFIER "o"
+# define PRIu64 EA_PRI_64_LENGTH_SPECIFIER "u"
+# define PRIx64 EA_PRI_64_LENGTH_SPECIFIER "x"
+# define PRIX64 EA_PRI_64_LENGTH_SPECIFIER "X"
+
+# if (EA_PLATFORM_PTR_SIZE == 4)
+# define PRIdPTR PRId32 // Usage of pointer values will generate warnings with
+# define PRIiPTR PRIi32 // some compilers because they are defined in terms of
+# define PRIoPTR PRIo32 // integers. However, you can't simply use "p" because
+# define PRIuPTR PRIu32 // 'p' is interpreted in a specific and often different
+# define PRIxPTR PRIx32 // way by the library.
+# define PRIXPTR PRIX32
+# elif (EA_PLATFORM_PTR_SIZE == 8)
+# define PRIdPTR PRId64
+# define PRIiPTR PRIi64
+# define PRIoPTR PRIo64
+# define PRIuPTR PRIu64
+# define PRIxPTR PRIx64
+# define PRIXPTR PRIX64
+# endif
+
+ // Scanf format specifiers
+# if defined(EA_COMPILER_IS_C99) || defined(EA_COMPILER_GNUC) || defined(EA_COMPILER_METROWERKS) // || defined(EA_COMPILER_INTEL) ?
+# define SCNd8 "hhd"
+# define SCNi8 "hhi"
+# define SCNo8 "hho"
+# define SCNu8 "hhu"
+# define SCNx8 "hhx"
+# else // VC++, Borland, etc. which have no way to specify 8 bit values other than %c.
+# define SCNd8 "c" // This will not work properly but it at least will not crash. Try using 16 bit versions instead.
+# define SCNi8 "c" // "
+# define SCNo8 "c" // "
+# define SCNu8 "c" // "
+# define SCNx8 "c" // "
+# endif
+
+# define SCNd16 "hd"
+# define SCNi16 "hi"
+# define SCNo16 "ho"
+# define SCNu16 "hu"
+# define SCNx16 "hx"
+
+# define SCNd32 "d" // This works for both 32 bit and 64 bit systems, as we assume LP64 conventions.
+# define SCNi32 "i"
+# define SCNo32 "o"
+# define SCNu32 "u"
+# define SCNx32 "x"
+
+# define SCNd64 EA_SCN_64_LENGTH_SPECIFIER "d"
+# define SCNi64 EA_SCN_64_LENGTH_SPECIFIER "i"
+# define SCNo64 EA_SCN_64_LENGTH_SPECIFIER "o"
+# define SCNu64 EA_SCN_64_LENGTH_SPECIFIER "u"
+# define SCNx64 EA_SCN_64_LENGTH_SPECIFIER "x"
+
+# if (EA_PLATFORM_PTR_SIZE == 4)
+# define SCNdPTR SCNd32 // Usage of pointer values will generate warnings with
+# define SCNiPTR SCNi32 // some compilers because they are defined in terms of
+# define SCNoPTR SCNo32 // integers. However, you can't simply use "p" because
+# define SCNuPTR SCNu32 // 'p' is interpreted in a specific and often different
+# define SCNxPTR SCNx32 // way by the library.
+# elif (EA_PLATFORM_PTR_SIZE == 8)
+# define SCNdPTR SCNd64
+# define SCNiPTR SCNi64
+# define SCNoPTR SCNo64
+# define SCNuPTR SCNu64
+# define SCNxPTR SCNx64
+# endif
+
+#endif
+
+
+// ------------------------------------------------------------------------
+// bool8_t
+// The definition of a bool8_t is controversial with some, as it doesn't
+// act just like built-in bool. For example, you can assign -100 to it.
+//
+#ifndef BOOL8_T_DEFINED // If the user hasn't already defined this...
+# define BOOL8_T_DEFINED
+# if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_METROWERKS) || (defined(EA_COMPILER_INTEL) && defined(EA_PLATFORM_WINDOWS)) || defined(EA_COMPILER_BORLAND)
+# if defined(__cplusplus)
+ typedef bool bool8_t;
+# else
+ typedef int8_t bool8_t;
+# endif
+# else // EA_COMPILER_GNUC generally uses 4 bytes per bool.
+ typedef int8_t bool8_t;
+# endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// intptr_t / uintptr_t
+// Integer type guaranteed to be big enough to hold
+// a native pointer ( intptr_t is defined in STDDEF.H )
+//
+#if !defined(_INTPTR_T_DEFINED) && !defined(_intptr_t_defined) && !defined(EA_COMPILER_HAS_C99_TYPES)
+# if (EA_PLATFORM_PTR_SIZE == 4)
+ typedef int32_t intptr_t;
+# elif (EA_PLATFORM_PTR_SIZE == 8)
+ typedef int64_t intptr_t;
+# endif
+
+# define _intptr_t_defined
+# define _INTPTR_T_DEFINED
+#endif
+
+#if !defined(_UINTPTR_T_DEFINED) && !defined(_uintptr_t_defined) && !defined(EA_COMPILER_HAS_C99_TYPES)
+# if (EA_PLATFORM_PTR_SIZE == 4)
+ typedef uint32_t uintptr_t;
+# elif (EA_PLATFORM_PTR_SIZE == 8)
+ typedef uint64_t uintptr_t;
+# endif
+
+# define _uintptr_t_defined
+# define _UINTPTR_T_DEFINED
+#endif
+
+#if !defined(EA_COMPILER_HAS_INTTYPES)
+# ifndef INTMAX_T_DEFINED
+# define INTMAX_T_DEFINED
+
+ // At this time, all supported compilers have int64_t as the max
+ // integer type. Some compilers support a 128 bit inteter type,
+ // but in those cases it is not a true int128_t but rather a
+ // crippled data type.
+ typedef int64_t intmax_t;
+ typedef uint64_t uintmax_t;
+# endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// ssize_t
+// signed equivalent to size_t.
+// This is defined by GCC but not by other compilers.
+//
+#if !defined(__GNUC__)
+ // As of this writing, all non-GCC compilers significant to us implement
+ // uintptr_t the same as size_t. However, this isn't guaranteed to be
+ // so for all compilers, as size_t may be based on int, long, or long long.
+# if defined(_MSC_VER) && (EA_PLATFORM_PTR_SIZE == 8)
+ typedef __int64 ssize_t;
+# elif !defined(__S3E__)
+ typedef long ssize_t;
+# endif
+#elif defined(EA_PLATFORM_UNIX) || defined(EA_PLATFORM_MINGW) || defined(__APPLE__) || defined(_BSD_SIZE_T_) // _BSD_SIZE_T_ indicates that Unix-like headers are present, even though it may not be a true Unix platform.
+# include <sys/types.h>
+#endif
+
+
+// ------------------------------------------------------------------------
+// Character types
+
+#if defined(EA_COMPILER_MSVC) || defined(EA_COMPILER_BORLAND)
+# if defined(EA_WCHAR_T_NON_NATIVE)
+ // In this case, wchar_t is not defined unless we include
+ // wchar.h or if the compiler makes it built-in.
+# ifdef EA_COMPILER_MSVC
+# pragma warning(push, 3)
+# endif
+# include <wchar.h>
+# ifdef EA_COMPILER_MSVC
+# pragma warning(pop)
+# endif
+# endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// char8_t -- Guaranteed to be equal to the compiler's char data type.
+// Some compilers implement char8_t as unsigned, though char
+// is usually set to be signed.
+//
+// char16_t -- This is set to be an unsigned 16 bit value. If the compiler
+// has wchar_t as an unsigned 16 bit value, then char16_t is
+// set to be the same thing as wchar_t in order to allow the
+// user to use char16_t with standard wchar_t functions.
+//
+// char32_t -- This is set to be an unsigned 32 bit value. If the compiler
+// has wchar_t as an unsigned 32 bit value, then char32_t is
+// set to be the same thing as wchar_t in order to allow the
+// user to use char32_t with standard wchar_t functions.
+//
+// VS2010 unilaterally defines char16_t and char32_t in its yvals.h header
+// unless _HAS_CHAR16_T_LANGUAGE_SUPPORT or _CHAR16T are defined.
+// However, VS2010 does not support the C++0x u"" and U"" string literals,
+// which makes its definition of char16_t and char32_t somewhat useless.
+// Until VC++ supports string literals, the build systems should define
+// _CHAR16T and let EABase define char16_t and EA_CHAR16.
+//
+// GCC defines char16_t and char32_t in the C compiler in -std=gnu99 mode,
+// as __CHAR16_TYPE__ and __CHAR32_TYPE__, and for the C++ compiler
+// in -std=c++0x and -std=gnu++0x modes, as char16_t and char32_t too.
+
+#if !defined(EA_CHAR16_NATIVE)
+# if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(_CHAR16T) || (defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) && _HAS_CHAR16_T_LANGUAGE_SUPPORT) // VS2010+
+# define EA_CHAR16_NATIVE 1
+# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 404) && (defined(__GXX_EXPERIMENTAL_CXX0X__) || defined(__STDC_VERSION__) || defined(_CHAR16T)) // g++ (C++ compiler) 4.4+ with -std=c++0x or gcc (C compiler) 4.4+ with -std=gnu99 or QNX660 has defined char16_t and char32_t in its yvals.h header
+# define EA_CHAR16_NATIVE 1
+# else
+# define EA_CHAR16_NATIVE 0
+# endif
+#endif
+
+#if !defined(EA_CHAR32_NATIVE)
+# if defined(_MSC_VER) && (_MSC_VER >= 1600) && defined(_HAS_CHAR16_T_LANGUAGE_SUPPORT) && _HAS_CHAR16_T_LANGUAGE_SUPPORT // VS2010+
+# define EA_CHAR32_NATIVE 1
+# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 404) && (defined(__GXX_EXPERIMENTAL_CXX0X__) || defined(__STDC_VERSION__)) // g++ (C++ compiler) 4.4+ with -std=c++0x or gcc (C compiler) 4.4+ with -std=gnu99
+# define EA_CHAR32_NATIVE 1
+# else
+# define EA_CHAR32_NATIVE 0
+# endif
+#endif
+
+
+#ifndef CHAR8_T_DEFINED // If the user hasn't already defined these...
+# define CHAR8_T_DEFINED
+
+# if EA_CHAR16_NATIVE
+ typedef char char8_t;
+
+ // In C++, char16_t and char32_t are already defined by the compiler.
+ // In MS C, char16_t and char32_t are already defined by the compiler/standard library.
+ // In GCC C, __CHAR16_TYPE__ and __CHAR32_TYPE__ are defined instead, and we must define char16_t and char32_t from these.
+# if defined(__GNUC__) && !defined(__GXX_EXPERIMENTAL_CXX0X__) && defined(__CHAR16_TYPE__) // If using GCC and compiling in C...
+ typedef __CHAR16_TYPE__ char16_t;
+ typedef __CHAR32_TYPE__ char32_t;
+# endif
+# elif defined(EA_COMPILER_HAS_CHAR_16_32)
+ typedef char char8_t;
+# elif (EA_WCHAR_SIZE == 2)
+# define _CHAR16T
+ typedef char char8_t;
+ typedef wchar_t char16_t;
+ typedef uint32_t char32_t;
+# else
+ typedef char char8_t;
+ typedef uint16_t char16_t;
+ typedef wchar_t char32_t;
+# endif
+#endif
+
+
+// EA_CHAR16 / EA_CHAR32
+//
+// Supports usage of portable string constants.
+//
+// Example usage:
+// const char16_t* str = EA_CHAR16("Hello world");
+// const char32_t* str = EA_CHAR32("Hello world");
+// const char16_t c = EA_CHAR16('\x3001');
+// const char32_t c = EA_CHAR32('\x3001');
+//
+#ifndef EA_CHAR16
+# if EA_CHAR16_NATIVE && !defined(_MSC_VER) // Microsoft doesn't support char16_t string literals.
+# define EA_CHAR16(s) u ## s
+# elif (EA_WCHAR_SIZE == 2)
+# define EA_CHAR16(s) L ## s
+# else
+ //#define EA_CHAR16(s) // Impossible to implement.
+# endif
+#endif
+
+#ifndef EA_CHAR32
+# if EA_CHAR32_NATIVE && !defined(_MSC_VER) // Microsoft doesn't support char32_t string literals.
+# define EA_CHAR32(s) U ## s
+# elif (EA_WCHAR_SIZE == 2)
+ //#define EA_CHAR32(s) // Impossible to implement.
+# else
+# define EA_CHAR32(s) L ## s
+# endif
+#endif
+
+
+// ------------------------------------------------------------------------
+// EAArrayCount
+//
+// Returns the count of items in a built-in C array. This is a common technique
+// which is often used to help properly calculate the number of items in an
+// array at runtime in order to prevent overruns, etc.
+//
+// Example usage:
+// int array[75];
+// size_t arrayCount = EAArrayCount(array); // arrayCount is 75.
+//
+#ifndef EAArrayCount
+# define EAArrayCount(x) (sizeof(x) / sizeof(x[0]))
+#endif
+
+
+// ------------------------------------------------------------------------
+// static_assert
+//
+// C++0x static_assert (a.k.a. compile-time assert).
+//
+// Specification:
+// void static_assert(bool const_expression, const char* description);
+//
+// Example usage:
+// static_assert(sizeof(int) == 4, "int must be 32 bits");
+//
+#if !defined(EABASE_STATIC_ASSERT_ENABLED)
+# if defined(EA_DEBUG) || defined(_DEBUG)
+# define EABASE_STATIC_ASSERT_ENABLED 1
+# else
+# define EABASE_STATIC_ASSERT_ENABLED 0
+# endif
+#endif
+
+#ifndef EA_PREPROCESSOR_JOIN
+# define EA_PREPROCESSOR_JOIN(a, b) EA_PREPROCESSOR_JOIN1(a, b)
+# define EA_PREPROCESSOR_JOIN1(a, b) EA_PREPROCESSOR_JOIN2(a, b)
+# define EA_PREPROCESSOR_JOIN2(a, b) a##b
+#endif
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1600)
+ // static_assert is defined by the compiler for both C and C++.
+#elif defined(__GNUC__) && defined(__GXX_EXPERIMENTAL_CXX0X__)
+ // static_assert is defined by the compiler.
+#elif defined(__clang__) && __has_feature(cxx_static_assert)
+ // static_assert is defined by the compiler.
+#elif defined(EA_COMPILER_GHS)
+ // static_assert is defined by the compiler.
+#else
+# if EABASE_STATIC_ASSERT_ENABLED
+# if defined(__COUNTER__) // If this VC++ extension is available...
+# define static_assert(expression, description) enum { EA_PREPROCESSOR_JOIN(static_assert_, __COUNTER__) = 1 / ((!!(expression)) ? 1 : 0) }
+# else
+# define static_assert(expression, description) enum { EA_PREPROCESSOR_JOIN(static_assert_, __LINE__) = 1 / ((!!(expression)) ? 1 : 0) }
+# endif
+# else
+# if defined(EA_COMPILER_METROWERKS)
+# if defined(__cplusplus)
+# define static_assert(expression, description) struct EA_PREPROCESSOR_JOIN(EACTAssertUnused_, __LINE__){ }
+# else
+# define static_assert(expression, description) enum { EA_PREPROCESSOR_JOIN(static_assert_, __LINE__) = 1 / ((!!(expression)) ? 1 : 0) }
+# endif
+# else
+# define static_assert(expression, description)
+# endif
+# endif
+#endif
+
+#if defined(__cplusplus)
+#endif
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EABase/earesult.h b/UnknownVersion/include/EABase/earesult.h
new file mode 100644
index 0000000..928d9fc
--- /dev/null
+++ b/UnknownVersion/include/EABase/earesult.h
@@ -0,0 +1,78 @@
+/*
+Copyright (C) 2009 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/*-----------------------------------------------------------------------------
+ * earesult.h
+ *
+ * Copyright (c) 2002 - 2005 Electronic Arts Inc. All rights reserved.
+ * Maintained by Paul Pedriana, Maxis
+ *---------------------------------------------------------------------------*/
+
+
+#ifndef INCLUDED_earesult_H
+#define INCLUDED_earesult_H
+
+
+#ifndef INCLUDED_eabase_H
+ #include "EABase/eabase.h"
+#endif
+
+
+
+/// \brief This result type is width-compatible with most systems
+typedef int32_t ea_result_type;
+
+
+namespace EA
+{
+ typedef int32_t result_type;
+
+ enum
+ {
+ SUCCESS = 0,
+ FAILURE = -1
+ };
+}
+
+
+/// \brief Macro to simplify testing for success
+#ifndef EA_SUCCEEDED
+ #define EA_SUCCEEDED(result) ((result) >= 0)
+#endif
+
+/// \brief Macro to simplify testing for general failure
+#ifndef EA_FAILED
+ #define EA_FAILED(result) ((result) < 0)
+#endif
+
+
+#endif
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/algorithm.h b/UnknownVersion/include/EASTL/algorithm.h
new file mode 100644
index 0000000..33633d1
--- /dev/null
+++ b/UnknownVersion/include/EASTL/algorithm.h
@@ -0,0 +1,2981 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/algorithm.h
+//
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements some of the primary algorithms from the C++ STL
+// algorithm library. These versions are just like the STL versions and so
+// are redundant. They are provided solely for the purpose of projects that
+// either cannot use standard C++ STL or want algorithms that have guaranteed
+// identical behaviour across platforms.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Definitions
+//
+// You will notice that we are very particular about the templated typenames
+// we use here. You will notice that we follow the C++ standard closely in
+// these respects. Each of these typenames have a specific meaning;
+// this is why we don't just label templated arguments with just letters
+// such as T, U, V, A, B. Here we provide a quick reference for the typenames
+// we use. See the C++ standard, section 25-8 for more details.
+// --------------------------------------------------------------
+// typename Meaning
+// --------------------------------------------------------------
+// T The value type.
+// Compare A function which takes two arguments and returns the lesser of the two.
+// Predicate A function which takes one argument returns true if the argument meets some criteria.
+// BinaryPredicate A function which takes two arguments and returns true if some criteria is met (e.g. they are equal).
+// StrictWeakOrdering A BinaryPredicate that compares two objects, returning true if the first precedes the second. Like Compare but has additional requirements. Used for sorting routines.
+// Function A function which takes one argument and applies some operation to the target.
+// Size A count or size.
+// Generator A function which takes no arguments and returns a value (which will usually be assigned to an object).
+// UnaryOperation A function which takes one argument and returns a value (which will usually be assigned to second object).
+// BinaryOperation A function which takes two arguments and returns a value (which will usually be assigned to a third object).
+// InputIterator An input iterator (iterator you read from) which allows reading each element only once and only in a forward direction.
+// ForwardIterator An input iterator which is like InputIterator except it can be reset back to the beginning.
+// BidirectionalIterator An input iterator which is like ForwardIterator except it can be read in a backward direction as well.
+// RandomAccessIterator An input iterator which can be addressed like an array. It is a superset of all other input iterators.
+// OutputIterator An output iterator (iterator you write to) which allows writing each element only once in only in a forward direction.
+//
+// Note that with iterators that a function which takes an InputIterator will
+// also work with a ForwardIterator, BidirectionalIterator, or RandomAccessIterator.
+// The given iterator type is merely the -minimum- supported functionality the
+// iterator must support.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Optimizations
+//
+// There are a number of opportunities for optimizations that we take here
+// in this library. The most obvious kinds are those that substitute memcpy
+// in the place of a conventional loop for data types with which this is
+// possible. The algorithms here are optimized to a higher level than currently
+// available C++ STL algorithms from vendors. This is especially
+// so for game programming on console devices, as we do things such as reduce
+// branching relative to other STL algorithm implementations. However, the
+// proper implementation of these algorithm optimizations is a fairly tricky
+// thing.
+//
+// The various things we look to take advantage of in order to implement
+// optimizations include:
+// - Taking advantage of random access iterators.
+// - Taking advantage of POD (plain old data) data types.
+// - Taking advantage of type_traits in general.
+// - Reducing branching and taking advantage of likely branch predictions.
+// - Taking advantage of issues related to pointer and reference aliasing.
+// - Improving cache coherency during memory accesses.
+// - Making code more likely to be inlinable by the compiler.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALGORITHM_H
+#define EASTL_ALGORITHM_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/utility.h>
+#include <EASTL/iterator.h>
+#include <EASTL/functional.h>
+#include <EASTL/internal/generic_iterator.h>
+#include <EASTL/type_traits.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+#include <stddef.h>
+#ifdef __MWERKS__
+ #include <../Include/string.h> // Force the compiler to use the std lib header.
+#else
+ #include <string.h> // memcpy, memcmp, memmove
+#endif
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// min/max workaround
+//
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. The following may still not completely resolve some kinds of
+// problems with MSVC++ #defines, though it deals with most cases in production
+// game code.
+//
+#if EASTL_NOMINMAX
+ #ifdef min
+ #undef min
+ #endif
+ #ifdef max
+ #undef max
+ #endif
+#endif
+
+
+
+
+namespace eastl
+{
+ #if EASTL_MINMAX_ENABLED
+
+ /// min
+ ///
+ /// Min returns the lesser of its two arguments; it returns the first
+ /// argument if neither is less than the other. The two arguments are
+ /// compared with operator <.
+ ///
+ /// This min and our other min implementations are defined as returning:
+ /// b < a ? b : a
+ /// which for example may in practice result in something different than:
+ /// b <= a ? b : a
+ /// in the case where b is different from a (though they compare as equal).
+ /// We choose the specific ordering here because that's the ordering
+ /// done by other STL implementations.
+ ///
+ template <typename T>
+ inline const T&
+ min(const T& a, const T& b)
+ {
+ return b < a ? b : a;
+ }
+ #endif // EASTL_MINMAX_ENABLED
+
+
+ /// min_alt
+ ///
+ /// This is an alternative version of min that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ /// See min(a, b) for detailed specifications.
+ ///
+ template <typename T>
+ inline const T&
+ min_alt(const T& a, const T& b)
+ {
+ return b < a ? b : a;
+ }
+
+ #if EASTL_MINMAX_ENABLED
+ /// min
+ ///
+ /// Min returns the lesser of its two arguments; it returns the first
+ /// argument if neither is less than the other. The two arguments are
+ /// compared with the Compare function (or function object), which
+ /// takes two arguments and returns true if the first is less than
+ /// the second.
+ ///
+ /// See min(a, b) for detailed specifications.
+ ///
+ /// Example usage:
+ /// struct A{ int a; };
+ /// struct Struct{ bool operator()(const A& a1, const A& a2){ return a1.a < a2.a; } };
+ ///
+ /// A a1, a2, a3;
+ /// a3 = min(a1, a2, Struct());
+ ///
+ /// Example usage:
+ /// struct B{ int b; };
+ /// inline bool Function(const B& b1, const B& b2){ return b1.b < b2.b; }
+ ///
+ /// B b1, b2, b3;
+ /// b3 = min(b1, b2, Function);
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ min(const T& a, const T& b, Compare compare)
+ {
+ return compare(b, a) ? b : a;
+ }
+
+ #endif // EASTL_MINMAX_ENABLED
+
+
+ /// min_alt
+ ///
+ /// This is an alternative version of min that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ /// See min(a, b) for detailed specifications.
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ min_alt(const T& a, const T& b, Compare compare)
+ {
+ return compare(b, a) ? b : a;
+ }
+
+
+ #if EASTL_MINMAX_ENABLED
+ /// max
+ ///
+ /// Max returns the greater of its two arguments; it returns the first
+ /// argument if neither is greater than the other. The two arguments are
+ /// compared with operator < (and not operator >).
+ ///
+ /// This max and our other max implementations are defined as returning:
+ /// a < b ? b : a
+ /// which for example may in practice result in something different than:
+ /// a <= b ? b : a
+ /// in the case where b is different from a (though they compare as equal).
+ /// We choose the specific ordering here because that's the ordering
+ /// done by other STL implementations.
+ ///
+ template <typename T>
+ inline const T&
+ max(const T& a, const T& b)
+ {
+ return a < b ? b : a;
+ }
+ #endif // EASTL_MINMAX_ENABLED
+
+
+ /// max_alt
+ ///
+ /// This is an alternative version of max that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ template <typename T>
+ inline const T&
+ max_alt(const T& a, const T& b)
+ {
+ return a < b ? b : a;
+ }
+
+ #if EASTL_MINMAX_ENABLED
+ /// max
+ ///
+ /// Max returns the greater of its two arguments; it returns the first
+ /// argument if neither is greater than the other. The two arguments are
+ /// compared with the Compare function (or function object), which
+ /// takes two arguments and returns true if the first is less than
+ /// the second.
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ max(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) ? b : a;
+ }
+
+ #endif
+
+
+ /// max_alt
+ ///
+ /// This is an alternative version of max that avoids any possible
+ /// collisions with Microsoft #defines of min and max.
+ ///
+ template <typename T, typename Compare>
+ inline const T&
+ max_alt(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) ? b : a;
+ }
+
+
+
+ /// min_element
+ ///
+ /// min_element finds the smallest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value smaller than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// condition holds: !(*j < *i).
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator>
+ ForwardIterator min_element(ForwardIterator first, ForwardIterator last)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMin = first;
+
+ while(++first != last)
+ {
+ if(*first < *currentMin)
+ currentMin = first;
+ }
+ return currentMin;
+ }
+ return first;
+ }
+
+
+ /// min_element
+ ///
+ /// min_element finds the smallest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value smaller than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// conditions hold: compare(*j, *i) == false.
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator, typename Compare>
+ ForwardIterator min_element(ForwardIterator first, ForwardIterator last, Compare compare)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMin = first;
+
+ while(++first != last)
+ {
+ if(compare(*first, *currentMin))
+ currentMin = first;
+ }
+ return currentMin;
+ }
+ return first;
+ }
+
+
+ /// max_element
+ ///
+ /// max_element finds the largest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value greater than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// condition holds: !(*i < *j).
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator>
+ ForwardIterator max_element(ForwardIterator first, ForwardIterator last)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMax = first;
+
+ while(++first != last)
+ {
+ if(*currentMax < *first)
+ currentMax = first;
+ }
+ return currentMax;
+ }
+ return first;
+ }
+
+
+ /// max_element
+ ///
+ /// max_element finds the largest element in the range [first, last).
+ /// It returns the first iterator i in [first, last) such that no other
+ /// iterator in [first, last) points to a value greater than *i.
+ /// The return value is last if and only if [first, last) is an empty range.
+ ///
+ /// Returns: The first iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, last) the following corresponding
+ /// condition holds: compare(*i, *j) == false.
+ ///
+ /// Complexity: Exactly 'max((last - first) - 1, 0)' applications of the
+ /// corresponding comparisons.
+ ///
+ template <typename ForwardIterator, typename Compare>
+ ForwardIterator max_element(ForwardIterator first, ForwardIterator last, Compare compare)
+ {
+ if(first != last)
+ {
+ ForwardIterator currentMax = first;
+
+ while(++first != last)
+ {
+ if(compare(*currentMax, *first))
+ currentMax = first;
+ }
+ return currentMax;
+ }
+ return first;
+ }
+
+
+ /// median
+ ///
+ /// median finds which element of three (a, b, c) is in-between the other two.
+ /// If two or more elements are equal, the first (e.g. a before b) is chosen.
+ ///
+ /// Complexity: Either two or three comparisons will be required, depending
+ /// on the values.
+ ///
+ template <typename T>
+ inline const T& median(const T& a, const T& b, const T& c)
+ {
+ if(a < b)
+ {
+ if(b < c)
+ return b;
+ else if(a < c)
+ return c;
+ else
+ return a;
+ }
+ else if(a < c)
+ return a;
+ else if(b < c)
+ return c;
+ return b;
+ }
+
+
+ /// median
+ ///
+ /// median finds which element of three (a, b, c) is in-between the other two.
+ /// If two or more elements are equal, the first (e.g. a before b) is chosen.
+ ///
+ /// Complexity: Either two or three comparisons will be required, depending
+ /// on the values.
+ ///
+ template <typename T, typename Compare>
+ inline const T& median(const T& a, const T& b, const T& c, Compare compare)
+ {
+ if(compare(a, b))
+ {
+ if(compare(b, c))
+ return b;
+ else if(compare(a, c))
+ return c;
+ else
+ return a;
+ }
+ else if(compare(a, c))
+ return a;
+ else if(compare(b, c))
+ return c;
+ return b;
+ }
+
+
+
+ /// swap
+ ///
+ /// Assigns the contents of a to b and the contents of b to a.
+ /// A temporary instance of type T is created and destroyed
+ /// in the process.
+ ///
+ /// This function is used by numerous other algorithms, and as
+ /// such it may in some cases be feasible and useful for the user
+ /// to implement an override version of this function which is
+ /// more efficient in some way.
+ ///
+ template <typename T>
+ inline void swap(T& a, T& b)
+ {
+ T temp(a);
+ a = b;
+ b = temp;
+ }
+
+
+
+ // iter_swap helper functions
+ //
+ template <bool bTypesAreEqual>
+ struct iter_swap_impl
+ {
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ static void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::value_type value_type_a;
+
+ value_type_a temp(*a);
+ *a = *b;
+ *b = temp;
+ }
+ };
+
+ template <>
+ struct iter_swap_impl<true>
+ {
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ static void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ eastl::swap(*a, *b);
+ }
+ };
+
+ /// iter_swap
+ ///
+ /// Equivalent to swap(*a, *b), though the user can provide an override to
+ /// iter_swap that is independent of an override which may exist for swap.
+ ///
+ /// We provide a version of iter_swap which uses swap when the swapped types
+ /// are equal but a manual implementation otherwise. We do this because the
+ /// C++ standard defect report says that iter_swap(a, b) must be implemented
+ /// as swap(*a, *b) when possible.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline void iter_swap(ForwardIterator1 a, ForwardIterator2 b)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::value_type value_type_a;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::value_type value_type_b;
+ typedef typename eastl::iterator_traits<ForwardIterator1>::reference reference_a;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::reference reference_b;
+
+ iter_swap_impl<type_and<is_same<value_type_a, value_type_b>::value, is_same<value_type_a&, reference_a>::value, is_same<value_type_b&, reference_b>::value >::value >::iter_swap(a, b);
+ }
+
+
+
+ /// swap_ranges
+ ///
+ /// Swaps each of the elements in the range [first1, last1) with the
+ /// corresponding element in the range [first2, first2 + (last1 - first1)).
+ ///
+ /// Effects: For each nonnegative integer n < (last1 - first1),
+ /// performs: swap(*(first1 + n), *(first2 + n)).
+ ///
+ /// Requires: The two ranges [first1, last1) and [first2, first2 + (last1 - first1))
+ /// shall not overlap.
+ ///
+ /// Returns: first2 + (last1 - first1). That is, returns the end of the second range.
+ ///
+ /// Complexity: Exactly 'last1 - first1' swaps.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline ForwardIterator2
+ swap_ranges(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2)
+ {
+ for(; first1 != last1; ++first1, ++first2)
+ iter_swap(first1, first2);
+ return first2;
+ }
+
+
+
+ /// adjacent_find
+ ///
+ /// Returns: The first iterator i such that both i and i + 1 are in the range
+ /// [first, last) for which the following corresponding conditions hold: *i == *(i + 1).
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator>
+ inline ForwardIterator
+ adjacent_find(ForwardIterator first, ForwardIterator last)
+ {
+ if(first != last)
+ {
+ ForwardIterator i = first;
+
+ for(++i; i != last; ++i)
+ {
+ if(*first == *i)
+ return first;
+ first = i;
+ }
+ }
+ return last;
+ }
+
+
+
+ /// adjacent_find
+ ///
+ /// Returns: The first iterator i such that both i and i + 1 are in the range
+ /// [first, last) for which the following corresponding conditions hold: predicate(*i, *(i + 1)) != false.
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: Exactly 'find(first, last, value) - first' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator, typename BinaryPredicate>
+ inline ForwardIterator
+ adjacent_find(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate)
+ {
+ if(first != last)
+ {
+ ForwardIterator i = first;
+
+ for(++i; i != last; ++i)
+ {
+ if(predicate(*first, *i))
+ return first;
+ first = i;
+ }
+ }
+ return last;
+ }
+
+
+
+
+ // copy
+ //
+ // We implement copy via some helper functions whose purpose is to
+ // try to use memcpy when possible. We need to use type_traits and
+ // iterator categories to do this.
+ //
+ template <bool bHasTrivialCopy, typename IteratorTag>
+ struct copy_impl
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ for(; first != last; ++result, ++first)
+ *result = *first;
+ return result;
+ }
+ };
+
+ template <>
+ struct copy_impl<true, EASTL_ITC_NS::random_access_iterator_tag> // If we have a trivially copyable random access array, use memcpy
+ {
+ template <typename T>
+ static T* do_copy(const T* first, const T* last, T* result)
+ {
+ // We cannot use memcpy because memcpy requires the entire source and dest ranges to be
+ // non-overlapping, whereas the copy algorithm requires only that 'result' not be within
+ // the range from first to last.
+ return (T*)memmove(result, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first);
+ }
+ };
+
+ // copy_chooser
+ // Calls one of the above copy_impl functions.
+ template <typename InputIterator, typename OutputIterator>
+ inline OutputIterator
+ copy_chooser(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ typedef typename eastl::iterator_traits<InputIterator>::value_type value_type_input;
+ typedef typename eastl::iterator_traits<OutputIterator>::value_type value_type_output;
+
+ const bool bHasTrivialCopy = type_and<has_trivial_assign<value_type_input>::value,
+ is_pointer<InputIterator>::value,
+ is_pointer<OutputIterator>::value,
+ is_same<value_type_input, value_type_output>::value>::value;
+
+ return eastl::copy_impl<bHasTrivialCopy, IC>::do_copy(first, last, result);
+ }
+
+ // copy_generic_iterator
+ // Converts a copy call via a generic_iterator to a copy call via the iterator type the
+ // generic_iterator holds. We do this because generic_iterator's purpose is to hold
+ // iterators that are simply pointers, and if we want the functions above to be fast,
+ // we need them to see the pointers and not an iterator that wraps the pointers as
+ // does generic_iterator. We are forced into using a templated struct with a templated
+ // do_copy member function because C++ doesn't allow specializations for standalone functions.
+ template <bool bInputIsGenericIterator, bool bOutputIsGenericIterator>
+ struct copy_generic_iterator
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return eastl::copy_chooser(first, last, result);
+ }
+ };
+
+ template <>
+ struct copy_generic_iterator<true, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return eastl::copy_chooser(first.base(), last.base(), result); // first.base(), last.base() will resolve to a pointer (e.g. T*).
+ }
+ };
+
+ template <>
+ struct copy_generic_iterator<false, true>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return OutputIterator(eastl::copy_chooser(first, last, result.base())); // Have to convert to OutputIterator because result.base() is a T*
+ }
+ };
+
+ template <>
+ struct copy_generic_iterator<true, true>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return OutputIterator(eastl::copy_chooser(first.base(), last.base(), result.base())); // Have to convert to OutputIterator because result.base() is a T*
+ }
+ };
+
+ /// copy
+ ///
+ /// Effects: Copies elements in the range [first, last) into the range [result, result + (last - first))
+ /// starting from first and proceeding to last. For each nonnegative integer n < (last - first),
+ /// performs *(result + n) = *(first + n).
+ ///
+ /// Returns: result + (last - first). That is, returns the end of the result. Note that this
+ /// is different from how memcpy works, as memcpy returns the beginning of the result.
+ ///
+ /// Requires: result shall not be in the range [first, last).
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ /// Note: This function is like memcpy in that the result must not be within the
+ /// range [first, last), as that would cause memory to be overwritten incorrectly.
+ ///
+ template <typename InputIterator, typename OutputIterator>
+ inline OutputIterator
+ copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ //#ifdef __GNUC__ // GCC has template depth problems and this shortcut may need to be enabled.
+ // return eastl::copy_chooser(first, last, result);
+ //#else
+ const bool bInputIsGenericIterator = is_generic_iterator<InputIterator>::value;
+ const bool bOutputIsGenericIterator = is_generic_iterator<OutputIterator>::value;
+ return eastl::copy_generic_iterator<bInputIsGenericIterator, bOutputIsGenericIterator>::do_copy(first, last, result);
+ //#endif
+ }
+
+
+
+
+ // copy_backward
+ //
+ // We implement copy_backward via some helper functions whose purpose is
+ // to try to use memcpy when possible. We need to use type_traits and
+ // iterator categories to do this.
+ //
+ template <bool bHasTrivialCopy, typename IteratorTag>
+ struct copy_backward_impl
+ {
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ static BidirectionalIterator2 do_copy(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 result)
+ {
+ while(last != first)
+ *--result = *--last;
+ return result;
+ }
+ };
+
+ template <>
+ struct copy_backward_impl<true, EASTL_ITC_NS::random_access_iterator_tag> // If we have a trivially copyable random access array, use memcpy
+ {
+ template <typename T>
+ static T* do_copy(const T* first, const T* last, T* result)
+ {
+ return (T*)memmove(result - (last - first), first, (size_t)((uintptr_t)last - (uintptr_t)first));
+ }
+ };
+
+ // copy_backward_chooser
+ // Calls one of the above copy_backward_impl functions.
+ template <typename InputIterator, typename OutputIterator>
+ inline OutputIterator
+ copy_backward_chooser(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
+ typedef typename eastl::iterator_traits<InputIterator>::value_type value_type_input;
+ typedef typename eastl::iterator_traits<OutputIterator>::value_type value_type_output;
+
+ const bool bHasTrivialCopy = type_and<has_trivial_assign<value_type_input>::value,
+ is_pointer<InputIterator>::value,
+ is_pointer<OutputIterator>::value,
+ is_same<value_type_input, value_type_output>::value>::value;
+
+ return eastl::copy_backward_impl<bHasTrivialCopy, IC>::do_copy(first, last, result);
+ }
+
+ // copy_backward_generic_iterator
+ // Converts a copy call via a generic_iterator to a copy call via the iterator type the
+ // generic_iterator holds. We do this because generic_iterator's purpose is to hold
+ // iterators that are simply pointers, and if we want the functions above to be fast,
+ // we need them to see the pointers and not an iterator that wraps the pointers as
+ // does generic_iterator. We are forced into using a templated struct with a templated
+ // do_copy member function because C++ doesn't allow specializations for standalone functions.
+ template <bool bInputIsGenericIterator, bool bOutputIsGenericIterator>
+ struct copy_backward_generic_iterator
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return eastl::copy_backward_chooser(first, last, result);
+ }
+ };
+
+ template <>
+ struct copy_backward_generic_iterator<true, false>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return eastl::copy_backward_chooser(first.base(), last.base(), result); // first.base(), last.base() will resolve to a pointer (e.g. T*).
+ }
+ };
+
+ template <>
+ struct copy_backward_generic_iterator<false, true>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return OutputIterator(eastl::copy_backward_chooser(first, last, result.base())); // Have to convert to OutputIterator because result.base() is a T*
+ }
+ };
+
+ template <>
+ struct copy_backward_generic_iterator<true, true>
+ {
+ template <typename InputIterator, typename OutputIterator>
+ static OutputIterator do_copy(InputIterator first, InputIterator last, OutputIterator result)
+ {
+ return OutputIterator(eastl::copy_backward_chooser(first.base(), last.base(), result.base())); // Have to convert to OutputIterator because result.base() is a T*
+ }
+ };
+
+ /// copy_backward
+ ///
+ /// copies memory in the range of [first, last) to the range *ending* with result.
+ ///
+ /// Effects: Copies elements in the range [first, last) into the range
+ /// [result - (last - first), result) starting from last - 1 and proceeding to first.
+ /// For each positive integer n <= (last - first), performs *(result - n) = *(last - n).
+ ///
+ /// Requires: result shall not be in the range [first, last).
+ ///
+ /// Returns: result - (last - first). That is, returns the beginning of the result range.
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ template <typename BidirectionalIterator1, typename BidirectionalIterator2>
+ inline BidirectionalIterator2
+ copy_backward(BidirectionalIterator1 first, BidirectionalIterator1 last, BidirectionalIterator2 result)
+ {
+ const bool bInputIsGenericIterator = is_generic_iterator<BidirectionalIterator1>::value;
+ const bool bOutputIsGenericIterator = is_generic_iterator<BidirectionalIterator2>::value;
+
+ return eastl::copy_backward_generic_iterator<bInputIsGenericIterator, bOutputIsGenericIterator>::do_copy(first, last, result);
+ }
+
+
+
+ /// count
+ ///
+ /// Counts the number of items in the range of [first, last) which equal the input value.
+ ///
+ /// Effects: Returns the number of iterators i in the range [first, last) for which the
+ /// following corresponding conditions hold: *i == value.
+ ///
+ /// Complexity: At most 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of count is count_if and not another variation of count.
+ /// This is because both versions would have three parameters and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename T>
+ inline typename eastl::iterator_traits<InputIterator>::difference_type
+ count(InputIterator first, InputIterator last, const T& value)
+ {
+ typename eastl::iterator_traits<InputIterator>::difference_type result = 0;
+
+ for(; first != last; ++first)
+ {
+ if(*first == value)
+ ++result;
+ }
+ return result;
+ }
+
+
+ /// count_if
+ ///
+ /// Counts the number of items in the range of [first, last) which match
+ /// the input value as defined by the input predicate function.
+ ///
+ /// Effects: Returns the number of iterators i in the range [first, last) for which the
+ /// following corresponding conditions hold: predicate(*i) != false.
+ ///
+ /// Complexity: At most 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The non-predicate version of count_if is count and not another variation of count_if.
+ /// This is because both versions would have three parameters and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename Predicate>
+ inline typename eastl::iterator_traits<InputIterator>::difference_type
+ count_if(InputIterator first, InputIterator last, Predicate predicate)
+ {
+ typename eastl::iterator_traits<InputIterator>::difference_type result = 0;
+
+ for(; first != last; ++first)
+ {
+ if(predicate(*first))
+ ++result;
+ }
+ return result;
+ }
+
+
+
// fill
//
// Helper templates that let fill dispatch to a specialized implementation
// based on a property of the value type.
//
template <bool bIsScalar>
struct fill_imp
{
    // General case: assign the caller's value on every iteration.
    template <typename ForwardIterator, typename T>
    static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
    {
        // The C++ standard doesn't specify whether a temporary must be made,
        // but assigning straight from 'value' here matches what std STL
        // implementations do for the general case.
        while(first != last)
        {
            *first = value;
            ++first;
        }
    }
};

template <>
struct fill_imp<true>
{
    // Scalar case: copy the value into a local first. 'value' is a reference
    // and may alias an element of the destination range; without the local
    // copy the compiler would be forced to generate less efficient code.
    template <typename ForwardIterator, typename T>
    static void do_fill(ForwardIterator first, ForwardIterator last, const T& value)
    {
        const T temp(value);
        while(first != last)
        {
            *first = temp;
            ++first;
        }
    }
};
+
+ /// fill
+ ///
+ /// fill is like memset in that it assigns a single value repeatedly to a
+ /// destination range. It allows for any type of iterator (not just an array)
+ /// and the source value can be any type, not just a byte.
+ /// Note that the source value (which is a reference) can come from within
+ /// the destination range.
+ ///
+ /// Effects: Assigns value through all the iterators in the range [first, last).
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ /// Note: The C++ standard doesn't specify anything about the value parameter
+ /// coming from within the first-last range. All std STL implementations act
+ /// as if the standard specifies that value must not come from within this range.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline void fill(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ eastl::fill_imp< is_scalar<T>::value >::do_fill(first, last, value);
+
+ // Possibly better implementation, as it will deal with small PODs as well as scalars:
+ // bEasyCopy is true if the type has a trivial constructor (e.g. is a POD) and if
+ // it is small. Thus any built-in type or any small user-defined struct will qualify.
+ //const bool bEasyCopy = eastl::type_and<eastl::has_trivial_constructor<T>::value,
+ // eastl::integral_constant<bool, (sizeof(T) <= 16)>::value;
+ //eastl::fill_imp<bEasyCopy>::do_fill(first, last, value);
+
+ }
+
// Byte-sized overloads of fill, implemented via memset.
// Each is behaviorally identical to the generic fill for these types.
// (It's debateable whether the reference ('const char& c') or a plain
// by-value parameter would be the better choice here.)

inline void fill(char* first, char* last, const char& c)
{
    const size_t n = (size_t)(last - first);
    memset(first, (unsigned char)c, n);
}

inline void fill(char* first, char* last, const int c) // Handles calls such as 'fill(first, last, 0)'.
{
    const size_t n = (size_t)(last - first);
    memset(first, (unsigned char)c, n);
}

inline void fill(unsigned char* first, unsigned char* last, const unsigned char& c)
{
    const size_t n = (size_t)(last - first);
    memset(first, (unsigned char)c, n);
}

inline void fill(unsigned char* first, unsigned char* last, const int c)
{
    const size_t n = (size_t)(last - first);
    memset(first, (unsigned char)c, n);
}

inline void fill(signed char* first, signed char* last, const signed char& c)
{
    const size_t n = (size_t)(last - first);
    memset(first, (unsigned char)c, n);
}

inline void fill(signed char* first, signed char* last, const int c)
{
    const size_t n = (size_t)(last - first);
    memset(first, (unsigned char)c, n);
}

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__SNC__) || defined(__ICL) || defined(__PPU__) || defined(__SPU__) // SN = SN compiler, ICL = Intel compiler, PPU = PS3 processor, SPU = PS3 cell processor
    // bool is known to be a single byte under these compilers, so it can
    // be routed through memset as well.
    inline void fill(bool* first, bool* last, const bool& b)
    {
        const size_t n = (size_t)(last - first);
        memset(first, (char)b, n);
    }
#endif
+
+
+
+
// fill_n
//
// Helper templates that let fill_n dispatch to a specialized implementation
// based on a property of the value type.
//
template <bool bIsScalar>
struct fill_n_imp
{
    // General case: assign the caller's value n times, returning the
    // iterator one past the last element written.
    template <typename OutputIterator, typename Size, typename T>
    static OutputIterator do_fill(OutputIterator first, Size n, const T& value)
    {
        while(n > 0)
        {
            *first = value;
            ++first;
            --n;
        }
        return first;
    }
};

template <>
struct fill_n_imp<true>
{
    // Scalar case: copy the value into a local first. 'value' is a reference
    // and may alias an element of the destination range; without the local
    // copy the compiler would generate less efficient code.
    template <typename OutputIterator, typename Size, typename T>
    static OutputIterator do_fill(OutputIterator first, Size n, const T& value)
    {
        const T temp = value;
        while(n > 0)
        {
            *first = temp;
            ++first;
            --n;
        }
        return first;
    }
};
+
+ /// fill_n
+ ///
+ /// The fill_n function is very much like memset in that a copies a source value
+ /// n times into a destination range. The source value may come from within
+ /// the destination range.
+ ///
+ /// Effects: Assigns value through all the iterators in the range [first, first + n).
+ ///
+ /// Complexity: Exactly n assignments.
+ ///
+ template <typename OutputIterator, typename Size, typename T>
+ OutputIterator fill_n(OutputIterator first, Size n, const T& value)
+ {
+ #ifdef _MSC_VER // VC++ up to and including VC8 blow up when you pass a 64 bit scalar to the do_fill function.
+ return eastl::fill_n_imp< is_scalar<T>::value && (sizeof(T) <= sizeof(::uint32_t)) >::do_fill(first, n, value);
+ #else
+ return eastl::fill_n_imp< is_scalar<T>::value >::do_fill(first, n, value);
+ #endif
+ }
+
// Byte-sized specializations of fill_n, implemented via memset.
// memset returns its first argument, so advancing the result by n
// yields the required 'one past the last element written' pointer.

template <typename Size>
inline char* fill_n(char* first, Size n, const char& c)
{
    char* const base = (char*)memset(first, (char)c, (size_t)n);
    return base + n;
}

template <typename Size>
inline unsigned char* fill_n(unsigned char* first, Size n, const unsigned char& c)
{
    unsigned char* const base = (unsigned char*)memset(first, (unsigned char)c, (size_t)n);
    return base + n;
}
+
/// fill_n
///
/// Specialization of fill_n for signed char, implemented via memset.
/// Returns first + n, one past the last element written.
///
template <typename Size>
inline signed char* fill_n(signed char* first, Size n, const signed char& c)
{
    // Cast the length for memset and advance the pointer by the raw n,
    // matching the char and unsigned char specializations. The previous
    // version placed the casts inconsistently ((signed char) on the fill
    // byte, (size_t) on the pointer advance); the result was the same,
    // since memset converts its fill value to unsigned char internally,
    // but the code now reads identically to its siblings.
    return (signed char*)memset(first, (unsigned char)c, (size_t)n) + n;
}
+
#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__SNC__) || defined(__ICL) || defined(__PPU__) || defined(__SPU__) // SN = SN compiler, ICL = Intel compiler, PPU = PS3 processor, SPU = PS3 cell processor
    // bool is known to be a single byte under these compilers, so fill_n
    // for bool can be routed through memset as well.
    template <typename Size>
    inline bool* fill_n(bool* first, Size n, const bool& b)
    {
        bool* const base = (bool*)memset(first, (char)b, n);
        return base + (size_t)n;
    }
#endif
+
+
+
/// find
///
/// Linear search for 'value' within the unsorted range [first, last).
///
/// Returns: The first iterator i in the range [first, last) for which
/// *i == value holds; last if no such iterator exists.
///
/// Complexity: At most 'last - first' comparisons. This is a linear
/// search, not a binary one.
///
/// Note: The predicate version of find is find_if and not another variation of find.
/// This is because both versions would have three parameters and there could be ambiguity.
///
template <typename InputIterator, typename T>
inline InputIterator
find(InputIterator first, InputIterator last, const T& value)
{
    for(; first != last; ++first)
    {
        if(*first == value) // Value comparisons are always expressed via < or ==.
            break;
    }
    return first;
}
+
+
+
/// find_if
///
/// Linear search for the first element in the unsorted range [first, last)
/// that satisfies the given predicate.
///
/// Returns: The first iterator i in the range [first, last) for which
/// the following corresponding conditions hold: predicate(*i) != false.
/// Returns last if no such iterator is found.
///
/// Complexity: At most 'last - first' applications of the corresponding predicate.
///
/// Note: The non-predicate version of find_if is find and not another variation of find_if.
/// This is because both versions would have three parameters and there could be ambiguity.
///
/// (A previous revision of this comment mentioned a second range
/// [first2, last2); that text was copied from find_first_of and did not
/// apply to this single-range algorithm.)
///
template <typename InputIterator, typename Predicate>
inline InputIterator
find_if(InputIterator first, InputIterator last, Predicate predicate)
{
    while((first != last) && !predicate(*first))
        ++first;
    return first;
}
+
+
+
/// find_first_of
///
/// find_first_of performs a linear search through [first1, last1), but unlike
/// find — which looks for one particular value — it looks for any of the
/// several values in [first2, last2). It is thus similar to the strpbrk
/// standard C string function. If the set of values to search for
/// (i.e. first2 - last2) is empty, the search always fails and last1 is returned.
///
/// Effects: Finds an element that matches one of a set of values.
///
/// Returns: The first iterator i in [first1, last1) such that *i == *j for
/// some j in [first2, last2); last1 if no such iterator exists.
///
/// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
/// corresponding predicate.
///
template <typename ForwardIterator1, typename ForwardIterator2>
ForwardIterator1
find_first_of(ForwardIterator1 first1, ForwardIterator1 last1,
              ForwardIterator2 first2, ForwardIterator2 last2)
{
    while(first1 != last1)
    {
        for(ForwardIterator2 it2 = first2; it2 != last2; ++it2)
        {
            if(*first1 == *it2)
                return first1;
        }
        ++first1;
    }
    return last1;
}
+
+
/// find_first_of
///
/// Predicate version of find_first_of: performs a linear search through
/// [first1, last1) for the first element matching any element of
/// [first2, last2) under the supplied binary predicate. Similar to the
/// strpbrk standard C string function.
///
/// Effects: Finds an element that matches one of a set of values.
///
/// Returns: The first iterator i in [first1, last1) such that
/// predicate(*i, *j) != false for some j in [first2, last2);
/// last1 if no such iterator exists.
///
/// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
/// corresponding predicate.
///
template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
ForwardIterator1
find_first_of(ForwardIterator1 first1, ForwardIterator1 last1,
              ForwardIterator2 first2, ForwardIterator2 last2,
              BinaryPredicate predicate)
{
    while(first1 != last1)
    {
        for(ForwardIterator2 it2 = first2; it2 != last2; ++it2)
        {
            if(predicate(*first1, *it2))
                return first1;
        }
        ++first1;
    }
    return last1;
}
+
+
+ /// find_first_not_of
+ ///
+ /// Searches through first range for the first element that does not belong the second input range.
+ /// This is very much like the C++ string find_first_not_of function.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1) such that for some
+ /// integer j in the range [first2, last2) the following conditions hold: !(*i == *j).
+ /// Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
+ /// corresponding predicate.
+ ///
+ template<class ForwardIterator1, class ForwardIterator2>
+ ForwardIterator1
+ find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ for(; first1 != last1; ++first1)
+ {
+ if(eastl::find(first2, last2, *first1) == last2)
+ break;
+ }
+
+ return first1;
+ }
+
+
+
+ /// find_first_not_of
+ ///
+ /// Searches through first range for the first element that does not belong the second input range.
+ /// This is very much like the C++ string find_first_not_of function.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1) such that for some
+ /// integer j in the range [first2, last2) the following conditions hold: pred(*i, *j) == false.
+ /// Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * (last2 - first2)' applications of the
+ /// corresponding predicate.
+ ///
+ template<class ForwardIterator1, class ForwardIterator2, class BinaryPredicate>
+ inline ForwardIterator1
+ find_first_not_of(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::value_type value_type;
+
+ for(; first1 != last1; ++first1)
+ {
+ if(eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *first1)) == last2)
+ break;
+ }
+
+ return first1;
+ }
+
+
+ template<class BidirectionalIterator1, class ForwardIterator2>
+ inline BidirectionalIterator1
+ find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find(first2, last2, *it1) == last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find(first2, last2, *it1) != last2))
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+ template<class BidirectionalIterator1, class ForwardIterator2, class BinaryPredicate>
+ BidirectionalIterator1
+ find_last_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator1>::value_type value_type;
+
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1)) == last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1)) != last2))
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+ template<class BidirectionalIterator1, class ForwardIterator2>
+ inline BidirectionalIterator1
+ find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find(first2, last2, *it1) != last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find( first2, last2, *it1) == last2))
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+ template<class BidirectionalIterator1, class ForwardIterator2, class BinaryPredicate>
+ inline BidirectionalIterator1
+ find_last_not_of(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator1>::value_type value_type;
+
+ if((first1 != last1) && (first2 != last2))
+ {
+ BidirectionalIterator1 it1(last1);
+
+ while((--it1 != first1) && (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1)) != last2))
+ ; // Do nothing
+
+ if((it1 != first1) || (eastl::find_if(first2, last2, eastl::bind1st<BinaryPredicate, value_type>(predicate, *it1))) != last2)
+ return it1;
+ }
+
+ return last1;
+ }
+
+
+
+
/// for_each
///
/// Calls 'function' once for every value in the range [first, last).
/// Function takes a single parameter: the current value.
///
/// Effects: Applies function to the result of dereferencing every iterator
/// in [first, last), starting from first and proceeding to last - 1.
///
/// Returns: function.
///
/// Complexity: Applies function exactly 'last - first' times.
///
/// Note: Any value returned by function is ignored.
///
template <typename InputIterator, typename Function>
inline Function
for_each(InputIterator first, InputIterator last, Function function)
{
    while(first != last)
    {
        function(*first);
        ++first;
    }
    return function;
}
+
+
/// generate
///
/// Iterates the range [first, last) and assigns each element the result of
/// calling 'generator', a function taking no arguments.
///
/// Complexity: Exactly 'last - first' invocations of generator and assignments.
///
template <typename ForwardIterator, typename Generator>
inline void
generate(ForwardIterator first, ForwardIterator last, Generator generator)
{
    // We cannot delegate to generate_n(first, last - first, generator),
    // because 'last - first' might not be supported by the given iterator.
    while(first != last)
    {
        *first = generator();
        ++first;
    }
}
+
+
/// generate_n
///
/// Advances an iterator n times, assigning the result of 'generator'
/// (a function taking no arguments) to each successive element.
///
/// Returns: first + n.
///
/// Complexity: Exactly n invocations of generator and assignments.
///
template <typename OutputIterator, typename Size, typename Generator>
inline OutputIterator
generate_n(OutputIterator first, Size n, Generator generator)
{
    while(n > 0)
    {
        *first = generator();
        ++first;
        --n;
    }
    return first;
}
+
+
/// transform
///
/// Iterates the input range [first, last) and the output iterator result,
/// assigning unaryOperation(input) through result.
///
/// Effects: Assigns through every iterator i in [result, result + (last1 - first1))
/// a new corresponding value equal to unaryOperation(*(first1 + (i - result)).
///
/// Requires: unaryOperation shall not have any side effects.
///
/// Returns: result + (last1 - first1), i.e. the end of the output range.
///
/// Complexity: Exactly 'last1 - first1' applications of unaryOperation.
///
/// Note: result may be equal to first.
///
template <typename InputIterator, typename OutputIterator, typename UnaryOperation>
inline OutputIterator
transform(InputIterator first, InputIterator last, OutputIterator result, UnaryOperation unaryOperation)
{
    while(first != last)
    {
        *result = unaryOperation(*first);
        ++first;
        ++result;
    }
    return result;
}
+
+
/// transform
///
/// Iterates the input range [first1, last1) together with a second input
/// sequence beginning at first2, assigning binaryOperation(input1, input2)
/// through the output iterator result.
///
/// Effects: Assigns through every iterator i in [result, result + (last1 - first1))
/// a new corresponding value equal to binaryOperation(*(first1 + (i - result), *(first2 + (i - result))).
///
/// Requires: binaryOperation shall not have any side effects.
///
/// Returns: result + (last1 - first1), i.e. the end of the output range.
///
/// Complexity: Exactly 'last1 - first1' applications of binaryOperation.
///
/// Note: result may be equal to first1 or first2.
///
template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename BinaryOperation>
inline OutputIterator
transform(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, OutputIterator result, BinaryOperation binaryOperation)
{
    while(first1 != last1)
    {
        *result = binaryOperation(*first1, *first2);
        ++first1;
        ++first2;
        ++result;
    }
    return result;
}
+
+
/// equal
///
/// Returns: true if for every iterator i in the range [first1, last1) the
/// following corresponding conditions hold: *i == *(first2 + (i - first1)).
/// Otherwise, returns false.
///
/// Complexity: At most 'last1 - first1' comparisons.
///
/// To consider: Specialize this for scalar types and random access iterators
/// using memcmp or a similar trick memory comparison; we should verify that
/// such a thing actually yields an improvement first.
///
template <typename InputIterator1, typename InputIterator2>
inline bool
equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2)
{
    while(first1 != last1)
    {
        if(!(*first1 == *first2)) // Value comparisons are always expressed via < or ==.
            return false;
        ++first1;
        ++first2;
    }
    return true;
}
+
/// equal
///
/// Returns: true if for every iterator i in the range [first1, last1) the
/// following corresponding conditions hold: predicate(*i, *(first2 + (i - first1))) != false.
/// Otherwise, returns false.
///
/// Complexity: At most 'last1 - first1' applications of the corresponding predicate.
///
template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
inline bool
equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate predicate)
{
    while(first1 != last1)
    {
        if(!predicate(*first1, *first2))
            return false;
        ++first1;
        ++first2;
    }
    return true;
}
+
+
+
/// identical
///
/// Returns true if the two input ranges are equivalent. The subtle
/// difference from the 'equal' algorithm is that equal assumes the two
/// ranges have equal length, whereas this algorithm efficiently checks
/// both length equality AND element equality in a single pass. No other
/// standard algorithm does this.
///
/// Returns: true if [first1, last1) has the same length as [first2, last2)
/// and the elements of the two ranges compare equal as per the equal algorithm.
///
/// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
/// of the corresponding comparison.
///
template <typename InputIterator1, typename InputIterator2>
bool identical(InputIterator1 first1, InputIterator1 last1,
               InputIterator2 first2, InputIterator2 last2)
{
    for(; (first1 != last1) && (first2 != last2); ++first1, ++first2)
    {
        if(!(*first1 == *first2))
            return false; // Element mismatch.
    }
    // Equal only if both ranges were exhausted simultaneously.
    return (first1 == last1) && (first2 == last2);
}
+
+
/// identical
///
/// Predicate version: the two ranges are identical when they have equal
/// length and predicate(*i, *j) holds for every corresponding pair.
///
template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
bool identical(InputIterator1 first1, InputIterator1 last1,
               InputIterator2 first2, InputIterator2 last2, BinaryPredicate predicate)
{
    for(; (first1 != last1) && (first2 != last2); ++first1, ++first2)
    {
        if(!predicate(*first1, *first2))
            return false; // Element mismatch under the predicate.
    }
    // Equal only if both ranges were exhausted simultaneously.
    return (first1 == last1) && (first2 == last2);
}
+
+
/// lexicographical_compare
///
/// Returns: true if the sequence of elements defined by the range
/// [first1, last1) is lexicographically less than the sequence of
/// elements defined by the range [first2, last2). Returns false otherwise.
///
/// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
/// of the corresponding comparison.
///
/// Note: If two sequences have the same number of elements and their
/// corresponding elements are equivalent, neither sequence is
/// lexicographically less than the other. If one sequence is a prefix of
/// the other, the shorter one is the lesser. Otherwise the result is the
/// comparison of the first pair of elements that are not equivalent.
///
template <typename InputIterator1, typename InputIterator2>
inline bool
lexicographical_compare(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2)
{
    while((first1 != last1) && (first2 != last2))
    {
        if(*first1 < *first2)
            return true;
        if(*first2 < *first1)
            return false;
        ++first1;
        ++first2;
    }
    // Ranges are equal up to the shorter length; range 1 is "less" only
    // if it was exhausted while range 2 still has elements.
    return (first1 == last1) && (first2 != last2);
}
+
+ inline bool // Specialization for const char*.
+ lexicographical_compare(const char* first1, const char* last1, const char* first2, const char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for char*.
+ lexicographical_compare(char* first1, char* last1, char* first2, char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for const unsigned char*.
+ lexicographical_compare(const unsigned char* first1, const unsigned char* last1, const unsigned char* first2, const unsigned char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for unsigned char*.
+ lexicographical_compare(unsigned char* first1, unsigned char* last1, unsigned char* first2, unsigned char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for const signed char*.
+ lexicographical_compare(const signed char* first1, const signed char* last1, const signed char* first2, const signed char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ inline bool // Specialization for signed char*.
+ lexicographical_compare(signed char* first1, signed char* last1, signed char* first2, signed char* last2)
+ {
+ const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ return result ? (result < 0) : (n1 < n2);
+ }
+
+ #if defined(_MSC_VER) // If using the VC++ compiler (and thus bool is known to be a single byte)...
+ //Not sure if this is a good idea.
+ //inline bool // Specialization for const bool*.
+ //lexicographical_compare(const bool* first1, const bool* last1, const bool* first2, const bool* last2)
+ //{
+ // const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ // const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ // return result ? (result < 0) : (n1 < n2);
+ //}
+ //
+ //inline bool // Specialization for bool*.
+ //lexicographical_compare(bool* first1, bool* last1, bool* first2, bool* last2)
+ //{
+ // const ptrdiff_t n1(last1 - first1), n2(last2 - first2);
+ // const int result = memcmp(first1, first2, (size_t)eastl::min_alt(n1, n2));
+ // return result ? (result < 0) : (n1 < n2);
+ //}
+ #endif
+
+
+
/// lexicographical_compare
///
/// Returns: true if the sequence of elements defined by the range
/// [first1, last1) is lexicographically less than the sequence of
/// elements defined by the range [first2, last2), as ordered by the
/// user-supplied Compare. Returns false otherwise.
///
/// Complexity: At most 'min((last1 - first1), (last2 - first2))' applications
/// of the corresponding comparison.
///
/// Note: If two sequences have the same number of elements and their
/// corresponding elements are equivalent, neither sequence is
/// lexicographically less than the other. If one sequence is a prefix of
/// the other, the shorter one is the lesser. Otherwise the result is the
/// comparison of the first pair of elements that are not equivalent.
///
/// Note: False is always returned if range 1 is exhausted before range 2.
/// As a result, a reverse comparison (e.g. greater<> instead of less<>)
/// can't succeed unless the two sequences have identical length; to get
/// the desired effect, reverse the order of the arguments instead.
///
template <typename InputIterator1, typename InputIterator2, typename Compare>
inline bool
lexicographical_compare(InputIterator1 first1, InputIterator1 last1,
                        InputIterator2 first2, InputIterator2 last2, Compare compare)
{
    while((first1 != last1) && (first2 != last2))
    {
        if(compare(*first1, *first2))
            return true;
        if(compare(*first2, *first1))
            return false;
        ++first1;
        ++first2;
    }
    // Ranges are equivalent up to the shorter length; range 1 is "less"
    // only if it was exhausted while range 2 still has elements.
    return (first1 == last1) && (first2 != last2);
}
+
+
+
+ /// lower_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a value
+ /// greater than or equivalent to a specified value.
+ ///
+ /// Effects: Finds the first position into which value can be inserted without
+ /// violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: *j < value.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ /// Optimizations: We have no need to specialize this implementation for random
+ /// access iterators (e.g. contiguous array), as the code below will already
+ /// take advantage of them.
+ ///
+ template <typename ForwardIterator, typename T>
+ ForwardIterator
+ lower_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array.
+
+ while(d > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array.
+
+ if(*i < value)
+ {
+ // Disabled because std::lower_bound doesn't specify (23.3.3.3, p3) this can be done: EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else
+ d = d2;
+ }
+ return first;
+ }
+
+
+ /// lower_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a value
+ /// greater than or equivalent to a specified value. The input Compare function
+ /// takes two arguments and returns true if the first argument is less than
+ /// the second argument.
+ ///
+ /// Effects: Finds the first position into which value can be inserted without
+ /// violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: compare(*j, value) != false.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ /// Optimizations: We have no need to specialize this implementation for random
+ /// access iterators (e.g. contiguous array), as the code below will already
+ /// take advantage of them.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ ForwardIterator
+ lower_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last); // This will be efficient for a random access iterator such as an array.
+
+ while(d > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2); // This will be efficient for a random access iterator such as an array.
+
+ if(compare(*i, value))
+ {
+ // Disabled because std::lower_bound doesn't specify (23.3.3.1, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else
+ d = d2;
+ }
+ return first;
+ }
+
+
+
+ /// upper_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a
+ /// value that is greater than a specified value.
+ ///
+ /// Effects: Finds the furthermost position into which value can be inserted
+ /// without violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: !(value < *j).
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ ForwardIterator
+ upper_bound(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType len = eastl::distance(first, last);
+
+ while(len > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, len2);
+
+ if(!(value < *i)) // Note that we always express value comparisons in terms of < or ==.
+ {
+ first = ++i;
+ len -= len2 + 1;
+ }
+ else
+ {
+ // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane.
+ len = len2;
+ }
+ }
+ return first;
+ }
+
+
+ /// upper_bound
+ ///
+ /// Finds the position of the first element in a sorted range that has a
+ /// value that is greater than a specified value. The input Compare function
+ /// takes two arguments and returns true if the first argument is less than
+ /// the second argument.
+ ///
+ /// Effects: Finds the furthermost position into which value can be inserted
+ /// without violating the ordering.
+ ///
+ /// Returns: The furthermost iterator i in the range [first, last) such that
+ /// for any iterator j in the range [first, i) the following corresponding
+ /// condition holds: compare(value, *j) == false.
+ ///
+ /// Complexity: At most 'log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ ForwardIterator
+ upper_bound(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType len = eastl::distance(first, last);
+
+ while(len > 0)
+ {
+ ForwardIterator i = first;
+ DifferenceType len2 = len >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, len2);
+
+ if(!compare(value, *i))
+ {
+ first = ++i;
+ len -= len2 + 1;
+ }
+ else
+ {
+ // Disabled because std::upper_bound doesn't specify (23.3.3.2, p3) this can be done: EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane.
+ len = len2;
+ }
+ }
+ return first;
+ }
+
+
+ /// equal_range
+ ///
+ /// Effects: Finds the largest subrange [i, j) such that the value can be inserted
+ /// at any iterator k in it without violating the ordering. k satisfies the
+ /// corresponding conditions: !(*k < value) && !(value < *k).
+ ///
+ /// Complexity: At most '2 * log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ pair<ForwardIterator, ForwardIterator>
+ equal_range(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ typedef pair<ForwardIterator, ForwardIterator> ResultType;
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last);
+
+ while(d > 0)
+ {
+ ForwardIterator i(first);
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2);
+
+ if(*i < value)
+ {
+ EASTL_VALIDATE_COMPARE(!(value < *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else if(value < *i)
+ {
+ EASTL_VALIDATE_COMPARE(!(*i < value)); // Validate that the compare function is sane.
+ d = d2;
+ last = i;
+ }
+ else
+ {
+ ForwardIterator j(i);
+
+ return ResultType(eastl::lower_bound(first, i, value),
+ eastl::upper_bound(++j, last, value));
+ }
+ }
+ return ResultType(first, first);
+ }
+
+
+ /// equal_range
+ ///
+ /// Effects: Finds the largest subrange [i, j) such that the value can be inserted
+ /// at any iterator k in it without violating the ordering. k satisfies the
+ /// corresponding conditions: comp(*k, value) == false && comp(value, *k) == false.
+ ///
+ /// Complexity: At most '2 * log(last - first) + 1' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ pair<ForwardIterator, ForwardIterator>
+ equal_range(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ typedef pair<ForwardIterator, ForwardIterator> ResultType;
+ typedef typename eastl::iterator_traits<ForwardIterator>::difference_type DifferenceType;
+
+ DifferenceType d = eastl::distance(first, last);
+
+ while(d > 0)
+ {
+ ForwardIterator i(first);
+ DifferenceType d2 = d >> 1; // We use '>>1' here instead of '/2' because MSVC++ for some reason generates significantly worse code for '/2'. Go figure.
+
+ eastl::advance(i, d2);
+
+ if(compare(*i, value))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(value, *i)); // Validate that the compare function is sane.
+ first = ++i;
+ d -= d2 + 1;
+ }
+ else if(compare(value, *i))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*i, value)); // Validate that the compare function is sane.
+ d = d2;
+ last = i;
+ }
+ else
+ {
+ ForwardIterator j(i);
+
+ return ResultType(eastl::lower_bound(first, i, value, compare),
+ eastl::upper_bound(++j, last, value, compare));
+ }
+ }
+ return ResultType(first, first);
+ }
+
+
+ /// replace
+ ///
+ /// Effects: Substitutes elements referred by the iterator i in the range [first, last)
+ /// with new_value, when the following corresponding conditions hold: *i == old_value.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace is replace_if and not another variation of replace.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline void
+ replace(ForwardIterator first, ForwardIterator last, const T& old_value, const T& new_value)
+ {
+ for(; first != last; ++first)
+ {
+ if(*first == old_value)
+ *first = new_value;
+ }
+ }
+
+
+ /// replace_if
+ ///
+ /// Effects: Substitutes elements referred by the iterator i in the range [first, last)
+ /// with new_value, when the following corresponding conditions hold: predicate(*i) != false.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_if is replace and not another variation of replace_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename ForwardIterator, typename Predicate, typename T>
+ inline void
+ replace_if(ForwardIterator first, ForwardIterator last, Predicate predicate, const T& new_value)
+ {
+ for(; first != last; ++first)
+ {
+ if(predicate(*first))
+ *first = new_value;
+ }
+ }
+
+
+ /// remove_copy
+ ///
+ /// Effects: Copies all the elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition does not hold:
+ /// *i == value.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename T>
+ inline OutputIterator
+ remove_copy(InputIterator first, InputIterator last, OutputIterator result, const T& value)
+ {
+ for(; first != last; ++first)
+ {
+ if(!(*first == value)) // Note that we always express value comparisons in terms of < or ==.
+ {
+ *result = *first;
+ ++result;
+ }
+ }
+ return result;
+ }
+
+
+ /// remove_copy_if
+ ///
+ /// Effects: Copies all the elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition does not hold:
+ /// predicate(*i) != false.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename Predicate>
+ inline OutputIterator
+ remove_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate)
+ {
+ for(; first != last; ++first)
+ {
+ if(!predicate(*first))
+ {
+ *result = *first;
+ ++result;
+ }
+ }
+ return result;
+ }
+
+
+ /// remove
+ ///
+ /// Effects: Eliminates all the elements referred to by iterator i in the
+ /// range [first, last) for which the following corresponding condition
+ /// holds: *i == value.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of remove is remove_if and not another variation of remove.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// ...
+ /// intArray.erase(remove(intArray.begin(), intArray.end(), 4), intArray.end()); // Erase all elements of value 4.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline ForwardIterator
+ remove(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ first = eastl::find(first, last, value);
+ if(first != last)
+ {
+ ForwardIterator i(first);
+ return eastl::remove_copy(++i, last, first, value);
+ }
+ return first;
+ }
+
+
+ /// remove_if
+ ///
+ /// Effects: Eliminates all the elements referred to by iterator i in the
+ /// range [first, last) for which the following corresponding condition
+ /// holds: predicate(*i) != false.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of remove_if is remove and not another variation of remove_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// ...
+ /// intArray.erase(remove(intArray.begin(), intArray.end(), bind2nd(less<int>(), (int)3)), intArray.end()); // Erase all elements less than 3.
+ ///
+ template <typename ForwardIterator, typename Predicate>
+ inline ForwardIterator
+ remove_if(ForwardIterator first, ForwardIterator last, Predicate predicate)
+ {
+ first = eastl::find_if(first, last, predicate);
+ if(first != last)
+ {
+ ForwardIterator i(first);
+ return eastl::remove_copy_if<ForwardIterator, ForwardIterator, Predicate>(++i, last, first, predicate);
+ }
+ return first;
+ }
+
+
+ /// replace_copy
+ ///
+ /// Effects: Assigns to every iterator i in the range [result, result + (last - first))
+ /// either new_value or *(first + (i - result)) depending on whether the following
+ /// corresponding conditions hold: *(first + (i - result)) == old_value.
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first)) shall not overlap.
+ ///
+ /// Returns: result + (last - first).
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_copy is replace_copy_if and not another variation of replace_copy.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename T>
+ inline OutputIterator
+ replace_copy(InputIterator first, InputIterator last, OutputIterator result, const T& old_value, const T& new_value)
+ {
+ for(; first != last; ++first, ++result)
+ *result = (*first == old_value) ? new_value : *first;
+ return result;
+ }
+
+
+ /// replace_copy_if
+ ///
+ /// Effects: Assigns to every iterator i in the range [result, result + (last - first))
+ /// either new_value or *(first + (i - result)) depending on whether the following
+ /// corresponding conditions hold: predicate(*(first + (i - result))) != false.
+ ///
+ /// Requires: The ranges [first, last) and [result, result+(lastfirst)) shall not overlap.
+ ///
+ /// Returns: result + (last - first).
+ ///
+ /// Complexity: Exactly 'last - first' applications of the corresponding predicate.
+ ///
+ /// Note: The predicate version of replace_copy_if is replace_copy and not another variation of replace_copy_if.
+ /// This is because both versions would have the same parameter count and there could be ambiguity.
+ ///
+ template <typename InputIterator, typename OutputIterator, typename Predicate, typename T>
+ inline OutputIterator
+ replace_copy_if(InputIterator first, InputIterator last, OutputIterator result, Predicate predicate, const T& new_value)
+ {
+ for(; first != last; ++first, ++result)
+ *result = predicate(*first) ? new_value : *first;
+ return result;
+ }
+
+
+
+
+ // reverse
+ //
+ // We provide helper functions which allow reverse to be implemented more
+ // efficiently for some types of iterators and types.
+ //
+ template <typename BidirectionalIterator>
+ inline void reverse_impl(BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag)
+ {
+ for(; (first != last) && (first != --last); ++first) // We are not allowed to use operator <, <=, >, >= with a
+ eastl::iter_swap(first, last); // generic (bidirectional or otherwise) iterator.
+ }
+
+ template <typename RandomAccessIterator>
+ inline void reverse_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
+ {
+ if ( first != last ) {
+ for(; first < --last; ++first) // With a random access iterator, we can use operator < to more efficiently implement
+ eastl::iter_swap(first, last); // this algorithm. A generic iterator doesn't necessarily have an operator < defined.
+ }
+ }
+
+ /// reverse
+ ///
+ /// Reverses the values within the range [first, last).
+ ///
+ /// Effects: For each nonnegative integer i <= (last - first) / 2,
+ /// applies swap to all pairs of iterators first + i, (last i) - 1.
+ ///
+ /// Complexity: Exactly '(last - first) / 2' swaps.
+ ///
+ template <typename BidirectionalIterator>
+ inline void reverse(BidirectionalIterator first, BidirectionalIterator last)
+ {
+ typedef typename eastl::iterator_traits<BidirectionalIterator>::iterator_category IC;
+ eastl::reverse_impl(first, last, IC());
+ }
+
+
+
+ /// reverse_copy
+ ///
+ /// Copies the range [first, last) in reverse order to the result.
+ ///
+ /// Effects: Copies the range [first, last) to the range
+ /// [result, result + (last - first)) such that for any nonnegative
+ /// integer i < (last - first) the following assignment takes place:
+ /// *(result + (last - first) - i) = *(first + i)
+ ///
+ /// Requires: The ranges [first, last) and [result, result + (last - first))
+ /// shall not overlap.
+ ///
+ /// Returns: result + (last - first). That is, returns the end of the output range.
+ ///
+ /// Complexity: Exactly 'last - first' assignments.
+ ///
+ template <typename BidirectionalIterator, typename OutputIterator>
+ inline OutputIterator
+ reverse_copy(BidirectionalIterator first, BidirectionalIterator last, OutputIterator result)
+ {
+ for(; first != last; ++result)
+ *result = *--last;
+ return result;
+ }
+
+
+
	/// search
	///
	/// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2)
	/// when compared element-by-element. It returns an iterator pointing to the beginning of that
	/// subsequence, or else last1 if no such subsequence exists. As such, it is very much like
	/// the C strstr function, with the primary difference being that strstr uses 0-terminated strings
	/// whereas search uses an end iterator to specify the end of a string.
	///
	/// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for
	/// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds:
	/// *(i + n) == *(first2 + n). Returns last1 if no such iterator is found.
	/// An empty pattern (first2 == last2) matches immediately: first1 is returned.
	///
	/// Complexity: At most (last1 - first1) * (last2 - first2) applications of the corresponding predicate.
	///
	template <typename ForwardIterator1, typename ForwardIterator2>
	ForwardIterator1
	search(ForwardIterator1 first1, ForwardIterator1 last1,
		   ForwardIterator2 first2, ForwardIterator2 last2)
	{
		if(first2 != last2) // If there is anything to search for...
		{
			// We need to make a special case for a pattern of one element,
			// as the logic below prevents one element patterns from working.
			ForwardIterator2 temp2(first2);
			++temp2;

			if(temp2 != last2) // If what we are searching for has a length > 1...
			{
				ForwardIterator1 cur1(first1);
				ForwardIterator2 p2;

				// Outer loop: scan for candidate positions where the pattern's first
				// element occurs; inner loop: verify the rest of the pattern there.
				while(first1 != last1)
				{
					// The following loop is the equivalent of eastl::find(first1, last1, *first2)
					while((first1 != last1) && !(*first1 == *first2))
						++first1;

					if(first1 != last1)
					{
						p2   = temp2;  // p2 walks the pattern starting at its second element.
						cur1 = first1; // cur1 walks the text starting just past the candidate.

						if(++cur1 != last1)
						{
							while(*cur1 == *p2)
							{
								if(++p2 == last2)
									return first1; // Entire pattern matched; the candidate is the answer.

								if(++cur1 == last1)
									return last1; // Ran off the text mid-pattern; no later match can fit either.
							}

							++first1; // Mismatch: restart the scan one past the failed candidate.
							continue;
						}
					}
					// The candidate's first element was the text's last element (or no
					// candidate was found at all), so the pattern cannot fit.
					return last1;
				}

				// Fall through to the end.
			}
			else
				return eastl::find(first1, last1, *first2); // Single-element pattern: a plain find suffices.
		}

		return first1; // Empty pattern matches at the front, like strstr with an empty needle.


		#if 0
		/* Another implementation which is a little more simpler but executes a little slower on average.
			typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type_1;
			typedef typename eastl::iterator_traits<ForwardIterator2>::difference_type difference_type_2;

			const difference_type_2 d2 = eastl::distance(first2, last2);

			for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; ++first1, --d1)
			{
				ForwardIterator1 temp1 = first1;

				for(ForwardIterator2 temp2 = first2; ; ++temp1, ++temp2)
				{
					if(temp2 == last2)
						return first1;
					if(!(*temp1 == *temp2))
						break;
				}
			}

			return last1;
		*/
		#endif
	}
+
+
+ /// search
+ ///
+ /// Search finds a subsequence within the range [first1, last1) that is identical to [first2, last2)
+ /// when compared element-by-element. It returns an iterator pointing to the beginning of that
+ /// subsequence, or else last1 if no such subsequence exists. As such, it is very much like
+ /// the C strstr function, with the only difference being that strstr uses 0-terminated strings
+ /// whereas search uses an end iterator to specify the end of a string.
+ ///
+ /// Returns: The first iterator i in the range [first1, last1 - (last2 - first2)) such that for
+ /// any nonnegative integer n less than 'last2 - first2' the following corresponding condition holds:
+ /// predicate(*(i + n), *(first2 + n)) != false. Returns last1 if no such iterator is found.
+ ///
+ /// Complexity: At most (last1 first1) * (last2 first2) applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ ForwardIterator1
+ search(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::difference_type difference_type_1;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::difference_type difference_type_2;
+
+ difference_type_2 d2 = eastl::distance(first2, last2);
+
+ if(d2 != 0)
+ {
+ ForwardIterator1 i(first1);
+ eastl::advance(i, d2);
+
+ for(difference_type_1 d1 = eastl::distance(first1, last1); d1 >= d2; --d1)
+ {
+ if(eastl::equal<ForwardIterator1, ForwardIterator2, BinaryPredicate>(first1, i, first2, predicate))
+ return first1;
+ if(d1 > d2) // To do: Find a way to make the algorithm more elegant.
+ {
+ ++first1;
+ ++i;
+ }
+ }
+ return last1;
+ }
+ return first1; // Just like with strstr, we return first1 if the match string is empty.
+ }
+
+
+
	// search_n helper functions
	//
	// Generic (forward iterator) implementation: scans for a run of 'count'
	// consecutive elements equal to 'value'. On a mismatch inside a candidate
	// run it jumps past the mismatching element, since no run of 'count' equal
	// elements can start before it.
	//
	template <typename ForwardIterator, typename Size, typename T>
	ForwardIterator     // Generic implementation.
	search_n_impl(ForwardIterator first, ForwardIterator last, Size count, const T& value, EASTL_ITC_NS::forward_iterator_tag)
	{
		if(count <= 0)
			return first; // A zero-length (or negative) run trivially matches at the front.

		Size d1 = (Size)eastl::distance(first, last); // Should d1 be of type Size, ptrdiff_t, or iterator_traits<ForwardIterator>::difference_type?
													  // The problem with using iterator_traits<ForwardIterator>::difference_type is that
		if(count > d1)                                // ForwardIterator may not be a true iterator but instead something like a pointer.
			return last;

		// Note: d1 is decremented both by the inner loop (once per element examined)
		// and by the outer loop header, so it always tracks the distance from the
		// current scan position ('i' after a mismatch) to 'last'.
		for(; d1 >= count; ++first, --d1)
		{
			ForwardIterator i(first);

			for(Size n = 0; n < count; ++n, ++i, --d1)
			{
				if(!(*i == value)) // Note that we always express value comparisons in terms of < or ==.
					goto not_found; // Mismatch at i: skip the whole prefix, no run can start before i + 1.
			}
			return first; // 'count' consecutive matches starting at first.

			not_found:
			first = i; // The outer loop's ++first then resumes the scan just past the mismatch.
		}
		return last;
	}
+
	// Random access iterator implementation. Much faster than generic implementation,
	// because it probes every 'count'-th element first (a run of 'count' equal values
	// must contain that element) and only examines the neighborhood on a hit.
	template <typename RandomAccessIterator, typename Size, typename T> inline
	RandomAccessIterator // Random access iterator implementation. Much faster than generic implementation.
	search_n_impl(RandomAccessIterator first, RandomAccessIterator last, Size count, const T& value, EASTL_ITC_NS::random_access_iterator_tag)
	{
		if(count <= 0)
			return first; // A zero-length (or negative) run trivially matches at the front.
		else if(count == 1)
			return find(first, last, value); // A run of one is an ordinary find.
		else if(last > first)
		{
			RandomAccessIterator lookAhead; // Probe position; advances 'count' elements at a time.
			RandomAccessIterator backTrack; // Walks backward from a probe hit to measure the run.

			Size skipOffset = (count - 1);
			Size tailSize = (Size)(last - first); // Elements remaining at/after the current probe.
			Size remainder;                       // Matches still needed to complete a run of 'count'.
			Size prevRemainder;

			for(lookAhead = first + skipOffset; tailSize >= count; lookAhead += count)
			{
				tailSize -= count;

				if(*lookAhead == value) // Probe hit: check how far the run extends around it.
				{
					remainder = skipOffset;

					// Extend backward from the probe; if the run reaches back far
					// enough by itself, the match starts at lookAhead - skipOffset.
					for(backTrack = lookAhead - 1; *backTrack == value; --backTrack)
					{
						if(--remainder == 0)
							return (lookAhead - skipOffset); // success
					}

					// Otherwise try to complete the run forward, if enough elements remain.
					if(remainder <= tailSize)
					{
						prevRemainder = remainder;

						while(*(++lookAhead) == value)
						{
							if(--remainder == 0)
								return (backTrack + 1); // success
						}
						tailSize -= (prevRemainder - remainder); // Account for the extra elements consumed going forward.
					}
					else
						return last; // failure (too few elements remain to complete any run)
				}

				// lookAhead here is always pointing to the element of the last mismatch.
			}
		}

		return last; // failure
	}
+
+
+ /// search_n
+ ///
+ /// Returns: The first iterator i in the range [first, last count) such that
+ /// for any nonnegative integer n less than count the following corresponding
+ /// conditions hold: *(i + n) == value, pred(*(i + n),value) != false.
+ /// Returns last if no such iterator is found.
+ ///
+ /// Complexity: At most '(last1 - first1) * count' applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator, typename Size, typename T>
+ ForwardIterator
+ search_n(ForwardIterator first, ForwardIterator last, Size count, const T& value)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category IC;
+ return eastl::search_n_impl(first, last, count, value, IC());
+ }
+
+
+ /// binary_search
+ ///
+ /// Returns: true if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: !(*i < value) && !(value < *i).
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ /// Note: The reason binary_search returns bool instead of an iterator is
+ /// that search_n, lower_bound, or equal_range already return an iterator.
+ /// However, there are arguments that binary_search should return an iterator.
+ /// Note that we provide binary_search_i (STL extension) to return an iterator.
+ ///
+ /// To use search_n to find an item, do this:
+ /// iterator i = search_n(begin, end, 1, value);
+ /// To use lower_bound to find an item, do this:
+ /// iterator i = lower_bound(begin, end, value);
+ /// if((i != last) && !(value < *i))
+ /// <use the iterator>
+ /// It turns out that the above lower_bound method is as fast as binary_search
+ /// would be if it returned an iterator.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline bool
+ binary_search(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T>(first, last, value));
+ return ((i != last) && !(value < *i)); // Note that we always express value comparisons in terms of < or ==.
+ }
+
+
+ /// binary_search
+ ///
+ /// Returns: true if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: compare(*i, value) == false &&
+ /// compare(value, *i) == false.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ /// Note: See comments above regarding the bool return value of binary_search.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ inline bool
+ binary_search(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T, Compare>(first, last, value, compare));
+ return ((i != last) && !compare(value, *i));
+ }
+
+
+ /// binary_search_i
+ ///
+ /// Returns: iterator if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: !(*i < value) && !(value < *i).
+ /// Returns last if the value is not found.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ template <typename ForwardIterator, typename T>
+ inline ForwardIterator
+ binary_search_i(ForwardIterator first, ForwardIterator last, const T& value)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T>(first, last, value));
+ if((i != last) && !(value < *i)) // Note that we always express value comparisons in terms of < or ==.
+ return i;
+ return last;
+ }
+
+
+ /// binary_search_i
+ ///
+ /// Returns: iterator if there is an iterator i in the range [first last) that
+ /// satisfies the corresponding conditions: !(*i < value) && !(value < *i).
+ /// Returns last if the value is not found.
+ ///
+ /// Complexity: At most 'log(last - first) + 2' comparisons.
+ ///
+ template <typename ForwardIterator, typename T, typename Compare>
+ inline ForwardIterator
+ binary_search_i(ForwardIterator first, ForwardIterator last, const T& value, Compare compare)
+ {
+ // To do: This can be made slightly faster by not using lower_bound.
+ ForwardIterator i(eastl::lower_bound<ForwardIterator, T, Compare>(first, last, value, compare));
+ if((i != last) && !compare(value, *i))
+ return i;
+ return last;
+ }
+
+
+ /// unique
+ ///
+ /// Given a sorted range, this function removes duplicated items.
+ /// Note that if you have a container then you will probably want
+ /// to call erase on the container with the return value if your
+ /// goal is to remove the duplicated items from the container.
+ ///
+ /// Effects: Eliminates all but the first element from every consecutive
+ /// group of equal elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// *i == *(i - 1).
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: If the range (last - first) is not empty, exactly (last - first)
+ /// applications of the corresponding predicate, otherwise no applications of the predicate.
+ ///
+ /// Example usage:
+ /// vector<int> intArray;
+ /// ...
+ /// intArray.erase(unique(intArray.begin(), intArray.end()), intArray.end());
+ ///
+ template <typename ForwardIterator>
+ ForwardIterator unique(ForwardIterator first, ForwardIterator last)
+ {
+ first = eastl::adjacent_find<ForwardIterator>(first, last);
+
+ if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function.
+ {
+ ForwardIterator dest(first);
+
+ for(++first; first != last; ++first)
+ {
+ if(!(*dest == *first)) // Note that we always express value comparisons in terms of < or ==.
+ *++dest = *first;
+ }
+ return ++dest;
+ }
+ return last;
+ }
+
+
+ /// unique
+ ///
+ /// Given a sorted range, this function removes duplicated items.
+ /// Note that if you have a container then you will probably want
+ /// to call erase on the container with the return value if your
+ /// goal is to remove the duplicated items from the container.
+ ///
+ /// Effects: Eliminates all but the first element from every consecutive
+ /// group of equal elements referred to by the iterator i in the range
+ /// [first, last) for which the following corresponding condition holds:
+ /// predicate(*i, *(i - 1)) != false.
+ ///
+ /// Returns: The end of the resulting range.
+ ///
+ /// Complexity: If the range (last - first) is not empty, exactly (last - first)
+ /// applications of the corresponding predicate, otherwise no applications of the predicate.
+ ///
+ template <typename ForwardIterator, typename BinaryPredicate>
+ ForwardIterator unique(ForwardIterator first, ForwardIterator last, BinaryPredicate predicate)
+ {
+ first = eastl::adjacent_find<ForwardIterator, BinaryPredicate>(first, last, predicate);
+
+ if(first != last) // We expect that there are duplicated items, else the user wouldn't be calling this function.
+ {
+ ForwardIterator dest(first);
+
+ for(++first; first != last; ++first)
+ {
+ if(!predicate(*dest, *first))
+ *++dest = *first;
+ }
+ return ++dest;
+ }
+ return last;
+ }
+
+
+
+ // find_end
+ //
+ // We provide two versions here, one for a bidirectional iterators and one for
+ // regular forward iterators. Given that we are searching backward, it's a bit
+ // more efficient if we can use backwards iteration to implement our search,
+ // though this requires an iterator that can be reversed.
+ //
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ ForwardIterator1
+ find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty.
+ {
+ for(ForwardIterator1 result(last1); ; )
+ {
+ const ForwardIterator1 resultNext(eastl::search(first1, last1, first2, last2));
+
+ if(resultNext != last1) // If another sequence was found...
+ {
+ first1 = result = resultNext;
+ ++first1;
+ }
+ else
+ return result;
+ }
+ }
+ return last1;
+ }
+
// find_end_impl (bidirectional iterator specialization)
//
// Finds the last occurrence of [first2, last2) within [first1, last1) by
// running eastl::search over reversed views of both ranges: the first match
// in reverse order is the last match in forward order.
template <typename BidirectionalIterator1, typename BidirectionalIterator2>
BidirectionalIterator1
find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
              BidirectionalIterator2 first2, BidirectionalIterator2 last2,
              EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag)
{
    typedef eastl::reverse_iterator<BidirectionalIterator1> reverse_iterator1;
    typedef eastl::reverse_iterator<BidirectionalIterator2> reverse_iterator2;

    // Search the reversed pattern within the reversed haystack.
    reverse_iterator1 rresult(eastl::search(reverse_iterator1(last1), reverse_iterator1(first1),
                                            reverse_iterator2(last2), reverse_iterator2(first2)));
    if(rresult.base() != first1) // If we found something... (a failed search yields base() == first1)
    {
        BidirectionalIterator1 result(rresult.base());

        // rresult.base() is one past the end of the match in forward terms,
        // so step back by the pattern length to reach the match's start.
        eastl::advance(result, -eastl::distance(first2, last2)); // We have an opportunity to optimize this, as the
        return result;                                           // search function already calculates this distance.
    }
    return last1;
}
+
+ /// find_end
+ ///
+ /// Finds the last occurrence of the second sequence in the first sequence.
+ /// As such, this function is much like the C string function strrstr and it
+ /// is also the same as a reversed version of 'search'. It is called find_end
+ /// instead of the possibly more consistent search_end simply because the C++
+ /// standard algorithms have such naming.
+ ///
+ /// Returns an iterator between first1 and last1 if the sequence is found.
+ /// returns last1 (the end of the first seqence) if the sequence is not found.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2>
+ inline ForwardIterator1
+ find_end(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::iterator_category IC1;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::iterator_category IC2;
+
+ return eastl::find_end_impl(first1, last1, first2, last2, IC1(), IC2());
+ }
+
+
+
+
+ // To consider: Fold the predicate and non-predicate versions of
+ // this algorithm into a single function.
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ ForwardIterator1
+ find_end_impl(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate,
+ EASTL_ITC_NS::forward_iterator_tag, EASTL_ITC_NS::forward_iterator_tag)
+ {
+ if(first2 != last2) // We have to do this check because the search algorithm below will return first1 (and not last1) if the first2/last2 range is empty.
+ {
+ for(ForwardIterator1 result = last1; ; )
+ {
+ const ForwardIterator1 resultNext(eastl::search<ForwardIterator1, ForwardIterator2, BinaryPredicate>(first1, last1, first2, last2, predicate));
+
+ if(resultNext != last1) // If another sequence was found...
+ {
+ first1 = result = resultNext;
+ ++first1;
+ }
+ else
+ return result;
+ }
+ }
+ return last1;
+ }
+
// find_end_impl (bidirectional, predicate version)
//
// Same reverse-search strategy as the non-predicate bidirectional overload:
// the first match of the reversed pattern in the reversed haystack is the
// last match in forward order.
template <typename BidirectionalIterator1, typename BidirectionalIterator2, typename BinaryPredicate>
BidirectionalIterator1
find_end_impl(BidirectionalIterator1 first1, BidirectionalIterator1 last1,
              BidirectionalIterator2 first2, BidirectionalIterator2 last2,
              BinaryPredicate predicate,
              EASTL_ITC_NS::bidirectional_iterator_tag, EASTL_ITC_NS::bidirectional_iterator_tag)
{
    typedef eastl::reverse_iterator<BidirectionalIterator1> reverse_iterator1;
    typedef eastl::reverse_iterator<BidirectionalIterator2> reverse_iterator2;

    reverse_iterator1 rresult(eastl::search<reverse_iterator1, reverse_iterator2, BinaryPredicate>
                                  (reverse_iterator1(last1), reverse_iterator1(first1),
                                   reverse_iterator2(last2), reverse_iterator2(first2),
                                   predicate));
    if(rresult.base() != first1) // If we found something... (a failed search yields base() == first1)
    {
        BidirectionalIterator1 result(rresult.base());
        // base() is one past the end of the forward match; back up by the
        // pattern length to return the match's first element.
        eastl::advance(result, -eastl::distance(first2, last2));
        return result;
    }
    return last1;
}
+
+
+ /// find_end
+ ///
+ /// Effects: Finds a subsequence of equal values in a sequence.
+ ///
+ /// Returns: The last iterator i in the range [first1, last1 - (last2 - first2))
+ /// such that for any nonnegative integer n < (last2 - first2), the following
+ /// corresponding conditions hold: pred(*(i+n),*(first2+n)) != false. Returns
+ /// last1 if no such iterator is found.
+ ///
+ /// Complexity: At most (last2 - first2) * (last1 - first1 - (last2 - first2) + 1)
+ /// applications of the corresponding predicate.
+ ///
+ template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
+ inline ForwardIterator1
+ find_end(ForwardIterator1 first1, ForwardIterator1 last1,
+ ForwardIterator2 first2, ForwardIterator2 last2,
+ BinaryPredicate predicate)
+ {
+ typedef typename eastl::iterator_traits<ForwardIterator1>::iterator_category IC1;
+ typedef typename eastl::iterator_traits<ForwardIterator2>::iterator_category IC2;
+
+ return eastl::find_end_impl<ForwardIterator1, ForwardIterator2, BinaryPredicate>
+ (first1, last1, first2, last2, predicate, IC1(), IC2());
+ }
+
+
+
/// set_difference
///
/// set_difference iterates over both (sorted) input ranges and copies elements
/// present in the first range but not the second to the output range.
///
/// Effects: Copies the elements of the range [first1, last1) which are not
/// present in the range [first2, last2) to the range beginning at result.
/// The elements in the constructed range are sorted.
///
/// Requires: The input ranges must be sorted.
/// Requires: The output range shall not overlap with either of the original ranges.
///
/// Returns: The end of the output range.
///
/// Complexity: At most (2 * ((last1 - first1) + (last2 - first2)) - 1) comparisons.
///
template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
                              InputIterator2 first2, InputIterator2 last2,
                              OutputIterator result)
{
    // March through both sorted ranges in lockstep.
    while((first1 != last1) && (first2 != last2))
    {
        if(*first1 < *first2)      // *first1 cannot appear in range 2: keep it.
        {
            *result = *first1;
            ++result;
            ++first1;
        }
        else if(*first2 < *first1) // *first2 is smaller: skip past it.
            ++first2;
        else                       // Equivalent elements: exclude *first1.
        {
            ++first1;
            ++first2;
        }
    }

    // Whatever remains of range 1 has no counterpart left in range 2.
    while(first1 != last1)
    {
        *result = *first1;
        ++result;
        ++first1;
    }
    return result;
}
+
+
template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
OutputIterator set_difference(InputIterator1 first1, InputIterator1 last1,
                              InputIterator2 first2, InputIterator2 last2,
                              OutputIterator result, Compare compare)
{
    // March through both sorted ranges in lockstep, ordering via 'compare'.
    while((first1 != last1) && (first2 != last2))
    {
        if(compare(*first1, *first2)) // *first1 precedes *first2: it is not in range 2, keep it.
        {
            EASTL_VALIDATE_COMPARE(!compare(*first2, *first1)); // Validate that the compare function is sane.
            *result = *first1;
            ++result;
            ++first1;
        }
        else if(compare(*first2, *first1)) // *first2 precedes *first1: skip past it.
        {
            EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
            ++first2;
        }
        else // Equivalent under compare: exclude *first1 from the output.
        {
            ++first1;
            ++first2;
        }
    }

    // Copy the unmatched tail of range 1 to the output.
    while(first1 != last1)
    {
        *result = *first1;
        ++result;
        ++first1;
    }
    return result;
}
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/allocator.h b/UnknownVersion/include/EASTL/allocator.h
new file mode 100644
index 0000000..06978fb
--- /dev/null
+++ b/UnknownVersion/include/EASTL/allocator.h
@@ -0,0 +1,344 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/allocator.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ALLOCATOR_H
+#define EASTL_ALLOCATOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <stddef.h>
+
+
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable: 4189) // local variable is initialized but not referenced
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_ALLOCATOR_DEFAULT_NAME
+ ///
+ /// Defines a default allocator name in the absence of a user-provided name.
+ ///
+#ifndef EASTL_ALLOCATOR_DEFAULT_NAME
+# define EASTL_ALLOCATOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX // Unless the user overrides something, this is "EASTL".
+#endif
+
+
/// alloc_flags
///
/// Defines allocation flags, passed through the 'flags' argument of
/// allocator::allocate. Their interpretation is ultimately up to the
/// operator new implementation backing the allocator.
///
enum alloc_flags
{
    MEM_TEMP = 0, // Low memory, not necessarily actually temporary.
    MEM_PERM = 1  // High memory, for things that won't be unloaded.
};
+
+
/// allocator
///
/// In this allocator class, note that it is not templated on any type and
/// instead it simply allocates blocks of memory much like the C malloc and
/// free functions. It can be thought of as similar to C++ std::allocator<char>.
/// The flags parameter has meaning that is specific to the allocation.
///
class EASTL_API allocator
{
public:
    typedef eastl_size_t size_type;

    // Constructs the allocator, optionally tagging it with a debug name.
    // (The name is only stored when EASTL_NAME_ENABLED is set.)
    EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME));
    allocator(const allocator& x);
    allocator(const allocator& x, const char* pName); // Copies x but uses pName as the debug name.

    allocator& operator=(const allocator& x);

    // Allocates n bytes; inFile/inLine feed the debug operator new overloads.
    void* allocate(size_t n, const char* inFile, int inLine, int flags = 0);
    // Allocates n bytes aligned to 'alignment', with the alignment applying
    // at byte 'offset' into the block. See alloc_flags for 'flags'.
    void* allocate(size_t n, const char* inFile, int inLine, size_t alignment, size_t offset, int flags = 0);
    // Frees memory previously returned by allocate(). The size n is unused
    // by the default implementation.
    void deallocate(void* p, size_t n);

    const char* get_name() const;     // Returns the debug name (a fixed default when naming is compiled out).
    void set_name(const char* pName); // Sets the debug name.

protected:
#if EASTL_NAME_ENABLED
    const char* mpName; // Debug name, used to track memory.
#endif
};

// In the default implementation below, all allocator instances compare equal.
bool operator==(const allocator& a, const allocator& b);
bool operator!=(const allocator& a, const allocator& b);

// Get/set the process-wide default allocator instance.
EASTL_API allocator* GetDefaultAllocator();
EASTL_API allocator* SetDefaultAllocator(allocator* pAllocator);
+
+
+
/// get_default_allocator
///
/// This templated function allows the user to implement a default allocator
/// retrieval function that any part of EASTL can use. EASTL containers take
/// an Allocator parameter which identifies an Allocator class to use. But
/// different kinds of allocators have different mechanisms for retrieving
/// a default allocator instance, and some don't even intrinsically support
/// such functionality. The user can override this get_default_allocator
/// function in order to provide the glue between EASTL and whatever their
/// system's default allocator happens to be.
///
/// Example usage:
///     MyAllocatorType* gpSystemAllocator;
///
///     MyAllocatorType* get_default_allocator(const MyAllocatorType*)
///         { return gpSystemAllocator; }
///
template <typename Allocator>
inline Allocator* get_default_allocator(const Allocator*)
{
    // The unspecialized version knows of no default instance for an arbitrary
    // allocator type, so it reports "none available" by returning NULL; users
    // provide an overload for their own allocator types (see example above).
    return NULL;
}
+
// Overload for the built-in allocator type: EASTLAllocatorDefault() already
// returns the default instance, so we simply forward to it.
inline EASTLAllocatorType* get_default_allocator(const EASTLAllocatorType*)
{
    return EASTLAllocatorDefault(); // For the built-in allocator EASTLAllocatorType, we happen to already have a function for returning the default allocator instance, so we provide it.
}
+
+
+ /// default_allocfreemethod
+ ///
+ /// Implements a default allocfreemethod which uses the default global allocator.
+ /// This version supports only default alignment.
+ ///
+ inline void* default_allocfreemethod(size_t n, void* pBuffer, void* /*pContext*/)
+ {
+ EASTLAllocatorType* const pAllocator = EASTLAllocatorDefault();
+
+ if(pBuffer) // If freeing...
+ {
+ EASTLFree(*pAllocator, pBuffer, n);
+ return NULL; // The return value is meaningless for the free.
+ }
+ else // allocating
+ return EASTLAlloc(*pAllocator, n);
+ }
+
+
/// allocate_memory
///
/// This is a memory allocation dispatching function which routes a request
/// to either the default or the aligned allocation path of the allocator.
/// To do: Make aligned and unaligned specializations.
/// Note that to do this we will need to use a class with a static
/// function instead of a standalone function like below.
///
template <typename Allocator>
void* allocate_memory(Allocator& a, size_t n, size_t alignment, size_t alignmentOffset)
{
    // Alignments of 8 or less are satisfied by the default allocation path.
    if(alignment > 8)
        return EASTLAllocAligned(a, n, alignment, alignmentOffset);
    return EASTLAlloc(a, n);
}
+
+} // namespace eastl
+
+
+
+
+
+#ifndef EASTL_USER_DEFINED_ALLOCATOR // If the user hasn't declared that he has defined a different allocator implementation elsewhere...
+
+# ifdef _MSC_VER
+# pragma warning(push, 0)
+# include <new>
+# pragma warning(pop)
+# else
+# include <new>
+# endif
+
+# if !EASTL_DLL // If building a regular library and not building EASTL as a DLL...
+ // It is expected that the application define the following
+ // versions of operator new for the application. Either that or the
+ // user needs to override the implementation of the allocator class.
+ void* operator new[](size_t size, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+ void* operator new[](size_t size, size_t alignment, size_t alignmentOffset, const char* pName, int flags, unsigned debugFlags, const char* file, int line);
+# endif
+
+ namespace eastl
+ {
// Constructs the allocator with the given debug name, falling back to
// EASTL_ALLOCATOR_DEFAULT_NAME when pName is NULL. The parameter itself is
// compiled away via the EASTL_NAME macro when naming is disabled.
inline allocator::allocator(const char* EASTL_NAME(pName))
{
#if EASTL_NAME_ENABLED
    mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
#endif
}
+
+
// Copy constructor: copies the debug name, which is the only member state
// this allocator carries (and only when EASTL_NAME_ENABLED is set).
inline allocator::allocator(const allocator& EASTL_NAME(alloc))
{
#if EASTL_NAME_ENABLED
    mpName = alloc.mpName;
#endif
}
+
+
// "Copy with new name" constructor: the source allocator contributes no state
// here; pName (or the default name when NULL) becomes the debug name.
inline allocator::allocator(const allocator&, const char* EASTL_NAME(pName))
{
#if EASTL_NAME_ENABLED
    mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
#endif
}
+
+
// Assignment copies the debug name; there is no other member state.
inline allocator& allocator::operator=(const allocator& EASTL_NAME(alloc))
{
#if EASTL_NAME_ENABLED
    mpName = alloc.mpName;
#endif
    return *this;
}
+
+
// Returns the allocator's debug name. When naming is compiled out, the
// compile-time default name is returned instead.
inline const char* allocator::get_name() const
{
#if EASTL_NAME_ENABLED
    return mpName;
#else
    return EASTL_ALLOCATOR_DEFAULT_NAME;
#endif
}
+
+
// Sets the debug name. Note that, unlike the constructors, a NULL pName is
// stored as-is here (no fallback to the default name).
inline void allocator::set_name(const char* EASTL_NAME(pName))
{
#if EASTL_NAME_ENABLED
    mpName = pName;
#endif
}
+
+
// Allocates n bytes via global operator new[]. inFile/inLine are forwarded to
// the debug operator new overloads when EASTL_DEBUGPARAMS_LEVEL requests them.
inline void* allocator::allocate(size_t n, const char* inFile, int inLine, int flags)
{
    // 'pName' is deliberately a macro (not a variable) so the new-expressions
    // below can be written once whether or not naming is compiled in. It is
    // #undef'd at the end of the aligned allocate() overload below.
#if EASTL_NAME_ENABLED
#define pName mpName
#else
#define pName EASTL_ALLOCATOR_DEFAULT_NAME
#endif

#if EASTL_DLL
    // We currently have no support for implementing flags when
    // using the C runtime library operator new function. The user
    // can use SetDefaultAllocator to override the default allocator.
    // (inFile/inLine are likewise unused in this configuration.)
    (void)flags;
    return ::new char[n];
#elif (EASTL_DEBUGPARAMS_LEVEL <= 0)
    return ::new((char*)0, flags, 0, inFile, inLine) char[n]; // Debug params minimal: pass a null name.
#elif (EASTL_DEBUGPARAMS_LEVEL == 1)
    return ::new( pName, flags, 0, inFile, inLine) char[n];
#else
    return ::new( pName, flags, 0, inFile, inLine) char[n];
#endif
}
+
+
+ inline void* allocator::allocate(size_t n, const char* inFile, int inLine, size_t alignment, size_t offset, int flags)
+ {
+# if EASTL_DLL
+ // We have a problem here. We cannot support alignment, as we don't have access
+ // to a memory allocator that can provide aligned memory. The C++ standard doesn't
+ // recognize such a thing. The user will need to call SetDefaultAllocator to
+ // provide an alloator which supports alignment.
+ EASTL_ASSERT(alignment <= 8); // 8 (sizeof(double)) is the standard alignment returned by operator new.
+ (void)alignment; (void)offset; (void)flags;
+ return new char[n];
+# elif (EASTL_DEBUGPARAMS_LEVEL <= 0)
+ return ::new(alignment, offset, (char*)0, flags, 0, inFile, inLine) char[n];
+# elif (EASTL_DEBUGPARAMS_LEVEL == 1)
+ return ::new(alignment, offset, pName, flags, 0, inFile, inLine) char[n];
+# else
+ return ::new(alignment, offset, pName, flags, 0, inFile, inLine) char[n];
+# endif
+
+# undef pName // See above for the definition of this.
+ }
+
+
+ inline void allocator::deallocate(void* p, size_t)
+ {
+ delete[] (char*)p;
+ }
+
+
// In this default implementation every allocation goes through global
// new/delete, so any allocator instance can free another's memory and all
// instances are interchangeable.
inline bool operator==(const allocator&, const allocator&)
{
    return true; // All allocators are considered equal, as they merely use global new/delete.
}


// Defined by the same rule as operator== above: instances are never unequal.
inline bool operator!=(const allocator&, const allocator&)
{
    return false; // All allocators are considered equal, as they merely use global new/delete.
}
+
+
+ } // namespace eastl
+
+
+#endif // EASTL_USER_DEFINED_ALLOCATOR
+
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/bitset.h b/UnknownVersion/include/EASTL/bitset.h
new file mode 100644
index 0000000..f099325
--- /dev/null
+++ b/UnknownVersion/include/EASTL/bitset.h
@@ -0,0 +1,1777 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/bitset.h
+//
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a bitset much like the C++ std::bitset class.
+// The primary distinctions between this list and std::bitset are:
+// - bitset is more efficient than some other std::bitset implementations,
+// - bitset is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - bitset is savvy to environments in which 'unsigned long' is not the
+// most efficient integral data type. std::bitset implementations use
+// unsigned long, even if it is an inefficient integer type.
+// - bitset removes as much function calls as practical, in order to allow
+// debug builds to run closer in speed and code footprint to release builds.
+// - bitset doesn't support string functionality. We can add this if
+// it is deemed useful.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_BITSET_H
+#define EASTL_BITSET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/algorithm.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+#include <stddef.h>
+#ifdef __MWERKS__
+ #include <../Include/string.h> // Force the compiler to use the std lib header.
+#else
+ #include <string.h>
+#endif
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+#if EASTL_EXCEPTIONS_ENABLED
+ #ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #endif
+ #include <stdexcept> // std::out_of_range, std::length_error.
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+#endif
+
+#if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable: 4127) // Conditional expression is constant
+#elif defined(__SNC__)
+ #pragma control %push diag
+ #pragma diag_suppress=187 // Pointless comparison of unsigned integer with zero
+#endif
+
+
+namespace eastl
+{
+
+ /// BitsetWordType
+ ///
+ /// Defines the integral data type used by bitset.
+ /// The C++ standard specifies that the std::bitset word type be unsigned long,
+ /// but that isn't necessarily the most efficient data type for the given platform.
+ /// We can follow the standard and be potentially less efficient or we can do what
+ /// is more efficient but less like the C++ std::bitset.
+ ///
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ typedef uint32_t BitsetWordType;
+ const uint32_t kBitsPerWord = 32;
+ const uint32_t kBitsPerWordMask = 31;
+ const uint32_t kBitsPerWordShift = 5;
+ #else
+ typedef uint64_t BitsetWordType;
+ const uint32_t kBitsPerWord = 64;
+ const uint32_t kBitsPerWordMask = 63;
+ const uint32_t kBitsPerWordShift = 6;
+ #endif
+
+
+
+ /// BITSET_WORD_COUNT
+ ///
+ /// Defines the number of words we use, based on the number of bits.
+ ///
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x can't handle the simpler declaration below.
+ #define BITSET_WORD_COUNT(nBitCount) (N == 0 ? 1 : ((N - 1) / (8 * sizeof(BitsetWordType)) + 1))
+ #else
+ #define BITSET_WORD_COUNT(nBitCount) ((N - 1) / (8 * sizeof(BitsetWordType)) + 1)
+ #endif
+
+
+
/// BitsetBase
///
/// This is a default implementation that works for any number of words NW.
/// bitset<N> derives from the BitsetBase specialization selected by
/// BITSET_WORD_COUNT(N); the <1> and <2> specializations below provide
/// versions tuned for small bitsets.
///
template <size_t NW> // Templated on the number of words used to hold the bitset.
struct BitsetBase
{
    typedef BitsetWordType word_type;
    typedef BitsetBase<NW> this_type;
#if EASTL_BITSET_SIZE_T
    typedef size_t size_type;
#else
    typedef eastl_size_t size_type;
#endif

public:
    word_type mWord[NW]; // The bit storage; mWord[0] receives the uint32_t constructor value.

public:
    BitsetBase();               // All bits cleared.
    BitsetBase(uint32_t value); // mWord[0] initialized from value; remaining words cleared.

    // Word-wise bitwise operations against another base of the same size.
    void operator&=(const this_type& x);
    void operator|=(const this_type& x);
    void operator^=(const this_type& x);

    void operator<<=(size_type n);
    void operator>>=(size_type n);

    void flip();                       // Inverts every bit.
    void set();                        // Sets every bit.
    void set(size_type i, bool value); // Sets or clears bit i.
    void reset();                      // Clears every bit.

    bool operator==(const this_type& x) const;

    bool any() const;        // True if at least one bit is set.
    size_type count() const; // Number of set bits.

    unsigned long to_ulong() const;

    // Access to the word containing bit index i.
    word_type& DoGetWord(size_type i);
    word_type DoGetWord(size_type i) const;

    // Bit-scanning helpers backing bitset's find_first/find_next/find_last/find_prev.
    size_type DoFindFirst() const;
    size_type DoFindNext(size_type last_find) const;

    size_type DoFindLast() const;
    size_type DoFindPrev(size_type last_find) const;

}; // class BitsetBase
+
+
+
/// BitsetBase<1>
///
/// This is a specialization for a bitset that fits within one word, which
/// lets the operations dispense with the word loops of the generic version.
///
template <>
struct BitsetBase<1>
{
    typedef BitsetWordType word_type;
    typedef BitsetBase<1> this_type;
#if EASTL_BITSET_SIZE_T
    typedef size_t size_type;
#else
    typedef eastl_size_t size_type;
#endif

public:
    word_type mWord[1]; // Defined as an array of 1 so that bitset can treat this BitsetBase like others.

public:
    BitsetBase();
    BitsetBase(uint32_t value);

    void operator&=(const this_type& x);
    void operator|=(const this_type& x);
    void operator^=(const this_type& x);

    void operator<<=(size_type n);
    void operator>>=(size_type n);

    void flip();
    void set();
    void set(size_type i, bool value);
    void reset();

    bool operator==(const this_type& x) const;

    bool any() const;
    size_type count() const;

    unsigned long to_ulong() const;

    // The word-index parameter is unnamed (unused) since there is only one word.
    word_type& DoGetWord(size_type);
    word_type DoGetWord(size_type) const;

    size_type DoFindFirst() const;
    size_type DoFindNext(size_type last_find) const;

    size_type DoFindLast() const;
    size_type DoFindPrev(size_type last_find) const;

}; // BitsetBase<1>
+
+
+
/// BitsetBase<2>
///
/// This is a specialization for a bitset that fits within two words.
/// The difference here is that we avoid branching (ifs and loops).
///
template <>
struct BitsetBase<2>
{
    typedef BitsetWordType word_type;
    typedef BitsetBase<2> this_type;
#if EASTL_BITSET_SIZE_T
    typedef size_t size_type;
#else
    typedef eastl_size_t size_type;
#endif

public:
    word_type mWord[2]; // Two-word storage, mirrored on the generic mWord[NW] layout.

public:
    BitsetBase();
    BitsetBase(uint32_t value);

    void operator&=(const this_type& x);
    void operator|=(const this_type& x);
    void operator^=(const this_type& x);

    void operator<<=(size_type n);
    void operator>>=(size_type n);

    void flip();
    void set();
    void set(size_type i, bool value);
    void reset();

    bool operator==(const this_type& x) const;

    bool any() const;
    size_type count() const;

    unsigned long to_ulong() const;

    // Access to the word containing the given bit index.
    word_type& DoGetWord(size_type);
    word_type DoGetWord(size_type) const;

    size_type DoFindFirst() const;
    size_type DoFindNext(size_type last_find) const;

    size_type DoFindLast() const;
    size_type DoFindPrev(size_type last_find) const;

}; // BitsetBase<2>
+
+
+
+
/// bitset
///
/// Implements a bitset much like the C++ std::bitset.
///
/// As of this writing we don't have an implementation of bitset<0>,
/// as it is deemed an academic exercise that nobody should actually
/// use and it would increase code space.
///
/// Note: bitset shifts of a magnitude >= sizeof(BitsetWordType)
/// (e.g. shift of 32 on a 32 bit system) are not guaranteed to work
/// properly. This is because some systems (e.g. Intel x86) take the
/// shift value and mod it to the word size and thus a shift of 32
/// can become a shift of 0 on a 32 bit system. We don't attempt to
/// resolve this behaviour in this class because doing so would lead
/// to a less efficient implementation and the vast majority of the
/// time the user doesn't do shifts of >= word size. You can work
/// around this by implementing a shift of 32 as two shifts of 16.
///
template <size_t N>
class bitset : private BitsetBase<BITSET_WORD_COUNT(N)> // Privately inherits word storage/operations from the size-appropriate base.
{
public:
    typedef BitsetBase<BITSET_WORD_COUNT(N)> base_type;
    typedef bitset<N> this_type;
    typedef BitsetWordType word_type;
    typedef typename base_type::size_type size_type;

    enum
    {
        kSize = N,                         // Number of bits.
        kWordCount = BITSET_WORD_COUNT(N), // Number of storage words.
        kNW = kWordCount                   // This name is deprecated.
    };

    using base_type::mWord;
    using base_type::DoGetWord;
    using base_type::DoFindFirst;
    using base_type::DoFindNext;
    using base_type::DoFindLast;
    using base_type::DoFindPrev;

public:
    /// reference
    ///
    /// A reference is a reference to a specific bit in the bitset.
    /// The C++ standard specifies that this be a nested class,
    /// though it is not clear if a non-nested reference implementation
    /// would be non-conforming.
    ///
    class reference
    {
    protected:
        friend class bitset;

        word_type* mpBitWord; // The word holding the referenced bit.
        size_type mnBitIndex; // Bit index; only its low bits (index within the word) are used when reading.

        reference(){} // The C++ standard specifies that this is private.

    public:
        reference(const bitset& x, size_type i);

        reference& operator=(bool value);
        reference& operator=(const reference& x);

        bool operator~() const; // Returns the inverse of the referenced bit.
        operator bool() const // Defined inline because CodeWarrior fails to be able to compile it outside.
            { return (*mpBitWord & (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask))) != 0; }

        reference& flip();
    };

public:
    friend class reference;

    bitset();               // All bits cleared.
    bitset(uint32_t value); // Low bits initialized from value (see BitsetBase(uint32_t)).

    // We don't define copy constructor and operator= because
    // the compiler-generated versions will suffice.

    this_type& operator&=(const this_type& x);
    this_type& operator|=(const this_type& x);
    this_type& operator^=(const this_type& x);

    this_type& operator<<=(size_type n);
    this_type& operator>>=(size_type n);

    this_type& set();                               // Sets all bits.
    this_type& set(size_type i, bool value = true); // Sets (or clears) bit i.

    this_type& reset();            // Clears all bits.
    this_type& reset(size_type i); // Clears bit i.

    this_type& flip();            // Inverts all bits.
    this_type& flip(size_type i); // Inverts bit i.
    this_type operator~() const;  // Returns an inverted copy.

    reference operator[](size_type i); // Mutable per-bit proxy.
    bool operator[](size_type i) const;

    const word_type* data() const; // Raw access to the underlying words.
    word_type* data();

    unsigned long to_ulong() const;

    size_type count() const; // Number of set bits.
    size_type size() const;  // Number of bits.

    bool operator==(const this_type& x) const;
    bool operator!=(const this_type& x) const;

    bool test(size_type i) const; // True if bit i is set.
    bool any() const;             // True if any bit is set.
    bool none() const;            // True if no bit is set.

    this_type operator<<(size_type n) const;
    this_type operator>>(size_type n) const;

    // Finds the index of the first "on" bit, returns kSize if none are set.
    size_type find_first() const;

    // Finds the index of the next "on" bit after last_find, returns kSize if none are set.
    size_type find_next(size_type last_find) const;

    // Finds the index of the last "on" bit, returns kSize if none are set.
    size_type find_last() const;

    // Finds the index of the last "on" bit before last_find, returns kSize if none are set.
    size_type find_prev(size_type last_find) const;

}; // bitset
+
+
+
+
+
+
+
/// BitsetCountBits
///
/// This is a fast trick way to count bits without branches nor memory
/// accesses (a SWAR population count).
///
#if(EA_PLATFORM_WORD_SIZE == 4)
    inline uint32_t BitsetCountBits(uint32_t x)
    {
        // Sum bits in 2-, 4-, then 8-bit groups; the final multiply
        // accumulates all byte counts into the top byte.
        x = x - ((x >> 1) & 0x55555555);
        x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
        x = (x + (x >> 4)) & 0x0F0F0F0F;
        return (uint32_t)((x * 0x01010101) >> 24);
    }
#else
    inline uint32_t BitsetCountBits(uint64_t x)
    {
        // GCC 3.x's implementation of UINT64_C is broken and fails to deal with
        // the code below correctly. So we make a workaround for it. Earlier and
        // later versions of GCC don't have this bug.
        #if defined(__GNUC__) && (__GNUC__ == 3)
            x = x - ((x >> 1) & 0x5555555555555555ULL);
            x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
            x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
            return (uint32_t)((x * 0x0101010101010101ULL) >> 56);
        #else
            x = x - ((x >> 1) & UINT64_C(0x5555555555555555));
            x = (x & UINT64_C(0x3333333333333333)) + ((x >> 2) & UINT64_C(0x3333333333333333));
            x = (x + (x >> 4)) & UINT64_C(0x0F0F0F0F0F0F0F0F);
            return (uint32_t)((x * UINT64_C(0x0101010101010101)) >> 56);
        #endif
    }
#endif
+
	// Nibble-indexed popcount table encoded as a string literal: entry w (0..15)
	// is the number of set bits in w. Used by count() as the fallback when no
	// compiler popcount intrinsic is available.
	// const static char kBitsPerUint16[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 };
	#define EASTL_BITSET_COUNT_STRING "\0\1\1\2\1\2\2\3\1\2\2\3\2\3\3\4"
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // BitsetBase
+ //
+ // We tried two forms of array access here:
+ // for(word_type *pWord(mWord), *pWordEnd(mWord + NW); pWord < pWordEnd; ++pWord)
+ // *pWord = ...
+ // and
+ // for(size_t i = 0; i < NW; i++)
+ // mWord[i] = ...
+ //
+ // For our tests (~NW < 16), the latter (using []) access resulted in faster code.
+ ///////////////////////////////////////////////////////////////////////////
+
	// Default-constructs with all words zeroed.
	template <size_t NW>
	inline BitsetBase<NW>::BitsetBase()
	{
		reset();
	}


	// Constructs with the low 32 bits initialized from value; all other bits are zero.
	template <size_t NW>
	inline BitsetBase<NW>::BitsetBase(uint32_t value)
	{
		// This implementation assumes that sizeof(value) <= sizeof(BitsetWordType).
		EASTL_CT_ASSERT(sizeof(value) <= sizeof(BitsetWordType));

		reset();
		mWord[0] = static_cast<word_type>(value);
	}


	// Word-wise bitwise AND, in place. (Indexed access chosen deliberately; see the
	// BitsetBase header comment above.)
	template <size_t NW>
	inline void BitsetBase<NW>::operator&=(const this_type& x)
	{
		for(size_t i = 0; i < NW; i++)
			mWord[i] &= x.mWord[i];
	}


	// Word-wise bitwise OR, in place.
	template <size_t NW>
	inline void BitsetBase<NW>::operator|=(const this_type& x)
	{
		for(size_t i = 0; i < NW; i++)
			mWord[i] |= x.mWord[i];
	}


	// Word-wise bitwise XOR, in place.
	template <size_t NW>
	inline void BitsetBase<NW>::operator^=(const this_type& x)
	{
		for(size_t i = 0; i < NW; i++)
			mWord[i] ^= x.mWord[i];
	}
+
+
	// Shifts all words left by n bits (n may span multiple words). Bits shifted out of
	// the top word are discarded; the derived bitset clears unused high bits afterward.
	template <size_t NW>
	inline void BitsetBase<NW>::operator<<=(size_type n)
	{
		const size_type nWordShift = (size_type)(n >> kBitsPerWordShift); // number of whole words to shift

		if(nWordShift)
		{
			// Move whole words toward the high end, zero-filling from the low end.
			for(int i = (int)(NW - 1); i >= 0; --i)
				mWord[i] = (nWordShift <= (size_type)i) ? mWord[i - nWordShift] : (word_type)0;
		}

		// Remaining sub-word shift. The 'if' also guards against an undefined
		// shift by kBitsPerWord in the expression below when n is a word multiple.
		if(n &= kBitsPerWordMask)
		{
			for(size_t i = (NW - 1); i > 0; --i)
				mWord[i] = (word_type)((mWord[i] << n) | (mWord[i - 1] >> (kBitsPerWord - n)));
			mWord[0] <<= n;
		}

		// We let the parent class turn off any upper bits.
	}


	// Shifts all words right by n bits (n may span multiple words), zero-filling from
	// the high end.
	template <size_t NW>
	inline void BitsetBase<NW>::operator>>=(size_type n)
	{
		const size_type nWordShift = (size_type)(n >> kBitsPerWordShift); // number of whole words to shift

		if(nWordShift)
		{
			// Move whole words toward the low end, zero-filling from the high end.
			for(size_t i = 0; i < NW; ++i)
				mWord[i] = ((nWordShift < (NW - i)) ? mWord[i + nWordShift] : (word_type)0);
		}

		// Remaining sub-word shift; the 'if' guards the undefined full-width shift below.
		if(n &= kBitsPerWordMask)
		{
			for(size_t i = 0; i < (NW - 1); ++i)
				mWord[i] = (word_type)((mWord[i] >> n) | (mWord[i + 1] << (kBitsPerWord - n)));
			mWord[NW - 1] >>= n;
		}
	}
+
+
	// Inverts every word. The derived bitset clears any unused high bits afterward.
	template <size_t NW>
	inline void BitsetBase<NW>::flip()
	{
		for(size_t i = 0; i < NW; i++)
			mWord[i] = ~mWord[i];
		// We let the parent class turn off any upper bits.
	}


	// Sets every bit of every word. The derived bitset clears any unused high bits afterward.
	template <size_t NW>
	inline void BitsetBase<NW>::set()
	{
		for(size_t i = 0; i < NW; i++)
			mWord[i] = ~static_cast<word_type>(0);
		// We let the parent class turn off any upper bits.
	}


	// Sets or clears bit i. No range checking here; the derived bitset validates i.
	template <size_t NW>
	inline void BitsetBase<NW>::set(size_type i, bool value)
	{
		if(value)
			mWord[i >> kBitsPerWordShift] |= (static_cast<word_type>(1) << (i & kBitsPerWordMask));
		else
			mWord[i >> kBitsPerWordShift] &= ~(static_cast<word_type>(1) << (i & kBitsPerWordMask));
	}


	// Clears all bits.
	template <size_t NW>
	inline void BitsetBase<NW>::reset()
	{
		if(NW > 16) // This is a constant expression and should be optimized away.
		{
			// This will be fastest if compiler intrinsic function optimizations are enabled.
			memset(mWord, 0, sizeof(mWord));
		}
		else
		{
			for(size_t i = 0; i < NW; i++)
				mWord[i] = 0;
		}
	}


	// Word-wise equality comparison.
	template <size_t NW>
	inline bool BitsetBase<NW>::operator==(const this_type& x) const
	{
		for(size_t i = 0; i < NW; i++)
		{
			if(mWord[i] != x.mWord[i])
				return false;
		}
		return true;
	}


	// Returns true if any bit is set.
	template <size_t NW>
	inline bool BitsetBase<NW>::any() const
	{
		for(size_t i = 0; i < NW; i++)
		{
			if(mWord[i])
				return true;
		}
		return false;
	}
+
+
	// Returns the number of set bits, using the fastest method available for the compiler:
	// popcount intrinsics on GCC >= 3.4, BitsetCountBits on GCC 2.x, otherwise a
	// per-nibble table lookup.
	template <size_t NW>
	inline typename BitsetBase<NW>::size_type
	BitsetBase<NW>::count() const
	{
		size_type n = 0;

		for(size_t i = 0; i < NW; i++)
		{
			#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(__SNC__) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later
				#if(EA_PLATFORM_WORD_SIZE == 4)
					n += (size_type)__builtin_popcountl(mWord[i]);
				#else
					n += (size_type)__builtin_popcountll(mWord[i]);
				#endif
			#elif defined(__GNUC__) && (__GNUC__ < 3)
				n += BitsetCountBits(mWord[i]); // GCC 2.x compiler inexplicably blows up on the code below.
			#else
				// Per-nibble table lookup via EASTL_BITSET_COUNT_STRING.
				for(word_type w = mWord[i]; w; w >>= 4)
					n += EASTL_BITSET_COUNT_STRING[w & 0xF];

				// Version which seems to run slower in benchmarks:
				// n += BitsetCountBits(mWord[i]);
			#endif

		}
		return n;
	}


	// Returns the low word as unsigned long. With exceptions enabled, throws
	// overflow_error if any bit outside the first word is set.
	template <size_t NW>
	inline unsigned long BitsetBase<NW>::to_ulong() const
	{
		#if EASTL_EXCEPTIONS_ENABLED
			for(size_t i = 1; i < NW; ++i)
			{
				if(mWord[i])
					throw overflow_error("BitsetBase::to_ulong");
			}
		#endif
		return (unsigned long)mWord[0]; // Todo: We need to deal with the case whereby sizeof(word_type) < sizeof(unsigned long)
	}


	// Returns a reference to the word containing bit i. No range checking.
	template <size_t NW>
	inline typename BitsetBase<NW>::word_type&
	BitsetBase<NW>::DoGetWord(size_type i)
	{
		return mWord[i >> kBitsPerWordShift];
	}


	// Returns (by value) the word containing bit i. No range checking.
	template <size_t NW>
	inline typename BitsetBase<NW>::word_type
	BitsetBase<NW>::DoGetWord(size_type i) const
	{
		return mWord[i >> kBitsPerWordShift];
	}
+
+
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ inline uint32_t GetFirstBit(uint32_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 1;
+
+ if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; }
+ if((x & 0x000000FF) == 0) { n += 8; x >>= 8; }
+ if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
+ if((x & 0x00000003) == 0) { n += 2; x >>= 2; }
+
+ return (n - ((uint32_t)x & 1));
+ }
+
+ return 32;
+ }
+ #else
+ inline uint32_t GetFirstBit(uint64_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 1;
+
+ if((x & 0xFFFFFFFF) == 0) { n += 32; x >>= 32; }
+ if((x & 0x0000FFFF) == 0) { n += 16; x >>= 16; }
+ if((x & 0x000000FF) == 0) { n += 8; x >>= 8; }
+ if((x & 0x0000000F) == 0) { n += 4; x >>= 4; }
+ if((x & 0x00000003) == 0) { n += 2; x >>= 2; }
+
+ return (n - ((uint32_t)x & 1));
+ }
+
+ return 64;
+ }
+ #endif
+
+
	// Returns the index of the first set bit, or NW * kBitsPerWord if none are set.
	template <size_t NW>
	inline typename BitsetBase<NW>::size_type
	BitsetBase<NW>::DoFindFirst() const
	{
		for(size_type word_index = 0; word_index < NW; ++word_index)
		{
			const size_type fbiw = GetFirstBit(mWord[word_index]); // first-bit-index within this word

			if(fbiw != kBitsPerWord)
				return (word_index * kBitsPerWord) + fbiw;
		}

		return (size_type)NW * kBitsPerWord;
	}


	// Returns the index of the first set bit after last_find, or NW * kBitsPerWord if none.
	template <size_t NW>
	inline typename BitsetBase<NW>::size_type
	BitsetBase<NW>::DoFindNext(size_type last_find) const
	{
		// Start looking from the next bit.
		++last_find;

		// Set initial state based on last find.
		size_type word_index = static_cast<size_type>(last_find >> kBitsPerWordShift);
		size_type bit_index  = static_cast<size_type>(last_find & kBitsPerWordMask);

		// To do: There probably is a more elegant way to write looping below.
		if(word_index < NW)
		{
			// Mask off previous bits of the word so our search becomes a "find first".
			word_type this_word = mWord[word_index] & (~static_cast<word_type>(0) << bit_index);

			for(;;)
			{
				const size_type fbiw = GetFirstBit(this_word);

				if(fbiw != kBitsPerWord)
					return (word_index * kBitsPerWord) + fbiw;

				// Advance to the next (unmasked) word, if any.
				if(++word_index < NW)
					this_word = mWord[word_index];
				else
					break;
			}
		}

		return (size_type)NW * kBitsPerWord;
	}
+
+
+ #if(EA_PLATFORM_WORD_SIZE == 4)
+ inline uint32_t GetLastBit(uint32_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 0;
+
+ if(x & 0xFFFF0000) { n += 16; x >>= 16; }
+ if(x & 0xFFFFFF00) { n += 8; x >>= 8; }
+ if(x & 0xFFFFFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFFFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFFFFFE) { n += 1; }
+
+ return n;
+ }
+
+ return 32;
+ }
+ #else
+ inline uint32_t GetLastBit(uint64_t x)
+ {
+ if(x)
+ {
+ uint32_t n = 0;
+
+ if(x & UINT64_C(0xFFFFFFFF00000000)) { n += 32; x >>= 32; }
+ if(x & 0xFFFF0000) { n += 16; x >>= 16; }
+ if(x & 0xFFFFFF00) { n += 8; x >>= 8; }
+ if(x & 0xFFFFFFF0) { n += 4; x >>= 4; }
+ if(x & 0xFFFFFFFC) { n += 2; x >>= 2; }
+ if(x & 0xFFFFFFFE) { n += 1; }
+
+ return n;
+ }
+
+ return 64;
+ }
+ #endif
+
	// Returns the index of the highest set bit, or NW * kBitsPerWord if none are set.
	template <size_t NW>
	inline typename BitsetBase<NW>::size_type
	BitsetBase<NW>::DoFindLast() const
	{
		// Note: this counts down and terminates via unsigned wrap-around —
		// decrementing word_index past 0 yields a huge value, failing 'word_index < NW'.
		for(size_t word_index = (size_type)NW - 1; word_index < NW; --word_index)
		{
			const size_type lbiw = GetLastBit(mWord[word_index]); // last-bit-index within this word

			if(lbiw != kBitsPerWord)
				return (word_index * kBitsPerWord) + lbiw;
		}

		return (size_type)NW * kBitsPerWord;
	}


	// Returns the index of the highest set bit strictly below last_find, or
	// NW * kBitsPerWord if none. Assumes last_find is within range; larger values
	// would index past mWord — the caller is expected to pass a previous find result.
	template <size_t NW>
	inline typename BitsetBase<NW>::size_type
	BitsetBase<NW>::DoFindPrev(size_type last_find) const
	{
		if(last_find > 0)
		{
			// Set initial state based on last find.
			size_type word_index = static_cast<size_type>(last_find >> kBitsPerWordShift);
			size_type bit_index  = static_cast<size_type>(last_find & kBitsPerWordMask);

			// Mask off subsequent bits of the word so our search becomes a "find last".
			word_type mask      = (~static_cast<word_type>(0) >> (kBitsPerWord - 1 - bit_index)) >> 1; // We do two shifts here because many CPUs ignore requests to shift 32 bit integers by 32 bits, which could be the case above.
			word_type this_word = mWord[word_index] & mask;

			for(;;)
			{
				const size_type lbiw = GetLastBit(this_word);

				if(lbiw != kBitsPerWord)
					return (word_index * kBitsPerWord) + lbiw;

				// Step down to the next lower (unmasked) word, if any.
				if(word_index > 0)
					this_word = mWord[--word_index];
				else
					break;
			}
		}

		return (size_type)NW * kBitsPerWord;
	}
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // BitsetBase<1>
+ ///////////////////////////////////////////////////////////////////////////
+
	// Single-word specialization: all operations reduce to direct word manipulation.
	inline BitsetBase<1>::BitsetBase()
	{
		mWord[0] = 0;
	}


	// Constructs with the low 32 bits initialized from value.
	inline BitsetBase<1>::BitsetBase(uint32_t value)
	{
		// This implementation assumes that sizeof(value) <= sizeof(BitsetWordType).
		EASTL_CT_ASSERT(sizeof(value) <= sizeof(BitsetWordType));

		mWord[0] = static_cast<word_type>(value);
	}


	inline void BitsetBase<1>::operator&=(const this_type& x)
	{
		mWord[0] &= x.mWord[0];
	}


	inline void BitsetBase<1>::operator|=(const this_type& x)
	{
		mWord[0] |= x.mWord[0];
	}


	inline void BitsetBase<1>::operator^=(const this_type& x)
	{
		mWord[0] ^= x.mWord[0];
	}


	// The derived bitset only calls this with n < N (<= kBitsPerWord), so the
	// single-word shift is always well-defined.
	inline void BitsetBase<1>::operator<<=(size_type n)
	{
		mWord[0] <<= n;
		// We let the parent class turn off any upper bits.
	}


	inline void BitsetBase<1>::operator>>=(size_type n)
	{
		mWord[0] >>= n;
	}


	inline void BitsetBase<1>::flip()
	{
		mWord[0] = ~mWord[0];
		// We let the parent class turn off any upper bits.
	}


	inline void BitsetBase<1>::set()
	{
		mWord[0] = ~static_cast<word_type>(0);
		// We let the parent class turn off any upper bits.
	}


	// Sets or clears bit i. No range checking here; the derived bitset validates i.
	inline void BitsetBase<1>::set(size_type i, bool value)
	{
		if(value)
			mWord[0] |= (static_cast<word_type>(1) << i);
		else
			mWord[0] &= ~(static_cast<word_type>(1) << i);
	}


	inline void BitsetBase<1>::reset()
	{
		mWord[0] = 0;
	}


	inline bool BitsetBase<1>::operator==(const this_type& x) const
	{
		return mWord[0] == x.mWord[0];
	}


	inline bool BitsetBase<1>::any() const
	{
		return mWord[0] != 0;
	}


	// Returns the number of set bits, using the fastest method available for the compiler.
	inline BitsetBase<1>::size_type
	BitsetBase<1>::count() const
	{
		#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(__SNC__) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later
			#if(EA_PLATFORM_WORD_SIZE == 4)
				return (size_type)__builtin_popcountl(mWord[0]);
			#else
				return (size_type)__builtin_popcountll(mWord[0]);
			#endif
		#elif defined(__GNUC__) && (__GNUC__ < 3)
			return BitsetCountBits(mWord[0]); // GCC 2.x compiler inexplicably blows up on the code below.
		#else
			// Per-nibble table lookup via EASTL_BITSET_COUNT_STRING.
			size_type n = 0;
			for(word_type w = mWord[0]; w; w >>= 4)
				n += EASTL_BITSET_COUNT_STRING[w & 0xF];
			return n;
		#endif
	}


	// One word always fits; no overflow check needed, unlike the general version.
	inline unsigned long BitsetBase<1>::to_ulong() const
	{
		return static_cast<unsigned long>(mWord[0]);
	}


	// Returns a reference to the single word; the bit index is ignored.
	inline BitsetBase<1>::word_type&
	BitsetBase<1>::DoGetWord(size_type)
	{
		return mWord[0];
	}


	inline BitsetBase<1>::word_type
	BitsetBase<1>::DoGetWord(size_type) const
	{
		return mWord[0];
	}
+
+
	// Returns the index of the first set bit, or kBitsPerWord if none are set.
	inline BitsetBase<1>::size_type
	BitsetBase<1>::DoFindFirst() const
	{
		return GetFirstBit(mWord[0]);
	}


	// Returns the index of the first set bit after last_find, or kBitsPerWord if none.
	inline BitsetBase<1>::size_type
	BitsetBase<1>::DoFindNext(size_type last_find) const
	{
		if(++last_find < kBitsPerWord)
		{
			// Mask off previous bits of word so our search becomes a "find first".
			const word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) << last_find);

			return GetFirstBit(this_word);
		}

		return kBitsPerWord;
	}


	// Returns the index of the highest set bit, or kBitsPerWord if none are set.
	inline BitsetBase<1>::size_type
	BitsetBase<1>::DoFindLast() const
	{
		return GetLastBit(mWord[0]);
	}


	// Returns the index of the highest set bit strictly below last_find, or kBitsPerWord
	// if none. Assumes last_find <= kBitsPerWord; larger values would make the shift
	// below undefined.
	inline BitsetBase<1>::size_type
	BitsetBase<1>::DoFindPrev(size_type last_find) const
	{
		if(last_find > 0)
		{
			// Mask off subsequent bits of word so our search becomes a "find last".
			const word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) >> (kBitsPerWord - last_find));

			return GetLastBit(this_word);
		}

		return kBitsPerWord;
	}
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // BitsetBase<2>
+ ///////////////////////////////////////////////////////////////////////////
+
	// Two-word specialization: operations are fully unrolled over mWord[0..1].
	inline BitsetBase<2>::BitsetBase()
	{
		mWord[0] = 0;
		mWord[1] = 0;
	}


	// Constructs with the low 32 bits initialized from value; the high word is zero.
	inline BitsetBase<2>::BitsetBase(uint32_t value)
	{
		// This implementation assumes that sizeof(value) <= sizeof(BitsetWordType).
		EASTL_CT_ASSERT(sizeof(value) <= sizeof(BitsetWordType));

		mWord[0] = static_cast<word_type>(value);
		mWord[1] = 0;
	}


	inline void BitsetBase<2>::operator&=(const this_type& x)
	{
		mWord[0] &= x.mWord[0];
		mWord[1] &= x.mWord[1];
	}


	inline void BitsetBase<2>::operator|=(const this_type& x)
	{
		mWord[0] |= x.mWord[0];
		mWord[1] |= x.mWord[1];
	}


	inline void BitsetBase<2>::operator^=(const this_type& x)
	{
		mWord[0] ^= x.mWord[0];
		mWord[1] ^= x.mWord[1];
	}
+
+
+ inline void BitsetBase<2>::operator<<=(size_type n)
+ {
+ if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle high bits and n >= 64
+ {
+ mWord[1] = mWord[0];
+ mWord[0] = 0;
+ n -= kBitsPerWord;
+ }
+
+ mWord[1] = (mWord[1] << n) | (mWord[0] >> (kBitsPerWord - n)); // Intentionally use | instead of +.
+ mWord[0] <<= n;
+ // We let the parent class turn off any upper bits.
+ }
+
+
+ inline void BitsetBase<2>::operator>>=(size_type n)
+ {
+ if(EASTL_UNLIKELY(n >= kBitsPerWord)) // parent expected to handle n >= 64
+ {
+ mWord[0] = mWord[1];
+ mWord[1] = 0;
+ n -= kBitsPerWord;
+ }
+
+ mWord[0] = (mWord[0] >> n) | (mWord[1] << (kBitsPerWord - n)); // Intentionally use | instead of +.
+ mWord[1] >>= n;
+ }
+
+
	inline void BitsetBase<2>::flip()
	{
		mWord[0] = ~mWord[0];
		mWord[1] = ~mWord[1];
		// We let the parent class turn off any upper bits.
	}


	inline void BitsetBase<2>::set()
	{
		mWord[0] = ~static_cast<word_type>(0);
		mWord[1] = ~static_cast<word_type>(0);
		// We let the parent class turn off any upper bits.
	}


	// Sets or clears bit i. No range checking here; the derived bitset validates i.
	inline void BitsetBase<2>::set(size_type i, bool value)
	{
		if(value)
			mWord[i >> kBitsPerWordShift] |= (static_cast<word_type>(1) << (i & kBitsPerWordMask));
		else
			mWord[i >> kBitsPerWordShift] &= ~(static_cast<word_type>(1) << (i & kBitsPerWordMask));
	}


	inline void BitsetBase<2>::reset()
	{
		mWord[0] = 0;
		mWord[1] = 0;
	}


	inline bool BitsetBase<2>::operator==(const this_type& x) const
	{
		return (mWord[0] == x.mWord[0]) && (mWord[1] == x.mWord[1]);
	}


	inline bool BitsetBase<2>::any() const
	{
		// Or with two branches: { return (mWord[0] != 0) || (mWord[1] != 0); }
		return (mWord[0] | mWord[1]) != 0;
	}


	// Returns the number of set bits across both words.
	inline BitsetBase<2>::size_type
	BitsetBase<2>::count() const
	{
		#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) && !defined(__SNC__) && !defined(EA_PLATFORM_ANDROID) // GCC 3.4 or later
			#if(EA_PLATFORM_WORD_SIZE == 4)
				return (size_type)__builtin_popcountl(mWord[0]) + (size_type)__builtin_popcountl(mWord[1]);
			#else
				return (size_type)__builtin_popcountll(mWord[0]) + (size_type)__builtin_popcountll(mWord[1]);
			#endif

		#else
			return BitsetCountBits(mWord[0]) + BitsetCountBits(mWord[1]);
		#endif
	}


	// Returns the low word as unsigned long. With exceptions enabled, throws
	// overflow_error if any bit of the high word is set.
	inline unsigned long BitsetBase<2>::to_ulong() const
	{
		#if EASTL_EXCEPTIONS_ENABLED
			if(mWord[1])
				throw overflow_error("BitsetBase::to_ulong");
		#endif
		return (unsigned long)mWord[0]; // Todo: We need to deal with the case whereby sizeof(word_type) < sizeof(unsigned long)
	}


	// Returns a reference to the word containing bit i. No range checking.
	inline BitsetBase<2>::word_type&
	BitsetBase<2>::DoGetWord(size_type i)
	{
		return mWord[i >> kBitsPerWordShift];
	}


	inline BitsetBase<2>::word_type
	BitsetBase<2>::DoGetWord(size_type i) const
	{
		return mWord[i >> kBitsPerWordShift];
	}
+
+
	// Returns the index of the first set bit, or 2 * kBitsPerWord if none are set.
	inline BitsetBase<2>::size_type
	BitsetBase<2>::DoFindFirst() const
	{
		size_type fbiw = GetFirstBit(mWord[0]); // first-bit-index within word

		if(fbiw != kBitsPerWord)
			return fbiw;

		fbiw = GetFirstBit(mWord[1]);

		if(fbiw != kBitsPerWord)
			return kBitsPerWord + fbiw;

		return 2 * kBitsPerWord;
	}


	// Returns the index of the first set bit after last_find, or 2 * kBitsPerWord if none.
	inline BitsetBase<2>::size_type
	BitsetBase<2>::DoFindNext(size_type last_find) const
	{
		// If the last find was in the first word, we must check it and then possibly the second.
		if(++last_find < (size_type)kBitsPerWord)
		{
			// Mask off previous bits of word so our search becomes a "find first".
			word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) << last_find);

			// Step through words.
			size_type fbiw = GetFirstBit(this_word);

			if(fbiw != kBitsPerWord)
				return fbiw;

			fbiw = GetFirstBit(mWord[1]);

			if(fbiw != kBitsPerWord)
				return kBitsPerWord + fbiw;
		}
		else if(last_find < (size_type)(2 * kBitsPerWord))
		{
			// The last find was in the second word, remove the bit count of the first word from the find.
			last_find -= kBitsPerWord;

			// Mask off previous bits of word so our search becomes a "find first".
			word_type this_word = mWord[1] & ((~static_cast<word_type>(0)) << last_find);

			const size_type fbiw = GetFirstBit(this_word);

			if(fbiw != kBitsPerWord)
				return kBitsPerWord + fbiw;
		}

		return 2 * kBitsPerWord;
	}


	// Returns the index of the highest set bit, or 2 * kBitsPerWord if none are set.
	inline BitsetBase<2>::size_type
	BitsetBase<2>::DoFindLast() const
	{
		size_type lbiw = GetLastBit(mWord[1]); // last-bit-index within word; high word first

		if(lbiw != kBitsPerWord)
			return kBitsPerWord + lbiw;

		lbiw = GetLastBit(mWord[0]);

		if(lbiw != kBitsPerWord)
			return lbiw;

		return 2 * kBitsPerWord;
	}


	// Returns the index of the highest set bit strictly below last_find, or
	// 2 * kBitsPerWord if none. Assumes last_find <= 2 * kBitsPerWord.
	inline BitsetBase<2>::size_type
	BitsetBase<2>::DoFindPrev(size_type last_find) const
	{
		// If the last find was in the second word, we must check it and then possibly the first.
		if(last_find > (size_type)kBitsPerWord)
		{
			// This has the same effect as last_find %= kBitsPerWord in our case.
			last_find -= kBitsPerWord;

			// Mask off subsequent bits of word so our search becomes a "find last".
			word_type this_word = mWord[1] & ((~static_cast<word_type>(0)) >> (kBitsPerWord - last_find));

			// Step through words.
			size_type lbiw = GetLastBit(this_word);

			if(lbiw != kBitsPerWord)
				return kBitsPerWord + lbiw;

			lbiw = GetLastBit(mWord[0]);

			if(lbiw != kBitsPerWord)
				return lbiw;
		}
		else if(last_find != 0)
		{
			// Mask off subsequent bits of word so our search becomes a "find last".
			word_type this_word = mWord[0] & ((~static_cast<word_type>(0)) >> (kBitsPerWord - last_find));

			const size_type lbiw = GetLastBit(this_word);

			if(lbiw != kBitsPerWord)
				return lbiw;
		}

		return 2 * kBitsPerWord;
	}
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bitset::reference
+ ///////////////////////////////////////////////////////////////////////////
+
	// Constructs a proxy reference to bit i of bitset x, caching the containing word
	// and the bit's position within it.
	template <size_t N>
	inline bitset<N>::reference::reference(const bitset& x, size_type i)
		: mpBitWord(&const_cast<bitset&>(x).DoGetWord(i)),
		  mnBitIndex(i & kBitsPerWordMask)
	{   // We have an issue here because the above is casting away the const-ness of the source bitset.
		// Empty
	}


	// Assigns a bool to the referenced bit.
	template <size_t N>
	inline typename bitset<N>::reference&
	bitset<N>::reference::operator=(bool value)
	{
		if(value)
			*mpBitWord |= (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
		else
			*mpBitWord &= ~(static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
		return *this;
	}


	// Copies the bit value referenced by x into the bit referenced by this.
	template <size_t N>
	inline typename bitset<N>::reference&
	bitset<N>::reference::operator=(const reference& x)
	{
		if(*x.mpBitWord & (static_cast<word_type>(1) << (x.mnBitIndex & kBitsPerWordMask)))
			*mpBitWord |= (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
		else
			*mpBitWord &= ~(static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask));
		return *this;
	}


	// Returns the logical negation of the referenced bit.
	template <size_t N>
	inline bool bitset<N>::reference::operator~() const
	{
		return (*mpBitWord & (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask))) == 0;
	}


	//Defined inline in the class because Metrowerks fails to be able to compile it here.
	//template <size_t N>
	//inline bitset<N>::reference::operator bool() const
	//{
	//    return (*mpBitWord & (static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask))) != 0;
	//}


	// Toggles the referenced bit.
	template <size_t N>
	inline typename bitset<N>::reference&
	bitset<N>::reference::flip()
	{
		*mpBitWord ^= static_cast<word_type>(1) << (mnBitIndex & kBitsPerWordMask);
		return *this;
	}
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bitset
+ ///////////////////////////////////////////////////////////////////////////
+
	template <size_t N>
	inline bitset<N>::bitset()
		: base_type()
	{
		// Empty. The base class will set all bits to zero.
	}


	// Constructs from a uint32_t, then clears any bits >= N in the top word.
	template <size_t N>
	inline bitset<N>::bitset(uint32_t value)
		: base_type(value)
	{
		if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
			mWord[kNW - 1] &= ~(~static_cast<word_type>(0) << (N & kBitsPerWordMask)); // This clears any high unused bits.
	}


	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::operator&=(const this_type& x)
	{
		base_type::operator&=(x);
		return *this;
	}


	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::operator|=(const this_type& x)
	{
		base_type::operator|=(x);
		return *this;
	}


	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::operator^=(const this_type& x)
	{
		base_type::operator^=(x);
		return *this;
	}


	// Left shift; a shift of N or more clears the bitset, per std::bitset semantics.
	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::operator<<=(size_type n)
	{
		// NOTE(review): the signed intptr_t comparison — presumably guarding against a
		// huge/wrapped n comparing incorrectly as unsigned — differs from operator>>='s
		// plain unsigned compare; confirm intent.
		if(EASTL_LIKELY((intptr_t)n < (intptr_t)N))
		{
			base_type::operator<<=(n);
			if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
				mWord[kNW - 1] &= ~(~static_cast<word_type>(0) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
		}
		else
			base_type::reset();
		return *this;
	}


	// Right shift; a shift of N or more clears the bitset, per std::bitset semantics.
	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::operator>>=(size_type n)
	{
		if(EASTL_LIKELY(n < N))
			base_type::operator>>=(n);
		else
			base_type::reset();
		return *this;
	}
+
+
	// Sets all N bits, then clears the unused high bits of the top word.
	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::set()
	{
		base_type::set(); // This sets all bits.
		if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
			mWord[kNW - 1] &= ~(~static_cast<word_type>(0) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
		return *this;
	}
+
+
+ template <size_t N>
+ inline typename bitset<N>::this_type&
+ bitset<N>::set(size_type i, bool value)
+ {
+ if(i < N)
+ base_type::set(i, value);
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::set -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ else
+ throw out_of_range("bitset::set");
+ #endif
+ return *this;
+ }
+
+
	// Clears all bits.
	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::reset()
	{
		base_type::reset();
		return *this;
	}
+
+
+ template <size_t N>
+ inline typename bitset<N>::this_type&
+ bitset<N>::reset(size_type i)
+ {
+ if(EASTL_LIKELY(i < N))
+ DoGetWord(i) &= ~(static_cast<word_type>(1) << (i & kBitsPerWordMask));
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::reset -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ else
+ throw out_of_range("bitset::reset");
+ #endif
+ return *this;
+ }
+
+
	// Toggles all N bits, then clears the unused high bits of the top word.
	template <size_t N>
	inline typename bitset<N>::this_type&
	bitset<N>::flip()
	{
		base_type::flip();
		if((N & kBitsPerWordMask) || (N == 0)) // If there are any high bits to clear... (If we didn't have this check, then the code below would do the wrong thing when N == 32.
			mWord[kNW - 1] &= ~(~static_cast<word_type>(0) << (N & kBitsPerWordMask)); // This clears any high unused bits. We need to do this so that shift operations proceed correctly.
		return *this;
	}
+
+
+ template <size_t N>
+ inline typename bitset<N>::this_type&
+ bitset<N>::flip(size_type i)
+ {
+ if(EASTL_LIKELY(i < N))
+ DoGetWord(i) ^= (static_cast<word_type>(1) << (i & kBitsPerWordMask));
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::flip -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ else
+ throw out_of_range("bitset::flip");
+ #endif
+ return *this;
+ }
+
+
	// Returns a copy with every bit toggled.
	template <size_t N>
	inline typename bitset<N>::this_type
	bitset<N>::operator~() const
	{
		return this_type(*this).flip();
	}


	// Returns a writable proxy reference to bit i. Asserts (but does not throw) on
	// out-of-range i.
	template <size_t N>
	inline typename bitset<N>::reference
	bitset<N>::operator[](size_type i)
	{
		#if EASTL_ASSERT_ENABLED
			if(EASTL_UNLIKELY(!(i < N)))
				EASTL_FAIL_MSG("bitset::operator[] -- out of range");
		#endif

		return reference(*this, i);
	}


	// Returns the value of bit i. Asserts (but does not throw) on out-of-range i.
	template <size_t N>
	inline bool bitset<N>::operator[](size_type i) const
	{
		#if EASTL_ASSERT_ENABLED
			if(EASTL_UNLIKELY(!(i < N)))
				EASTL_FAIL_MSG("bitset::operator[] -- out of range");
		#endif

		return (DoGetWord(i) & (static_cast<word_type>(1) << (i & kBitsPerWordMask))) != 0;
	}


	// Returns a pointer to the underlying word array (extension beyond std::bitset).
	template <size_t N>
	inline const typename bitset<N>::word_type* bitset<N>::data() const
	{
		return base_type::mWord;
	}


	template <size_t N>
	inline typename bitset<N>::word_type* bitset<N>::data()
	{
		return base_type::mWord;
	}


	// Converts to unsigned long; see BitsetBase::to_ulong for overflow behavior.
	template <size_t N>
	inline unsigned long bitset<N>::to_ulong() const
	{
		return base_type::to_ulong();
	}


	// Returns the number of set bits.
	template <size_t N>
	inline typename bitset<N>::size_type
	bitset<N>::count() const
	{
		return base_type::count();
	}


	// Returns N, the fixed bit count.
	template <size_t N>
	inline typename bitset<N>::size_type
	bitset<N>::size() const
	{
		return (size_type)N;
	}


	template <size_t N>
	inline bool bitset<N>::operator==(const this_type& x) const
	{
		return base_type::operator==(x);
	}


	template <size_t N>
	inline bool bitset<N>::operator!=(const this_type& x) const
	{
		return !base_type::operator==(x);
	}
+
+
+ template <size_t N>
+ inline bool bitset<N>::test(size_type i) const
+ {
+ if(EASTL_LIKELY(i < N))
+ return (DoGetWord(i) & (static_cast<word_type>(1) << (i & kBitsPerWordMask))) != 0;
+
+ #if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(!(i < N)))
+ EASTL_FAIL_MSG("bitset::test -- out of range");
+ #endif
+
+ #if EASTL_EXCEPTIONS_ENABLED
+ else
+ throw out_of_range("bitset::test");
+ #endif
+ return false;
+ }
+
+
	// Returns true if any bit is set.
	template <size_t N>
	inline bool bitset<N>::any() const
	{
		return base_type::any();
	}


	// Returns true if no bit is set.
	template <size_t N>
	inline bool bitset<N>::none() const
	{
		return !base_type::any();
	}


	// Returns a left-shifted copy.
	template <size_t N>
	inline typename bitset<N>::this_type
	bitset<N>::operator<<(size_type n) const
	{
		return this_type(*this).operator<<=(n);
	}


	// Returns a right-shifted copy.
	template <size_t N>
	inline typename bitset<N>::this_type
	bitset<N>::operator>>(size_type n) const
	{
		return this_type(*this).operator>>=(n);
	}


	// Finds the index of the first "on" bit, returns kSize if none are set.
	template <size_t N>
	inline typename bitset<N>::size_type
	bitset<N>::find_first() const
	{
		const size_type i = base_type::DoFindFirst();

		if(i < (kNW * kBitsPerWord)) // This multiplication is a compile-time constant.
			return i;

		return kSize; // Normalize the base class's "not found" value to kSize.
	}


	// Finds the index of the next "on" bit after last_find, returns kSize if none are set.
	template <size_t N>
	inline typename bitset<N>::size_type
	bitset<N>::find_next(size_type last_find) const
	{
		const size_type i = base_type::DoFindNext(last_find);

		if(i < (kNW * kBitsPerWord))// This multiplication is a compile-time constant.
			return i;

		return kSize;
	}


	// Finds the index of the last "on" bit, returns kSize if none are set.
	template <size_t N>
	inline typename bitset<N>::size_type
	bitset<N>::find_last() const
	{
		const size_type i = base_type::DoFindLast();

		if(i < (kNW * kBitsPerWord)) // This multiplication is a compile-time constant.
			return i;

		return kSize;
	}


	// Finds the index of the last "on" bit before last_find, returns kSize if none are set.
	template <size_t N>
	inline typename bitset<N>::size_type
	bitset<N>::find_prev(size_type last_find) const
	{
		const size_type i = base_type::DoFindPrev(last_find);

		if(i < (kNW * kBitsPerWord))// This multiplication is a compile-time constant.
			return i;

		return kSize;
	}
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////////
+
	// Returns the bitwise AND of a and b.
	template <size_t N>
	inline bitset<N> operator&(const bitset<N>& a, const bitset<N>& b)
	{
		// We get better inlining when we don't declare temporary variables.
		return bitset<N>(a).operator&=(b);
	}


	// Returns the bitwise OR of a and b.
	template <size_t N>
	inline bitset<N> operator|(const bitset<N>& a, const bitset<N>& b)
	{
		return bitset<N>(a).operator|=(b);
	}


	// Returns the bitwise XOR of a and b.
	template <size_t N>
	inline bitset<N> operator^(const bitset<N>& a, const bitset<N>& b)
	{
		return bitset<N>(a).operator^=(b);
	}
+
+
+} // namespace eastl
+
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#elif defined(__SNC__)
+ #pragma control %pop diag
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/bonus/sort_extra.h b/UnknownVersion/include/EASTL/bonus/sort_extra.h
new file mode 100644
index 0000000..02f7c6a
--- /dev/null
+++ b/UnknownVersion/include/EASTL/bonus/sort_extra.h
@@ -0,0 +1,482 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/sort_extra.h
+// Written by Paul Pedriana - 2005
+//////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements additional sort algorithms beyond the basic set.
+// Included here are:
+// radix_sort
+// comb_sort
+// bubble_sort
+// selection_sort
+// shaker_sort
+// bucket_sort
+//
+//////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_SORT_EXTRA_H
+#define EASTL_SORT_EXTRA_H
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/functional.h>
+#include <EASTL/heap.h>
+#include <EASTL/allocator.h>
+
+namespace eastl
+{
+
+ /// extract_radix_key
+ ///
+ /// Default radix sort integer value reader. It expects the sorted elements
+ /// to have an integer member of type radix_type and of name "mKey".
+ ///
+	template <typename Node>
+	struct extract_radix_key
+	{
+		typedef typename Node::radix_type radix_type;
+
+		// Returns the node's sort key. The const on the by-value return is
+		// inert; it merely discourages assigning to the returned temporary.
+		const radix_type operator()(const Node& x) const
+			{ return x.mKey; }
+	};
+
+
+ /// radix_sort
+ ///
+ /// Implements a classic LSD (least significant digit) radix sort.
+ /// See http://en.wikipedia.org/wiki/Radix_sort.
+ /// To consider: A static linked-list implementation may be faster than the version here.
+ ///
+ /// Example usage:
+ /// struct Element {
+ /// typedef uint16_t radix_type;
+ /// uint16_t mKey;
+ /// uint16_t mData;
+ /// };
+ ///
+ /// Element elementArray[100];
+ /// Element buffer[100];
+ ///
+ /// radix_sort<Element*, extract_radix_key<Element> >(elementArray, elementArray + 100, buffer);
+ ///
+	// Single counting-sort pass on an 8-bit key.
+	// NOTE(review): on return the sorted sequence resides in
+	// [buffer, buffer + (last - first)); the source range is read, not rewritten.
+	template <typename RandomAccessIterator, typename ExtractKey>
+	void radix_sort_impl(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator buffer, ExtractKey extractKey, uint8_t)
+	{
+		uint32_t EA_PREFIX_ALIGN(16) bucketSize[256] EA_POSTFIX_ALIGN(16);
+		uint32_t EA_PREFIX_ALIGN(16) bucketPosition[256] EA_POSTFIX_ALIGN(16);
+		RandomAccessIterator temp;
+		uint32_t i;
+
+		// Count how many elements fall into each of the 256 buckets.
+		memset(bucketSize, 0, sizeof(bucketSize));
+
+		for(temp = first; temp != last; ++temp)
+			++bucketSize[extractKey(*temp)];
+
+		// Prefix-sum the counts into each bucket's starting offset.
+		for(bucketPosition[0] = 0, i = 0; i < 255; i++)
+			bucketPosition[i + 1] = bucketPosition[i] + bucketSize[i];
+
+		// Stable scatter of the elements into the buffer in sorted order.
+		for(temp = first; temp != last; ++temp)
+		{
+			const size_t radixByte = extractKey(*temp);
+			buffer[bucketPosition[radixByte]++] = *temp;
+		}
+	}
+
+
+	// Two counting-sort passes on a 16-bit key (low byte, then high byte).
+	// The first pass scatters into 'buffer'; the second scatters back into
+	// [first, last), so the final sorted sequence ends up in the source range.
+	template <typename RandomAccessIterator, typename ExtractKey>
+	void radix_sort_impl(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator buffer, ExtractKey extractKey, uint16_t)
+	{
+		uint32_t EA_PREFIX_ALIGN(16) bucketSize[256] EA_POSTFIX_ALIGN(16);
+		uint32_t EA_PREFIX_ALIGN(16) bucketPosition[256] EA_POSTFIX_ALIGN(16);
+		RandomAccessIterator temp;
+		uint32_t i;
+
+		// Process byte 0 (least significant byte).
+		memset(bucketSize, 0, sizeof(bucketSize));
+
+		for(temp = first; temp != last; ++temp)
+			++bucketSize[extractKey(*temp) & 0xff];
+
+		// Prefix-sum the counts into bucket starting offsets.
+		for(bucketPosition[0] = 0, i = 0; i < 255; i++)
+			bucketPosition[i + 1] = bucketPosition[i] + bucketSize[i];
+
+		for(temp = first; temp != last; ++temp)
+		{
+			const size_t radixByte = extractKey(*temp) & 0xff;
+			buffer[bucketPosition[radixByte]++] = *temp;
+		}
+
+
+		// Process byte 1 (second least significant byte).
+		memset(bucketSize, 0, sizeof(bucketSize));
+
+		// Note: 'last' is repointed into the buffer range here; the element
+		// count is preserved as (last - first) before 'last' is overwritten.
+		for(temp = buffer, last = buffer + (last - first); temp != last; ++temp)
+			++bucketSize[extractKey(*temp) >> 8];
+
+		for(bucketPosition[0] = 0, i = 0; i < 255; i++)
+			bucketPosition[i + 1] = bucketPosition[i] + bucketSize[i];
+
+		// Scatter from the buffer back into the original range.
+		for(temp = buffer; temp != last; ++temp)
+		{
+			const size_t radixByte = extractKey(*temp) >> 8;
+			first[bucketPosition[radixByte]++] = *temp;
+		}
+	}
+
+
+	// Generic LSD radix sort: one counting-sort pass per byte of IntegerType,
+	// ping-ponging between the source range and the buffer after each pass.
+	// NOTE(review): after an even number of passes (sizeof(IntegerType) even)
+	// the sorted data is back in the original range; for an odd byte count it
+	// ends in the buffer.
+	template <typename RandomAccessIterator, typename ExtractKey, typename IntegerType>
+	void radix_sort_impl(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator buffer, ExtractKey extractKey, IntegerType)
+	{
+		uint32_t EA_PREFIX_ALIGN(16) bucketSize[256] EA_POSTFIX_ALIGN(16);
+		uint32_t EA_PREFIX_ALIGN(16) bucketPosition[256] EA_POSTFIX_ALIGN(16);
+		RandomAccessIterator temp;
+		uint32_t i;
+
+		// 'j' is the bit shift selecting the byte for this pass.
+		for(uint32_t j = 0; j < (8 * sizeof(IntegerType)); j += 8)
+		{
+			memset(bucketSize, 0, sizeof(bucketSize));
+
+			// Count occurrences of each value of the current byte.
+			for(temp = first; temp != last; ++temp)
+				++bucketSize[(extractKey(*temp) >> j) & 0xff];
+
+			// Prefix-sum the counts into bucket starting offsets.
+			bucketPosition[0] = 0;
+			for(i = 0; i < 255; i++)
+				bucketPosition[i + 1] = bucketPosition[i] + bucketSize[i];
+
+			// Stable scatter into the other range.
+			for(temp = first; temp != last; ++temp)
+			{
+				const size_t radixByte = ((extractKey(*temp) >> j) & 0xff);
+				buffer[bucketPosition[radixByte]++] = *temp;
+			}
+
+			// Swap the roles of the source range and the buffer for the next pass.
+			last = buffer + (last - first);
+			temp = first;
+			first = buffer;
+			buffer = temp;
+		}
+	}
+
+
+	/// radix_sort
+	///
+	/// Entry point: dispatches to the radix_sort_impl overload selected by
+	/// ExtractKey::radix_type (uint8_t, uint16_t, or the generic integer version).
+	template <typename RandomAccessIterator, typename ExtractKey>
+	void radix_sort(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator buffer)
+	{
+		radix_sort_impl<RandomAccessIterator>(first, last, buffer, ExtractKey(), typename ExtractKey::radix_type());
+	}
+
+
+
+ /// comb_sort
+ ///
+ /// Implements the CombSort algorithm; in particular, implements the CombSort11 variation
+ /// of the CombSort algorithm, based on the reference to '11' in the implementation.
+ ///
+ /// To consider: Use a comb sort table instead of the '((nSpace * 10) + 3) / 13' expression.
+ /// Ideal tables can be found on the Internet by looking up "comb sort table".
+ ///
+	template <typename ForwardIterator, typename StrictWeakOrdering>
+	void comb_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+	{
+		typedef typename eastl::iterator_traits<ForwardIterator>::difference_type difference_type;
+
+		ForwardIterator iCurrent, iNext;
+		difference_type length = eastl::distance(first, last);
+		difference_type nSpace = length;
+
+		// Repeat gapped bubble passes, shrinking the gap by ~1.3x each time,
+		// until the gap is 1 and a full pass completes with no swaps.
+		for(bool bSwapped = false; (nSpace > 1) || bSwapped; )
+		{
+			nSpace = ((nSpace * 10) + 3) / 13; // Integer division is less than ideal.
+
+			// The CombSort11 rule: gaps of 9 and 10 are replaced with 11.
+			if((nSpace == 9) || (nSpace == 10))
+				nSpace = 11;
+
+			iCurrent = iNext = first;
+			eastl::advance(iNext, nSpace);
+
+			// One pass comparing elements nSpace apart.
+			for(bSwapped = false; iNext != last; iCurrent++, iNext++)
+			{
+				if(compare(*iNext, *iCurrent))
+				{
+					EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+					eastl::iter_swap(iCurrent, iNext);
+					bSwapped = true;
+				}
+			}
+		}
+	} // comb_sort
+
+	template <typename ForwardIterator>
+	inline void comb_sort(ForwardIterator first, ForwardIterator last)
+	{
+		// Forward to the general version, ordering with eastl::less.
+		typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+		eastl::comb_sort<ForwardIterator, eastl::less<value_type> >(first, last, eastl::less<value_type>());
+	}
+
+
+
+
+ /// bubble_sort
+ ///
+ /// Implements the BubbleSort algorithm. This algorithm is only useful for
+ /// small range sizes, such as 10 or less items. You may be better off using
+ /// insertion_sort for cases where bubble_sort works.
+ ///
+	// Forward-iterator bubble sort. Each pass bubbles the largest remaining
+	// element to the end of the working range; the range then shrinks to the
+	// last position visited (iCurrent), which is already in final position.
+	template <typename ForwardIterator, typename StrictWeakOrdering>
+	void bubble_sort_impl(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare, EASTL_ITC_NS::forward_iterator_tag)
+	{
+		ForwardIterator iCurrent, iNext;
+
+		while(first != last)
+		{
+			iNext = iCurrent = first;
+
+			for(++iNext; iNext != last; iCurrent = iNext, ++iNext)
+			{
+				if(compare(*iNext, *iCurrent))
+				{
+					EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+					eastl::iter_swap(iCurrent, iNext);
+				}
+			}
+			// Everything from iCurrent onward is sorted; shrink the range.
+			last = iCurrent;
+		}
+	}
+
+	// Bidirectional-iterator bubble sort. Tracks the last position at which a
+	// swap occurred (iLastModified); everything at or beyond that position is
+	// already sorted, so each outer pass shrinks the working range to it.
+	template <typename BidirectionalIterator, typename StrictWeakOrdering>
+	void bubble_sort_impl(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare, EASTL_ITC_NS::bidirectional_iterator_tag)
+	{
+		if(first != last)
+		{
+			BidirectionalIterator iCurrent, iNext, iLastModified;
+
+			--last; // Pre-decrement avoids an iterator copy; consistent with shaker_sort.
+
+			while(first != last)
+			{
+				iLastModified = iNext = iCurrent = first;
+
+				for(++iNext; iCurrent != last; iCurrent = iNext, ++iNext)
+				{
+					if(compare(*iNext, *iCurrent))
+					{
+						EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+						iLastModified = iCurrent;
+						eastl::iter_swap(iCurrent, iNext);
+					}
+				}
+
+				last = iLastModified;
+			}
+		}
+	}
+
+	template <typename ForwardIterator, typename StrictWeakOrdering>
+	inline void bubble_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+	{
+		// Select the forward- or bidirectional-iterator implementation via tag dispatch.
+		typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category iterator_category;
+
+		eastl::bubble_sort_impl<ForwardIterator, StrictWeakOrdering>(first, last, compare, iterator_category());
+	}
+
+	template <typename ForwardIterator>
+	inline void bubble_sort(ForwardIterator first, ForwardIterator last)
+	{
+		// Order with eastl::less and tag-dispatch on the iterator category.
+		typedef typename eastl::iterator_traits<ForwardIterator>::value_type   value_type;
+		typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category iterator_category;
+
+		eastl::bubble_sort_impl<ForwardIterator, eastl::less<value_type> >(first, last, eastl::less<value_type>(), iterator_category());
+	}
+
+
+
+ /// selection_sort
+ ///
+ /// Implements the SelectionSort algorithm.
+ ///
+	template <typename ForwardIterator, typename StrictWeakOrdering>
+	void selection_sort(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+	{
+		// Classic selection sort: for each position, find the smallest of the
+		// remaining elements and swap it into place. (Empty ranges simply make
+		// the outer loop a no-op.)
+		for(; first != last; ++first)
+		{
+			ForwardIterator iSmallest  = first;
+			ForwardIterator iCandidate = first;
+
+			for(++iCandidate; iCandidate != last; ++iCandidate)
+			{
+				if(compare(*iCandidate, *iSmallest))
+				{
+					EASTL_VALIDATE_COMPARE(!compare(*iSmallest, *iCandidate)); // Validate that the compare function is sane.
+					iSmallest = iCandidate;
+				}
+			}
+
+			if(first != iSmallest)
+				eastl::iter_swap(first, iSmallest);
+		}
+	} // selection_sort
+
+	template <typename ForwardIterator>
+	inline void selection_sort(ForwardIterator first, ForwardIterator last)
+	{
+		// Forward to the general version, ordering with eastl::less.
+		typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+		eastl::selection_sort<ForwardIterator, eastl::less<value_type> >(first, last, eastl::less<value_type>());
+	}
+
+
+
+ /// shaker_sort
+ ///
+ /// Implements the ShakerSort algorithm, which is a sorting algorithm which
+ /// improves on bubble_sort by sweeping both from left to right and right
+ /// to left, resulting in less iteration.
+ ///
+	template <typename BidirectionalIterator, typename StrictWeakOrdering>
+	void shaker_sort(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare)
+	{
+		if(first != last)
+		{
+			BidirectionalIterator iCurrent, iNext, iLastModified;
+
+			// Work with an inclusive upper bound.
+			--last;
+
+			while(first != last)
+			{
+				iLastModified = first;
+
+				// Forward pass: bubble larger elements toward 'last', recording
+				// the last swap position so the upper bound can shrink to it.
+				for(iCurrent = first; iCurrent != last; iCurrent = iNext)
+				{
+					iNext = iCurrent;
+					++iNext;
+
+					if(compare(*iNext, *iCurrent))
+					{
+						EASTL_VALIDATE_COMPARE(!compare(*iCurrent, *iNext)); // Validate that the compare function is sane.
+						iLastModified = iCurrent;
+						eastl::iter_swap(iCurrent, iNext);
+					}
+				}
+
+				last = iLastModified;
+
+				if(first != last)
+				{
+					// Backward pass: bubble smaller elements toward 'first',
+					// recording the last swap position so the lower bound can
+					// shrink to it.
+					for(iCurrent = last; iCurrent != first; iCurrent = iNext)
+					{
+						iNext = iCurrent;
+						--iNext;
+
+						if(compare(*iCurrent, *iNext))
+						{
+							EASTL_VALIDATE_COMPARE(!compare(*iNext, *iCurrent)); // Validate that the compare function is sane.
+							iLastModified = iCurrent;
+							eastl::iter_swap(iNext, iCurrent);
+						}
+					}
+					first = iLastModified;
+				}
+			}
+		}
+	} // shaker_sort
+
+	template <typename BidirectionalIterator>
+	inline void shaker_sort(BidirectionalIterator first, BidirectionalIterator last)
+	{
+		// Forward to the general version, ordering with eastl::less.
+		typedef typename eastl::iterator_traits<BidirectionalIterator>::value_type value_type;
+
+		eastl::shaker_sort<BidirectionalIterator, eastl::less<value_type> >(first, last, eastl::less<value_type>());
+	}
+
+
+
+ /// bucket_sort
+ ///
+ /// Implements the BucketSort algorithm.
+ ///
+ /// Example usage:
+ /// int* pArray = new int[1000];
+ /// for(int i = 0; i < 1000; i++)
+ /// pArray[i] = rand() % 32; // Note: The C rand function is a poor random number generator.
+ /// vector<int> intVector[32];
+	///    bucket_sort(pArray, pArray + 1000, intVector);
+ /// delete[] pArray;
+ ///
+	/// hash_use_self
+	///
+	/// Identity "hash": returns its argument unchanged. Useful as bucket_sort's
+	/// hash function when the element values are themselves valid bucket indices.
+	template <typename T>
+	struct hash_use_self
+	{
+		T operator()(const T& x) const
+			{ return x; }
+	};
+
+	// Distributes each element into bucketArray[hash(element)], then writes the
+	// buckets back over the input range in bucket order. For the output to be
+	// sorted, the hash must map lesser values to lesser bucket indices.
+	// (The unused difference_type typedef from the original has been removed.)
+	template <typename ForwardIterator, typename ContainerArray, typename HashFunction>
+	void bucket_sort(ForwardIterator first, ForwardIterator last, ContainerArray& bucketArray, HashFunction hash /*= hash_use_self*/)
+	{
+		// Scatter phase: append each element to its bucket.
+		for(ForwardIterator iInput = first; iInput != last; ++iInput)
+			bucketArray[hash(*iInput)].push_back(*iInput);
+
+		// Gather phase: concatenate the buckets back into the input range.
+		for(typename ContainerArray::const_iterator iBucket = bucketArray.begin(); iBucket != bucketArray.end(); ++iBucket)
+			first = eastl::copy((*iBucket).begin(), (*iBucket).end(), first);
+	}
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/core_allocator_adapter.h b/UnknownVersion/include/EASTL/core_allocator_adapter.h
new file mode 100644
index 0000000..c099dc7
--- /dev/null
+++ b/UnknownVersion/include/EASTL/core_allocator_adapter.h
@@ -0,0 +1,296 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// core_allocator_adapter.h
+//
+// Copyright (c) 2007, Electronic Arts. All rights reserved.
+// Maintained by Paul Pedriana
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements an EASTL allocator that uses an ICoreAllocator.
+// However, this header file is not dependent on ICoreAllocator or its package.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_CORE_ALLOCATOR_ADAPTER_H
+#define EASTL_CORE_ALLOCATOR_ADAPTER_H
+
+
+#include <EASTL/allocator.h>
+
+
+namespace EA
+{
+ namespace Allocator
+ {
+ /// CoreAllocatorAdapter
+ ///
+ /// Implements the EASTL allocator interface.
+ /// Allocates memory from an instance of ICoreAllocator.
+ ///
+ /// Example usage:
+ /// eastl::list<Widget, CoreAllocatorAdapter<ICoreAllocator> > widgetList("UI/WidgetList", pSomeCoreAllocator);
+ /// widgetList.push_back(Widget());
+ ///
+ /// Example usage:
+ /// // Note that the CoreAllocator is declared before and thus destroyed after the widget list.
+ /// typedef CoreAllocatorAdapter<ICoreAllocator> EASTLCoreAllocator;
+ /// typedef eastl::list<Widget, EASTLCoreAllocator> WidgetList;
+ /// CoreAllocatorFixed<WidgetList::node_type> widgetCoreAllocator(pFixedAllocatorForWidgetListValueType);
+ /// WidgetList widgetList(EASTLCoreAllocator("UI/WidgetList", &widgetCoreAllocator));
+ ///
+		template<class AllocatorType>
+		class CoreAllocatorAdapter
+		{
+		public:
+			typedef CoreAllocatorAdapter<AllocatorType> this_type;
+
+		public:
+			// Constructors. The default constructor binds to
+			// AllocatorType::GetDefaultAllocator(); the others bind to the
+			// given allocator, optionally with allocation flags.
+			CoreAllocatorAdapter(const char* pName = EASTL_NAME_VAL(EASTL_ALLOCATOR_DEFAULT_NAME));
+			CoreAllocatorAdapter(const char* pName, AllocatorType* pAllocator);
+			CoreAllocatorAdapter(const char* pName, AllocatorType* pAllocator, int flags);
+			CoreAllocatorAdapter(const CoreAllocatorAdapter& x);
+			CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* pName);
+
+			CoreAllocatorAdapter& operator=(const CoreAllocatorAdapter& x);
+
+			// EASTL allocator interface. The flags argument passed by EASTL is
+			// ignored; the adapter's own mnFlags is used instead (see Inlines).
+			void* allocate(size_t n, int flags = 0);
+			void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+			void deallocate(void* p, size_t n);
+
+			AllocatorType* get_allocator() const;
+			void set_allocator(AllocatorType* pAllocator);
+
+			int get_flags() const;
+			void set_flags(int flags);
+
+			const char* get_name() const;
+			void set_name(const char* pName);
+
+		public: // Public because otherwise VC++ generates (possibly invalid) warnings about inline friend template specializations.
+			AllocatorType* mpCoreAllocator;
+			int            mnFlags;    // Allocation flags. See ICoreAllocator/AllocFlags.
+
+			#if EASTL_NAME_ENABLED
+				const char* mpName; // Debug name, used to track memory.
+			#endif
+		};
+
+ template<class AllocatorType>
+ bool operator==(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b);
+
+ template<class AllocatorType>
+ bool operator!=(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b);
+
+
+
+ /// EASTLICoreAllocator
+ ///
+		/// Provides a standardized typedef for ICoreAllocator.
+ ///
+ /// Example usage:
+ /// eastl::list<Widget, EASTLICoreAllocator> widgetList("UI/WidgetList", pSomeCoreAllocator);
+ /// widgetList.push_back(Widget());
+ ///
+ class ICoreAllocator;
+ typedef CoreAllocatorAdapter<ICoreAllocator> EASTLICoreAllocator;
+
+
+ } // namespace Allocator
+
+} // namespace EA
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Inlines
+///////////////////////////////////////////////////////////////////////////////
+
+namespace EA
+{
+ namespace Allocator
+ {
+
+		/// Default constructor: binds to AllocatorType::GetDefaultAllocator() with no flags.
+		template<class AllocatorType>
+		inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const char* EASTL_NAME(pName))
+			: mpCoreAllocator(AllocatorType::GetDefaultAllocator()), mnFlags(0)
+		{
+			#if EASTL_NAME_ENABLED
+				mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+			#endif
+		}
+
+		/// Binds to the given core allocator with no flags.
+		template<class AllocatorType>
+		inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator)
+			: mpCoreAllocator(pCoreAllocator), mnFlags(0)
+		{
+			#if EASTL_NAME_ENABLED
+				mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+			#endif
+		}
+
+		/// Binds to the given core allocator with the given allocation flags.
+		template<class AllocatorType>
+		inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const char* EASTL_NAME(pName), AllocatorType* pCoreAllocator, int flags)
+			: mpCoreAllocator(pCoreAllocator), mnFlags(flags)
+		{
+			#if EASTL_NAME_ENABLED
+				mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+			#endif
+		}
+
+		/// Copy constructor: copies the allocator pointer, flags, and (when enabled) the debug name.
+		template<class AllocatorType>
+		inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const CoreAllocatorAdapter& x)
+			: mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags)
+		{
+			#if EASTL_NAME_ENABLED
+				mpName = x.mpName;
+			#endif
+		}
+
+		/// Copies x's allocator and flags but uses the given name instead of x's name.
+		template<class AllocatorType>
+		inline CoreAllocatorAdapter<AllocatorType>::CoreAllocatorAdapter(const CoreAllocatorAdapter& x, const char* EASTL_NAME(pName))
+			: mpCoreAllocator(x.mpCoreAllocator), mnFlags(x.mnFlags)
+		{
+			#if EASTL_NAME_ENABLED
+				mpName = pName ? pName : EASTL_ALLOCATOR_DEFAULT_NAME;
+			#endif
+		}
+
+		/// Assignment: copies the allocator pointer and flags only.
+		template<class AllocatorType>
+		inline CoreAllocatorAdapter<AllocatorType>& CoreAllocatorAdapter<AllocatorType>::operator=(const CoreAllocatorAdapter& x)
+		{
+			// In order to be consistent with EASTL's allocator implementation,
+			// we don't copy the name from the source object.
+			mpCoreAllocator = x.mpCoreAllocator;
+			mnFlags         = x.mnFlags;
+			return *this;
+		}
+
+		/// Allocates n bytes via the underlying core allocator. Returns whatever
+		/// the core allocator's Alloc returns.
+		template<class AllocatorType>
+		inline void* CoreAllocatorAdapter<AllocatorType>::allocate(size_t n, int /*flags*/)
+		{
+			// It turns out that EASTL itself doesn't use the flags parameter,
+			// whereas the user here might well want to specify a flags
+			// parameter. So we use ours instead of the one passed in.
+			return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags);
+		}
+
+		/// Allocates n bytes with the given alignment and alignment offset via
+		/// the underlying core allocator.
+		template<class AllocatorType>
+		inline void* CoreAllocatorAdapter<AllocatorType>::allocate(size_t n, size_t alignment, size_t offset, int /*flags*/)
+		{
+			// It turns out that EASTL itself doesn't use the flags parameter,
+			// whereas the user here might well want to specify a flags
+			// parameter. So we use ours instead of the one passed in.
+			return mpCoreAllocator->Alloc(n, EASTL_NAME_VAL(mpName), (unsigned)mnFlags, (unsigned)alignment, (unsigned)offset);
+		}
+
+		/// Returns the memory block to the underlying core allocator.
+		template<class AllocatorType>
+		inline void CoreAllocatorAdapter<AllocatorType>::deallocate(void* p, size_t n)
+		{
+			mpCoreAllocator->Free(p, n);
+		}
+
+		/// Returns the underlying core allocator.
+		template<class AllocatorType>
+		inline AllocatorType* CoreAllocatorAdapter<AllocatorType>::get_allocator() const
+		{
+			return mpCoreAllocator;
+		}
+
+		/// Rebinds the adapter to a different core allocator.
+		template<class AllocatorType>
+		inline void CoreAllocatorAdapter<AllocatorType>::set_allocator(AllocatorType* pAllocator)
+		{
+			mpCoreAllocator = pAllocator;
+		}
+
+		/// Returns the allocation flags passed to the core allocator on each allocate().
+		template<class AllocatorType>
+		inline int CoreAllocatorAdapter<AllocatorType>::get_flags() const
+		{
+			return mnFlags;
+		}
+
+		/// Sets the allocation flags passed to the core allocator on each allocate().
+		template<class AllocatorType>
+		inline void CoreAllocatorAdapter<AllocatorType>::set_flags(int flags)
+		{
+			mnFlags = flags;
+		}
+
+		/// Returns the debug name, or the default name when naming is compiled out.
+		template<class AllocatorType>
+		inline const char* CoreAllocatorAdapter<AllocatorType>::get_name() const
+		{
+			#if EASTL_NAME_ENABLED
+				return mpName;
+			#else
+				return EASTL_ALLOCATOR_DEFAULT_NAME;
+			#endif
+		}
+
+		/// Sets the debug name; a no-op when naming is compiled out.
+		template<class AllocatorType>
+		inline void CoreAllocatorAdapter<AllocatorType>::set_name(const char* pName)
+		{
+			#if EASTL_NAME_ENABLED
+				mpName = pName;
+			#else
+				(void)pName;
+			#endif
+		}
+
+
+
+		// Equality compares the allocator pointer and flags only; the debug
+		// name is deliberately not part of the comparison.
+		template<class AllocatorType>
+		inline bool operator==(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b)
+		{
+			return (a.mpCoreAllocator == b.mpCoreAllocator) &&
+				   (a.mnFlags         == b.mnFlags);
+		}
+
+		// Inequality: negation of the equality above (name likewise ignored).
+		template<class AllocatorType>
+		inline bool operator!=(const CoreAllocatorAdapter<AllocatorType>& a, const CoreAllocatorAdapter<AllocatorType>& b)
+		{
+			return (a.mpCoreAllocator != b.mpCoreAllocator) ||
+				   (a.mnFlags         != b.mnFlags);
+		}
+
+
+ } // namespace Allocator
+
+} // namespace EA
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_allocator.h b/UnknownVersion/include/EASTL/fixed_allocator.h
new file mode 100644
index 0000000..8c143d3
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_allocator.h
@@ -0,0 +1,467 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_allocator.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following
+// fixed_allocator
+// fixed_allocator_with_overflow
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_ALLOCATOR_H
+#define EASTL_FIXED_ALLOCATOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/fixed_pool.h>
+#include <EASTL/functional.h>
+#include <EASTL/memory.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #include <new>
+ #pragma warning(pop)
+#else
+ #include <new>
+#endif
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_allocator
+ ///
+ /// Implements an allocator which allocates a single fixed size where
+ /// the size, alignment, and memory used for the pool is defined at
+ /// runtime by the user. This is different from fixed containers
+ /// such as fixed_list whereby the size and alignment are determined
+ /// at compile time and the memory is directly built into the container's
+ /// member data.
+ ///
+ /// If the pool's memory is exhausted or was never initialized, the
+ /// allocate function returns NULL. Consider the fixed_allocator_with_overflow
+ /// class as an alternative in order to deal with this situation.
+ ///
+ /// This class requires the user to call container.get_allocator().init()
+ /// after constructing the container. There currently isn't a way to
+ /// construct the container with the initialization parameters, though
+ /// with some effort such a thing could probably be made possible.
+ /// It's not as simple as it might first seem, due to the non-copyable
+ /// nature of fixed allocators. A side effect of this limitation is that
+ /// you cannot copy-construct a container using fixed_allocators.
+ ///
+ /// Another side-effect is that you cannot swap two containers using
+ /// a fixed_allocator, as a swap requires temporary memory allocated by
+ /// an equivalent allocator, and such a thing cannot be done implicitly.
+ /// A workaround for the swap limitation is that you can implement your
+ /// own swap whereby you provide an explicitly created temporary object.
+ ///
+ /// Note: Be careful to set the allocator's node size to the size of the
+ /// container node and not the size of the contained object. Note that the
+ /// example code below uses IntListNode.
+ ///
+ /// Example usage:
+ /// typedef eastl::list<int, fixed_allocator> IntList;
+ /// typedef IntList::node_type IntListNode;
+ ///
+ /// IntListNode buffer[200];
+ /// IntList intList;
+ /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode));
+ ///
+ class EASTL_API fixed_allocator : public fixed_pool_base
+ {
+ public:
+ /// fixed_allocator
+ ///
+ /// Default constructor. The user usually will need to call init() after
+ /// constructing via this constructor.
+ ///
+ fixed_allocator(const char* /*pName*/ = EASTL_FIXED_POOL_DEFAULT_NAME)
+ : fixed_pool_base(NULL)
+ {
+ }
+
+
+ /// fixed_allocator
+ ///
+ /// Copy constructor. The user usually will need to call init() after
+ /// constructing via this constructor. By their nature, fixed-allocators
+ /// cannot be copied in any useful way, as by their nature the user
+ /// must manually initialize them.
+ ///
+ fixed_allocator(const fixed_allocator&)
+ : fixed_pool_base(NULL)
+ {
+ }
+
+
+ /// operator=
+ ///
+ /// By their nature, fixed-allocators cannot be copied in any
+ /// useful way, as by their nature the user must manually
+ /// initialize them.
+ ///
+ fixed_allocator& operator=(const fixed_allocator&)
+ {
+ return *this;
+ }
+
+
+ // init
+ //
+ // No init here, as the base class version is sufficient.
+ //
+ //void init(void* pMemory, size_t memorySize, size_t nodeSize,
+ // size_t alignment, size_t alignmentOffset = 0);
+
+
+ /// allocate
+ ///
+ /// Allocates a new object of the size specified upon class initialization.
+ /// Returns NULL if there is no more memory.
+ ///
+ void* allocate(size_t /*n*/, int /*flags*/ = 0)
+ {
+ // To consider: Verify that 'n' is what the user initialized us with.
+
+ Link* pLink = mpHead;
+
+ if(pLink) // If we have space...
+ {
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ mpHead = pLink->mpNext;
+ return pLink;
+ }
+ else
+ {
+ // If there's no free node in the free list, just
+ // allocate another from the reserved memory area
+
+ if(mpNext != mpCapacity)
+ {
+ pLink = mpNext;
+
+ mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);
+
+ #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+ if(++mnCurrentSize > mnPeakSize)
+ mnPeakSize = mnCurrentSize;
+ #endif
+
+ return pLink;
+ }
+
+ // EASTL_ASSERT(false); To consider: enable this assert. However, we intentionally disable it because this isn't necessarily an assertable error.
+ return NULL;
+ }
+ }
+
+
+ /// allocate
+ ///
+ void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
+ {
+ return allocate(n, flags);
+ }
+
+
+    /// deallocate
+    ///
+    /// Frees the given object which was allocated by allocate().
+    /// If the given node was not allocated by allocate() then the behaviour
+    /// is undefined.
+    ///
+    /// The node is pushed onto the front of the intrusive free list; nothing
+    /// is returned to the user-supplied buffer's owner. The second (size)
+    /// argument is ignored, as all nodes are the same size.
+    void deallocate(void* p, size_t)
+    {
+        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+            --mnCurrentSize;
+        #endif
+
+        ((Link*)p)->mpNext = mpHead;
+        mpHead = ((Link*)p);
+    }
+
+
+ using fixed_pool_base::can_allocate;
+
+
+    /// get_name
+    /// This allocator stores no name of its own; always returns the default.
+    const char* get_name() const
+    {
+        return EASTL_FIXED_POOL_DEFAULT_NAME;
+    }
+
+
+    /// set_name
+    /// Intentionally a no-op; see get_name above.
+    void set_name(const char*)
+    {
+        // Nothing to do. We don't allocate memory.
+    }
+
+ }; // fixed_allocator
+
+ bool operator==(const fixed_allocator& a, const fixed_allocator& b);
+ bool operator!=(const fixed_allocator& a, const fixed_allocator& b);
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_allocator_with_overflow
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_allocator_with_overflow
+ ///
+ /// Implements an allocator which allocates a single fixed size where
+ /// the size, alignment, and memory used for the pool is defined at
+ /// runtime by the user. This is different from fixed containers
+ /// such as fixed_list whereby the size and alignment are determined
+ /// at compile time and the memory is directly built into the container's
+ /// member data.
+ ///
+ /// Note: Be careful to set the allocator's node size to the size of the
+ /// container node and not the size of the contained object. Note that the
+ /// example code below uses IntListNode.
+ ///
+ /// This class requires the user to call container.get_allocator().init()
+ /// after constructing the container. There currently isn't a way to
+ /// construct the container with the initialization parameters, though
+ /// with some effort such a thing could probably be made possible.
+ /// It's not as simple as it might first seem, due to the non-copyable
+ /// nature of fixed allocators. A side effect of this limitation is that
+ /// you cannot copy-construct a container using fixed_allocators.
+ ///
+ /// Another side-effect is that you cannot swap two containers using
+ /// a fixed_allocator, as a swap requires temporary memory allocated by
+ /// an equivalent allocator, and such a thing cannot be done implicitly.
+ /// A workaround for the swap limitation is that you can implement your
+ /// own swap whereby you provide an explicitly created temporary object.
+ ///
+ /// Example usage:
+ /// typedef eastl::list<int, fixed_allocator_with_overflow> IntList;
+ /// typedef IntList::node_type IntListNode;
+ ///
+ /// IntListNode buffer[200];
+ /// IntList intList;
+ /// intList.get_allocator().init(buffer, sizeof(buffer), sizeof(IntListNode), __alignof(IntListNode));
+ ///
+    /// fixed_allocator_with_overflow
+    ///
+    /// Like fixed_allocator, but when the fixed pool is exhausted further
+    /// allocations are satisfied from mOverflowAllocator instead of failing.
+    /// deallocate() routes each pointer back to the pool or the overflow
+    /// allocator by testing whether it lies within [mpPoolBegin, mpPoolEnd).
+    class EASTL_API fixed_allocator_with_overflow : public fixed_pool_base
+    {
+    public:
+        /// fixed_allocator_with_overflow
+        ///
+        /// Default constructor. The user usually will need to call init() after
+        /// constructing via this constructor.
+        ///
+        fixed_allocator_with_overflow(const char* pName = EASTL_FIXED_POOL_DEFAULT_NAME)
+            : fixed_pool_base(NULL),
+              mOverflowAllocator(pName)
+        {
+        }
+
+
+        /// fixed_allocator_with_overflow
+        ///
+        /// Copy constructor. The user usually will need to call init() after
+        /// constructing via this constructor. By their nature, fixed-allocators
+        /// cannot be copied in any useful way, as by their nature the user
+        /// must manually initialize them.
+        ///
+        /// NOTE(review): unlike the default constructor, this does not
+        /// initialize mOverflowAllocator, mpPoolBegin, mpPoolEnd or mnNodeSize;
+        /// a copy-constructed instance must not be used until init() is called.
+        /// Confirm this is intended before relying on copy construction.
+        fixed_allocator_with_overflow(const fixed_allocator_with_overflow&)
+            : fixed_pool_base(NULL)
+        {
+        }
+
+
+        /// operator=
+        ///
+        /// By their nature, fixed-allocators cannot be copied in any
+        /// useful way, as by their nature the user must manually
+        /// initialize them.
+        ///
+        /// Only the overflow allocator is (optionally) copied; the fixed pool
+        /// state of 'x' is never copied.
+        fixed_allocator_with_overflow& operator=(const fixed_allocator_with_overflow& x)
+        {
+            #if EASTL_ALLOCATOR_COPY_ENABLED
+                mOverflowAllocator = x.mOverflowAllocator;
+            #else
+                (void)x;
+            #endif
+
+            return *this;
+        }
+
+
+        /// init
+        ///
+        /// Initializes the fixed pool over the user-supplied buffer, then
+        /// records the buffer bounds and node size locally so deallocate()
+        /// can tell pool pointers apart from overflow pointers.
+        void init(void* pMemory, size_t memorySize, size_t nodeSize,
+                  size_t alignment, size_t alignmentOffset = 0)
+        {
+            fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+            mpPoolBegin = pMemory;
+            mpPoolEnd   = (void*)((uintptr_t)pMemory + memorySize);
+            mnNodeSize  = (eastl_size_t)nodeSize;
+        }
+
+
+        /// allocate
+        ///
+        /// Allocates a new object of the size specified upon class initialization.
+        /// Returns NULL if there is no more memory.
+        ///
+        /// 'n' is ignored: each allocation is one mnNodeSize-byte node, taken
+        /// from the free list when possible and from mOverflowAllocator
+        /// otherwise.
+        void* allocate(size_t /*n*/, int /*flags*/ = 0)
+        {
+            // To consider: Verify that 'n' is what the user initialized us with.
+
+            void* p;
+
+            if(mpHead) // If we have space...
+            {
+                p      = mpHead;
+                mpHead = mpHead->mpNext;
+            }
+            else
+                p = mOverflowAllocator.allocate(mnNodeSize, __FILE__, __LINE__);
+
+            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                if(p && (++mnCurrentSize > mnPeakSize))
+                    mnPeakSize = mnCurrentSize;
+            #endif
+
+            return p;
+        }
+
+
+        /// allocate
+        ///
+        /// Aligned overload; alignment/offset are ignored (fixed at init time)
+        /// and the call forwards to the unaligned overload.
+        void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
+        {
+            return allocate(n, flags);
+        }
+
+
+        /// deallocate
+        ///
+        /// Frees the given object which was allocated by allocate().
+        /// If the given node was not allocated by allocate() then the behaviour
+        /// is undefined.
+        ///
+        /// A pointer inside [mpPoolBegin, mpPoolEnd) came from the fixed pool
+        /// and is pushed back on the free list; anything else is handed to the
+        /// overflow allocator.
+        void deallocate(void* p, size_t)
+        {
+            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                --mnCurrentSize;
+            #endif
+
+            if((p >= mpPoolBegin) && (p < mpPoolEnd))
+            {
+                ((Link*)p)->mpNext = mpHead;
+                mpHead = ((Link*)p);
+            }
+            else
+                mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
+        }
+
+
+        using fixed_pool_base::can_allocate;
+
+
+        /// get_name
+        /// Delegates to the overflow allocator, which owns the name.
+        const char* get_name() const
+        {
+            return mOverflowAllocator.get_name();
+        }
+
+
+        /// set_name
+        /// Delegates to the overflow allocator, which owns the name.
+        void set_name(const char* pName)
+        {
+            mOverflowAllocator.set_name(pName);
+        }
+
+    protected:
+        EASTLAllocatorType mOverflowAllocator;  // To consider: Allow the user to define the type of this, presumably via a template parameter.
+        void*              mpPoolBegin;         // To consider: We have these member variables and ideally we shouldn't need them. The problem is that
+        void*              mpPoolEnd;           // the information about the pool buffer and object size is stored in the owning container
+        eastl_size_t       mnNodeSize;          // and we can't have access to it without increasing the amount of code we need and by templating
+                                                // more code. It may turn out that simply storing data here is smaller in the end.
+    }; // fixed_allocator_with_overflow        // Granted, this class is usually used for debugging purposes, but perhaps there is an elegant solution.
+
+ bool operator==(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b);
+ bool operator!=(const fixed_allocator_with_overflow& a, const fixed_allocator_with_overflow& b);
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+    // Two fixed allocators are never interchangeable (each owns a distinct
+    // user-supplied pool), so operator== always returns false.
+    //
+    // NOTE(review): operator!= ALSO returns false, so !(a == b) != (a != b) —
+    // these operators do not form a standard equivalence relation. This appears
+    // deliberate (containers must never treat two fixed allocators as able to
+    // exchange memory), but confirm against upstream EASTL before relying on
+    // either operator's result.
+    inline bool operator==(const fixed_allocator&, const fixed_allocator&)
+    {
+        return false;
+    }
+
+    inline bool operator!=(const fixed_allocator&, const fixed_allocator&)
+    {
+        return false;
+    }
+
+    // Same convention (and same caveat) as the fixed_allocator operators above.
+    inline bool operator==(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&)
+    {
+        return false;
+    }
+
+    inline bool operator!=(const fixed_allocator_with_overflow&, const fixed_allocator_with_overflow&)
+    {
+        return false;
+    }
+
+
+} // namespace eastl
+
+
+
+
+#endif // Header include guard
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_hash_map.h b/UnknownVersion/include/EASTL/fixed_hash_map.h
new file mode 100644
index 0000000..4dc1a54
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_hash_map.h
@@ -0,0 +1,419 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_hash_map.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a hash_map and hash_multimap which use a fixed size
+// memory pool for its buckets and nodes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_HASH_MAP_H
+#define EASTL_FIXED_HASH_MAP_H
+
+
+#include <EASTL/hash_map.h>
+#include <EASTL/internal/fixed_pool.h>
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_HASH_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_HASH_MAP_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_map" // Unless the user overrides something, this is "EASTL fixed_hash_map".
+ #endif
+
+ #ifndef EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multimap" // Unless the user overrides something, this is "EASTL fixed_hash_multimap".
+ #endif
+
+
+
+ /// fixed_hash_map
+ ///
+ /// Implements a hash_map with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Template parameters:
+ /// Key The key type for the map. This is a map of Key to T (value).
+ /// T The value type for the map.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+ typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>, bool bCacheHashCode = false, typename Allocator = EASTLAllocatorType>
+    // fixed_hash_map derives from hash_map, substituting a
+    // fixed_hashtable_allocator that serves all bucket and node allocations
+    // from the member buffers below (overflowing to 'Allocator' only when
+    // bEnableOverflow is true).
+    class fixed_hash_map : public hash_map<Key,
+                                           T,
+                                           Hash,
+                                           Predicate,
+                                           fixed_hashtable_allocator<
+                                               bucketCount + 1,
+                                               sizeof(typename hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::node_type),
+                                               nodeCount,
+                                               hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignment,
+                                               hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignmentOffset,
+                                               bEnableOverflow,
+                                               Allocator>,
+                                           bCacheHashCode>
+    {
+    public:
+        typedef fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, Allocator> this_type;
+        typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_map<Key, T, Hash, Predicate,
+                    Allocator, bCacheHashCode>::node_type), nodeCount, hash_map<Key, T, Hash, Predicate,
+                    Allocator, bCacheHashCode>::kValueAlignment, hash_map<Key, T, Hash, Predicate,
+                    Allocator, bCacheHashCode>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+        typedef hash_map<Key, T, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+        typedef typename base_type::node_type node_type;
+        typedef typename base_type::size_type size_type;
+
+        enum
+        {
+            kMaxSize = nodeCount   // Capacity of the fixed node pool.
+        };
+
+        using base_type::mAllocator;
+
+    protected:
+        node_type** mBucketBuffer[bucketCount + 1];          // '+1' because the hash table needs a null terminating bucket.
+        char        mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+    public:
+        /// fixed_hash_map
+        ///
+        /// Construct an empty fixed_hash_map with a given set of parameters.
+        ///
+        explicit fixed_hash_map(const Hash& hashFunction = Hash(),
+                                const Predicate& predicate = Predicate())
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+                        predicate, fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            // An effectively-infinite load factor prevents the hashtable from
+            // ever trying to rehash into a larger (heap-allocated) bucket array.
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+            #endif
+
+            // Point the allocator's node pool at our member buffer.
+            mAllocator.reset(mNodeBuffer);
+        }
+
+
+        /// fixed_hash_map
+        ///
+        /// Construct a fixed_hash_map from a source sequence and with a given set of parameters.
+        ///
+        template <typename InputIterator>
+        fixed_hash_map(InputIterator first, InputIterator last,
+                       const Hash& hashFunction = Hash(),
+                       const Predicate& predicate = Predicate())
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+                        predicate, fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(EASTL_FIXED_HASH_MAP_DEFAULT_NAME);
+            #endif
+
+            mAllocator.reset(mNodeBuffer);
+            base_type::insert(first, last);
+        }
+
+
+        /// fixed_hash_map
+        ///
+        /// Copy constructor. Copies x's hash function, predicate and elements,
+        /// but allocates all nodes from this instance's own fixed buffers.
+        ///
+        fixed_hash_map(const this_type& x)
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+                        x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(x.mAllocator.get_name());
+            #endif
+
+            mAllocator.reset(mNodeBuffer);
+            base_type::insert(x.begin(), x.end());
+        }
+
+
+        /// operator=
+        ///
+        /// We provide an override so that assignment is done correctly.
+        /// Implemented as clear + re-insert; this instance's hash function,
+        /// predicate and allocator are retained — only the elements are copied.
+        this_type& operator=(const this_type& x)
+        {
+            if(this != &x)
+            {
+                base_type::clear();
+                base_type::insert(x.begin(), x.end());
+            }
+            return *this;
+        }
+
+
+        void swap(this_type& x)
+        {
+            // Fixed containers use a special swap that can deal with excessively large buffers.
+            eastl::fixed_swap(*this, x);
+        }
+
+
+        /// reset
+        /// Empties the container and re-arms the node pool over mNodeBuffer.
+        /// NOTE(review): base_type::reset is presumably a fast "forget all
+        /// nodes" operation — confirm its destructor behavior in hashtable.h
+        /// before using with non-trivially-destructible value types.
+        void reset()
+        {
+            base_type::reset();
+            base_type::get_allocator().reset(mNodeBuffer);
+        }
+
+
+        /// max_size
+        /// Returns the fixed capacity (nodeCount), not a theoretical maximum.
+        size_type max_size() const
+        {
+            return kMaxSize;
+        }
+
+    }; // fixed_hash_map
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+    // NOTE(review): this swap's template parameter list omits 'Allocator'
+    // (fixed_hash_map takes 9 template parameters, this takes 8), so it only
+    // matches instantiations using the default Allocator. Confirm whether a
+    // 9-parameter overload is needed.
+    template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+    inline void swap(fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+                     fixed_hash_map<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+    {
+        // Fixed containers use a special swap that can deal with excessively large buffers.
+        eastl::fixed_swap(a, b);
+    }
+
+
+
+
+ /// fixed_hash_multimap
+ ///
+ /// Implements a hash_multimap with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Template parameters:
+ /// Key The key type for the map. This is a map of Key to T (value).
+ /// T The value type for the map.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+ template <typename Key, typename T, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+ typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>, bool bCacheHashCode = false, typename Allocator = EASTLAllocatorType>
+    // fixed_hash_multimap mirrors fixed_hash_map, deriving from hash_multimap
+    // with a fixed_hashtable_allocator backed by the member buffers below.
+    class fixed_hash_multimap : public hash_multimap<Key,
+                                                     T,
+                                                     Hash,
+                                                     Predicate,
+                                                     fixed_hashtable_allocator<
+                                                         bucketCount + 1,
+                                                         sizeof(typename hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::node_type),
+                                                         nodeCount,
+                                                         hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignment,
+                                                         hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignmentOffset,
+                                                         bEnableOverflow,
+                                                         Allocator>,
+                                                     bCacheHashCode>
+    {
+    public:
+        typedef fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, Allocator> this_type;
+        typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_multimap<Key, T, Hash, Predicate,
+                    Allocator, bCacheHashCode>::node_type), nodeCount, hash_multimap<Key, T, Hash, Predicate,
+                    Allocator, bCacheHashCode>::kValueAlignment, hash_multimap<Key, T, Hash, Predicate,
+                    Allocator, bCacheHashCode>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+        typedef hash_multimap<Key, T, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+        typedef typename base_type::node_type node_type;
+        typedef typename base_type::size_type size_type;
+
+        enum
+        {
+            kMaxSize = nodeCount   // Capacity of the fixed node pool.
+        };
+
+        using base_type::mAllocator;
+
+    protected:
+        node_type** mBucketBuffer[bucketCount + 1];          // '+1' because the hash table needs a null terminating bucket.
+        char        mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+    public:
+        /// fixed_hash_multimap
+        ///
+        /// Construct an empty fixed_hash_multimap with a given set of parameters.
+        ///
+        explicit fixed_hash_multimap(const Hash& hashFunction = Hash(),
+                                     const Predicate& predicate = Predicate())
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+                        predicate, fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            // An effectively-infinite load factor prevents any rehash into a
+            // larger (heap-allocated) bucket array.
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+            #endif
+
+            // Point the allocator's node pool at our member buffer.
+            mAllocator.reset(mNodeBuffer);
+        }
+
+
+        /// fixed_hash_multimap
+        ///
+        /// Construct a fixed_hash_multimap from a source sequence and with a given set of parameters.
+        ///
+        template <typename InputIterator>
+        fixed_hash_multimap(InputIterator first, InputIterator last,
+                            const Hash& hashFunction = Hash(),
+                            const Predicate& predicate = Predicate())
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+                        predicate, fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(EASTL_FIXED_HASH_MULTIMAP_DEFAULT_NAME);
+            #endif
+
+            mAllocator.reset(mNodeBuffer);
+            base_type::insert(first, last);
+        }
+
+
+        /// fixed_hash_multimap
+        ///
+        /// Copy constructor. Copies x's hash function, predicate and elements,
+        /// but allocates all nodes from this instance's own fixed buffers.
+        ///
+        fixed_hash_multimap(const this_type& x)
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+                        x.equal_function(),fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(x.mAllocator.get_name());
+            #endif
+
+            mAllocator.reset(mNodeBuffer);
+            base_type::insert(x.begin(), x.end());
+        }
+
+
+        /// operator=
+        ///
+        /// We provide an override so that assignment is done correctly.
+        /// Implemented as clear + re-insert; this instance's hash function,
+        /// predicate and allocator are retained — only the elements are copied.
+        this_type& operator=(const this_type& x)
+        {
+            if(this != &x)
+            {
+                base_type::clear();
+                base_type::insert(x.begin(), x.end());
+            }
+            return *this;
+        }
+
+
+        void swap(this_type& x)
+        {
+            // Fixed containers use a special swap that can deal with excessively large buffers.
+            eastl::fixed_swap(*this, x);
+        }
+
+
+        /// reset
+        /// Empties the container and re-arms the node pool over mNodeBuffer.
+        /// NOTE(review): confirm base_type::reset's destructor behavior in
+        /// hashtable.h before using with non-trivially-destructible values.
+        void reset()
+        {
+            base_type::reset();
+            base_type::get_allocator().reset(mNodeBuffer);
+        }
+
+
+        /// max_size
+        /// Returns the fixed capacity (nodeCount), not a theoretical maximum.
+        size_type max_size() const
+        {
+            return kMaxSize;
+        }
+
+    }; // fixed_hash_multimap
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+    // NOTE(review): as with the fixed_hash_map swap, the template parameter
+    // list omits 'Allocator', so only default-Allocator instantiations match.
+    template <typename Key, typename T, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+    inline void swap(fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+                     fixed_hash_multimap<Key, T, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+    {
+        // Fixed containers use a special swap that can deal with excessively large buffers.
+        eastl::fixed_swap(a, b);
+    }
+
+
+
+} // namespace eastl
+
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_hash_set.h b/UnknownVersion/include/EASTL/fixed_hash_set.h
new file mode 100644
index 0000000..6cd5a60
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_hash_set.h
@@ -0,0 +1,422 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_hash_set.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a hash_set which uses a fixed size memory pool for
+// its buckets and nodes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_HASH_SET_H
+#define EASTL_FIXED_HASH_SET_H
+
+
+#include <EASTL/hash_set.h>
+#include <EASTL/internal/fixed_pool.h>
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_HASH_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_HASH_SET_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_set" // Unless the user overrides something, this is "EASTL fixed_hash_set".
+ #endif
+
+ #ifndef EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME
+ #define EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_hash_multiset" // Unless the user overrides something, this is "EASTL fixed_hash_multiset".
+ #endif
+
+
+
+ /// fixed_hash_set
+ ///
+ /// Implements a hash_set with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Template parameters:
+ /// Value The type of object the hash_set holds.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+ template <typename Value, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+ typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>, bool bCacheHashCode = false, typename Allocator = EASTLAllocatorType>
+    // fixed_hash_set derives from hash_set, substituting a
+    // fixed_hashtable_allocator that serves all bucket and node allocations
+    // from the member buffers below (overflowing to 'Allocator' only when
+    // bEnableOverflow is true).
+    class fixed_hash_set : public hash_set<Value,
+                                           Hash,
+                                           Predicate,
+                                           fixed_hashtable_allocator<
+                                               bucketCount + 1,
+                                               sizeof(typename hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>::node_type),
+                                               nodeCount,
+                                               hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignment,
+                                               hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignmentOffset,
+                                               bEnableOverflow,
+                                               Allocator>,
+                                           bCacheHashCode>
+    {
+    public:
+        typedef fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, Allocator> this_type;
+        typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_set<Value, Hash, Predicate,
+                    Allocator, bCacheHashCode>::node_type), nodeCount, hash_set<Value, Hash, Predicate,
+                    Allocator, bCacheHashCode>::kValueAlignment, hash_set<Value, Hash, Predicate,
+                    Allocator, bCacheHashCode>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+        typedef hash_set<Value, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+        typedef typename base_type::node_type node_type;
+        typedef typename base_type::size_type size_type;
+
+        enum
+        {
+            kMaxSize = nodeCount   // Capacity of the fixed node pool.
+        };
+
+        using base_type::mAllocator;
+
+    protected:
+        node_type** mBucketBuffer[bucketCount + 1];          // '+1' because the hash table needs a null terminating bucket.
+        char        mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+    public:
+        /// fixed_hash_set
+        ///
+        /// Construct an empty fixed_hash_set with a given set of parameters.
+        ///
+        explicit fixed_hash_set(const Hash& hashFunction = Hash(),
+                                const Predicate& predicate = Predicate())
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+                        hashFunction, predicate, fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            // An effectively-infinite load factor prevents any rehash into a
+            // larger (heap-allocated) bucket array.
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+            #endif
+
+            // Point the allocator's node pool at our member buffer.
+            mAllocator.reset(mNodeBuffer);
+        }
+
+
+        /// fixed_hash_set
+        ///
+        /// Construct a fixed_hash_set from a source sequence and with a given set of parameters.
+        ///
+        template <typename InputIterator>
+        fixed_hash_set(InputIterator first, InputIterator last,
+                       const Hash& hashFunction = Hash(),
+                       const Predicate& predicate = Predicate())
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+                        predicate, fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(EASTL_FIXED_HASH_SET_DEFAULT_NAME);
+            #endif
+
+            mAllocator.reset(mNodeBuffer);
+            base_type::insert(first, last);
+        }
+
+
+        /// fixed_hash_set
+        ///
+        /// Copy constructor. Copies x's hash function, predicate and elements,
+        /// but allocates all nodes from this instance's own fixed buffers.
+        ///
+        fixed_hash_set(const this_type& x)
+            : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+                        x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+        {
+            EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+            base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+            #if EASTL_NAME_ENABLED
+                mAllocator.set_name(x.mAllocator.get_name());
+            #endif
+
+            mAllocator.reset(mNodeBuffer);
+            base_type::insert(x.begin(), x.end());
+        }
+
+
+        /// operator=
+        ///
+        /// We provide an override so that assignment is done correctly.
+        /// Implemented as clear + re-insert; this instance's hash function,
+        /// predicate and allocator are retained — only the elements are copied.
+        this_type& operator=(const this_type& x)
+        {
+            if(this != &x)
+            {
+                base_type::clear();
+                base_type::insert(x.begin(), x.end());
+            }
+            return *this;
+        }
+
+
+        /// swap
+        /// NOTE(review): unlike fixed_hash_map/fixed_hash_multimap in this same
+        /// changeset (which call eastl::fixed_swap), this swap uses a brute-force
+        /// stack temporary. For large instantiations the stack copy below may be
+        /// the reason the other containers were migrated to fixed_swap — confirm
+        /// whether this one was simply missed.
+        void swap(this_type& x)
+        {
+            // We must do a brute-force swap, because fixed containers cannot share memory allocations.
+            // Note that we create a temp value on the stack. This approach may fail if the size of the
+            // container is too large. We have a rule against allocating memory from the heap, and so
+            // if the user wants to swap two large objects of this class, the user will currently need
+            // to implement it manually. To consider: add code to allocate a temporary buffer if the
+            // size of the container is too large for the stack.
+            EASTL_ASSERT(sizeof(x) < EASTL_MAX_STACK_USAGE); // It is dangerous to try to create objects that are too big for the stack.
+
+            const this_type temp(*this); // Can't call eastl::swap because that would
+            *this = x;                   // itself call this member swap function.
+            x = temp;
+        }
+
+
+        /// reset
+        /// Empties the container and re-arms the node pool over mNodeBuffer.
+        /// NOTE(review): confirm base_type::reset's destructor behavior in
+        /// hashtable.h before using with non-trivially-destructible values.
+        void reset()
+        {
+            base_type::reset();
+            base_type::get_allocator().reset(mNodeBuffer);
+        }
+
+
+        /// max_size
+        /// Returns the fixed capacity (nodeCount), not a theoretical maximum.
+        size_type max_size() const
+        {
+            return kMaxSize;
+        }
+
+    }; // fixed_hash_set
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+    // NOTE(review): this overload delegates to the member swap (brute-force
+    // stack copy) rather than eastl::fixed_swap as the map variants do, and —
+    // like them — omits 'Allocator' from its template parameter list, so only
+    // default-Allocator instantiations match.
+    template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+    inline void swap(fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+                     fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+    {
+        a.swap(b);
+    }
+
+
+
+
+
+
+ /// fixed_hash_multiset
+ ///
+ /// Implements a hash_multiset with a fixed block of memory identified by the nodeCount and bucketCount
+ /// template parameters.
+ ///
+ /// Value The type of object the hash_set holds.
+ /// nodeCount The max number of objects to contain. This value must be >= 1.
+ /// bucketCount The number of buckets to use. This value must be >= 2.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Hash hash_set hash function. See hash_set.
+ /// Predicate hash_set equality testing function. See hash_set.
+ ///
+	template <typename Value, size_t nodeCount, size_t bucketCount = nodeCount + 1, bool bEnableOverflow = true,
+			  typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>, bool bCacheHashCode = false, typename Allocator = EASTLAllocatorType>
+	class fixed_hash_multiset : public hash_multiset<Value,
+													 Hash,
+													 Predicate,
+													 fixed_hashtable_allocator<
+														bucketCount + 1,
+														sizeof(typename hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>::node_type),
+														nodeCount,
+														hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignment,
+														hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode>::kValueAlignmentOffset,
+														bEnableOverflow,
+														Allocator>,
+													 bCacheHashCode>
+	{
+	public:
+		typedef fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, Allocator> this_type;
+		typedef fixed_hashtable_allocator<bucketCount + 1, sizeof(typename hash_multiset<Value, Hash, Predicate,
+					Allocator, bCacheHashCode>::node_type), nodeCount, hash_multiset<Value, Hash, Predicate,
+					Allocator, bCacheHashCode>::kValueAlignment, hash_multiset<Value, Hash, Predicate,
+					Allocator, bCacheHashCode>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+		typedef hash_multiset<Value, Hash, Predicate, fixed_allocator_type, bCacheHashCode> base_type;
+		typedef typename base_type::node_type node_type;
+		typedef typename base_type::size_type size_type;
+
+		enum
+		{
+			kMaxSize = nodeCount // Fixed capacity; reported by max_size().
+		};
+
+		using base_type::mAllocator;
+
+	protected:
+		node_type** mBucketBuffer[bucketCount + 1]; // '+1' because the hash table needs a null terminating bucket.
+		char mNodeBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+	public:
+		/// fixed_hash_multiset
+		///
+		/// Construct an empty fixed_hash_multiset with a given set of parameters.
+		/// The allocator is constructed with a NULL node pool and rebound to the
+		/// member buffer afterward; the huge max load factor prevents the base
+		/// hashtable from ever attempting a rehash/resize.
+		explicit fixed_hash_multiset(const Hash& hashFunction = Hash(),
+									 const Predicate& predicate = Predicate())
+			: base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+						predicate, fixed_allocator_type(NULL, mBucketBuffer))
+		{
+			EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+			base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mNodeBuffer);
+		}
+
+
+		/// fixed_hash_multiset
+		///
+		/// Construct a fixed_hash_multiset from a source sequence and with a given set of parameters.
+		template <typename InputIterator>
+		fixed_hash_multiset(InputIterator first, InputIterator last,
+							const Hash& hashFunction = Hash(),
+							const Predicate& predicate = Predicate())
+			: base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), hashFunction,
+						predicate, fixed_allocator_type(NULL, mBucketBuffer))
+		{
+			EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+			base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_HASH_MULTISET_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mNodeBuffer); // Must rebind before inserting, so nodes come from our pool.
+			base_type::insert(first, last);
+		}
+
+
+		/// fixed_hash_multiset
+		///
+		/// Copy constructor. Copies the elements (into this container's own node
+		/// pool) but not the allocator; the allocator name is copied when enabled.
+		fixed_hash_multiset(const this_type& x)
+			: base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+						x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+		{
+			EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+			base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(x.mAllocator.get_name());
+			#endif
+
+			mAllocator.reset(mNodeBuffer);
+			base_type::insert(x.begin(), x.end());
+		}
+
+
+		/// operator=
+		///
+		/// We provide an override so that assignment is done correctly.
+		/// Only the elements are copied; the fixed allocator (and its local
+		/// buffer) intentionally stays this container's own.
+		this_type& operator=(const this_type& x)
+		{
+			if(this != &x)
+			{
+				base_type::clear();
+				base_type::insert(x.begin(), x.end());
+			}
+			return *this;
+		}
+
+
+		/// swap
+		/// Swaps the contents of two containers of identical fixed capacity.
+		void swap(this_type& x)
+		{
+			// Fixed containers use a special swap that can deal with excessively large buffers.
+			eastl::fixed_swap(*this, x);
+		}
+
+
+		/// reset
+		/// Empties the container and rebinds the allocator to the local node
+		/// buffer. NOTE(review): presumably a fast unilateral reset with no
+		/// per-element teardown — confirm against hashtable::reset.
+		void reset()
+		{
+			base_type::reset();
+			base_type::get_allocator().reset(mNodeBuffer);
+		}
+
+
+		/// max_size
+		/// Returns the fixed capacity (nodeCount), not the hashtable's theoretical max.
+		size_type max_size() const
+		{
+			return kMaxSize;
+		}
+
+	}; // fixed_hash_multiset
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	/// swap
+	/// Global swap for fixed_hash_multiset; forwards to eastl::fixed_swap.
+	template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode>
+	inline void swap(fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& a,
+					 fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode>& b)
+	{
+		// Fixed containers use a special swap that can deal with excessively large buffers.
+		eastl::fixed_swap(a, b);
+	}
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_list.h b/UnknownVersion/include/EASTL/fixed_list.h
new file mode 100644
index 0000000..a8a6f72
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_list.h
@@ -0,0 +1,342 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_list.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a list which uses a fixed size memory pool for its nodes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_LIST_H
+#define EASTL_FIXED_LIST_H
+
+
+#include <EASTL/list.h>
+#include <EASTL/internal/fixed_pool.h>
+#include <EASTL/sort.h>
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_LIST_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_LIST_DEFAULT_NAME
+ #define EASTL_FIXED_LIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_list" // Unless the user overrides something, this is "EASTL fixed_list".
+ #endif
+
+
+
+ /// fixed_list
+ ///
+ /// fixed_list is a list which uses a single block of contiguous memory
+ /// for its nodes. The purpose of this is to reduce memory usage relative
+ /// to a conventional memory allocation system (with block headers), to
+ /// increase allocation speed (often due to avoidance of mutex locks),
+ /// to increase performance (due to better memory locality), and to decrease
+ /// memory fragmentation due to the way that fixed block allocators work.
+ ///
+ /// The primary downside to a fixed_list is that the number of nodes it
+ /// can contain is fixed upon its declaration. If you want a fixed_list
+ /// that doesn't have this limitation, then you probably don't want a
+ /// fixed_list. You can always create your own memory allocator that works
+ /// the way you want.
+ ///
+ /// Template parameters:
+ /// T The type of object the list holds.
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+	template <typename T, size_t nodeCount, bool bEnableOverflow = true, typename Allocator = EASTLAllocatorType>
+	class fixed_list : public list<T, fixed_node_allocator<sizeof(typename list<T>::node_type),
+						nodeCount, list<T>::kAlignment, list<T>::kAlignmentOffset, bEnableOverflow, Allocator> >
+	{
+	public:
+		typedef fixed_list<T, nodeCount, bEnableOverflow, Allocator> this_type;
+		typedef fixed_node_allocator<sizeof(typename list<T>::node_type), nodeCount,
+					list<T>::kAlignment, list<T>::kAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+		typedef list<T, fixed_allocator_type> base_type;
+		typedef typename base_type::size_type size_type;
+		typedef typename base_type::value_type value_type;
+		typedef typename base_type::node_type node_type;
+		typedef typename base_type::iterator iterator;
+
+		enum
+		{
+			kMaxSize = nodeCount // Fixed capacity; reported by max_size().
+		};
+
+		using base_type::assign;
+		using base_type::resize;
+
+	protected:
+		char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+		using base_type::mAllocator;
+
+	public:
+		fixed_list();
+		explicit fixed_list(size_type n);
+		fixed_list(size_type n, const value_type& value);
+		fixed_list(const this_type& x);
+
+		template <typename InputIterator>
+		fixed_list(InputIterator first, InputIterator last);
+
+		this_type& operator=(const this_type& x);
+
+		void swap(this_type& x);
+		void reset();
+		size_type max_size() const;              // Returns the max fixed size, which is the user-supplied nodeCount parameter.
+		bool has_overflowed() const;             // Returns true if the fixed space is fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot.
+
+		// sort is re-declared here (hiding any base version) because fixed_list
+		// provides its own implementation; see the out-of-class definitions.
+		template<typename Compare>
+		void sort(Compare compare);
+		void sort();
+
+		// merge/splice are re-declared because nodes cannot be relinked between
+		// distinct fixed pools; see the out-of-class definitions for semantics.
+		template <typename Compare>
+		void merge(this_type& x, Compare compare);
+		void merge(this_type& x);
+
+		void splice(iterator position, this_type& x);
+		void splice(iterator position, this_type& x, iterator i);
+		void splice(iterator position, this_type& x, iterator first, iterator last);
+
+		// Deprecated:
+		bool full() const { return has_overflowed(); }
+
+	}; // fixed_list
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_list
+ ///////////////////////////////////////////////////////////////////////
+
+	/// fixed_list
+	/// Default-constructs an empty fixed_list. NOTE(review): the allocator is
+	/// constructed with a NULL pool and rebound to mBuffer afterward —
+	/// presumably because the member buffer follows the base subobject in
+	/// initialization order; confirm against the other fixed containers.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline fixed_list<T, nodeCount, bEnableOverflow, Allocator>::fixed_list()
+		: base_type(fixed_allocator_type(NULL))
+	{
+		#if EASTL_NAME_ENABLED
+			mAllocator.set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+		#endif
+
+		mAllocator.reset(mBuffer);
+	}
+
+
+	/// fixed_list
+	/// Constructs a fixed_list of n default-constructed elements. The allocator
+	/// must be rebound to mBuffer before resize() allocates any nodes.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline fixed_list<T, nodeCount, bEnableOverflow, Allocator>::fixed_list(size_type n)
+		: base_type(fixed_allocator_type(NULL))
+	{
+		#if EASTL_NAME_ENABLED
+			mAllocator.set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+		#endif
+
+		mAllocator.reset(mBuffer);
+		resize(n);
+	}
+
+
+	/// fixed_list
+	/// Constructs a fixed_list of n copies of value.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline fixed_list<T, nodeCount, bEnableOverflow, Allocator>::fixed_list(size_type n, const value_type& value)
+		: base_type(fixed_allocator_type(NULL))
+	{
+		#if EASTL_NAME_ENABLED
+			mAllocator.set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+		#endif
+
+		mAllocator.reset(mBuffer);
+		resize(n, value);
+	}
+
+
+	/// fixed_list
+	/// Copy constructor. Copies x's elements into this container's own node
+	/// pool; only the allocator name (when enabled) is copied from x.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline fixed_list<T, nodeCount, bEnableOverflow, Allocator>::fixed_list(const this_type& x)
+		: base_type(fixed_allocator_type(NULL))
+	{
+		#if EASTL_NAME_ENABLED
+			mAllocator.set_name(x.mAllocator.get_name());
+		#endif
+
+		mAllocator.reset(mBuffer);
+		assign(x.begin(), x.end());
+	}
+
+
+	/// fixed_list
+	/// Constructs a fixed_list from the input range [first, last).
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	template <typename InputIterator>
+	fixed_list<T, nodeCount, bEnableOverflow, Allocator>::fixed_list(InputIterator first, InputIterator last)
+		: base_type(fixed_allocator_type(NULL))
+	{
+		#if EASTL_NAME_ENABLED
+			mAllocator.set_name(EASTL_FIXED_LIST_DEFAULT_NAME);
+		#endif
+
+		mAllocator.reset(mBuffer);
+		assign(first, last);
+	}
+
+
+	/// operator=
+	/// Copy assignment: clears this container and copies x's elements into this
+	/// container's own node pool. The allocator itself is copied only when
+	/// EASTL_ALLOCATOR_COPY_ENABLED is set.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline typename fixed_list<T, nodeCount, bEnableOverflow, Allocator>::this_type&
+	fixed_list<T, nodeCount, bEnableOverflow, Allocator>::operator=(const this_type& x)
+	{
+		if(this != &x)
+		{
+			base_type::clear();
+
+			#if EASTL_ALLOCATOR_COPY_ENABLED
+				mAllocator = x.mAllocator;
+			#endif
+
+			base_type::assign(x.begin(), x.end()); // It would probably be better to implement this like list::operator=.
+		}
+		return *this;
+	}
+
+
+	/// swap
+	/// Swaps the contents of two fixed_lists of identical capacity.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::swap(this_type& x)
+	{
+		// Fixed containers use a special swap that can deal with excessively large buffers.
+		eastl::fixed_swap(*this, x);
+	}
+
+
+	/// reset
+	/// Empties the container and rebinds the fixed allocator to the local
+	/// buffer, returning the node pool to its freshly-constructed state.
+	/// NOTE(review): presumably a fast unilateral reset with no per-element
+	/// teardown — confirm against list::reset.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::reset()
+	{
+		base_type::reset();
+		base_type::get_allocator().reset(mBuffer);
+	}
+
+
+	/// max_size
+	/// Returns the fixed capacity (the nodeCount template parameter).
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline typename fixed_list<T, nodeCount, bEnableOverflow, Allocator>::size_type
+	fixed_list<T, nodeCount, bEnableOverflow, Allocator>::max_size() const
+	{
+		return kMaxSize;
+	}
+
+
+	/// has_overflowed
+	/// Returns true when the fixed pool has no free slot left (i.e. the next
+	/// allocation would come from the overflow allocator, if enabled).
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline bool fixed_list<T, nodeCount, bEnableOverflow, Allocator>::has_overflowed() const
+	{
+		return !mAllocator.can_allocate();
+	}
+
+
+	/// sort
+	/// Sorts the list with operator<. NOTE(review): insertion_sort is O(n^2);
+	/// presumably chosen because it works with bidirectional iterators and
+	/// needs no extra memory — acceptable for small fixed capacities.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::sort()
+	{
+		eastl::insertion_sort(base_type::begin(), base_type::end());
+	}
+
+
+	/// sort
+	/// Sorts the list with a user-supplied comparison. See the note on the
+	/// compare-less overload regarding the O(n^2) insertion sort.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	template <typename Compare>
+	inline void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::sort(Compare compare)
+	{
+		eastl::insertion_sort(base_type::begin(), base_type::end(), compare);
+	}
+
+
+	/// merge
+	/// Merges the (sorted) contents of x into this (sorted) container, emptying x.
+	/// Previously an unimplemented silent no-op ("To do"), which was a bug trap
+	/// for callers expecting list::merge semantics. Unlike list::merge we cannot
+	/// relink nodes, because each fixed_list owns its own node pool; instead each
+	/// element is copy-inserted and x is then cleared. Consequently T must be
+	/// copy-constructible and iterators into x are invalidated.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::merge(this_type& x)
+	{
+		if(this != &x)
+		{
+			iterator       position(base_type::begin());
+			const iterator positionEnd(base_type::end());
+
+			for(iterator xi(x.begin()), xiEnd(x.end()); xi != xiEnd; ++xi)
+			{
+				// Stable merge: advance past existing elements that are <= *xi,
+				// so equivalent elements already in *this come first.
+				while((position != positionEnd) && !(*xi < *position))
+					++position;
+				base_type::insert(position, *xi);
+			}
+			x.clear();
+		}
+	}
+
+	/// merge
+	/// Merges the contents of x (sorted by compare) into this container (also
+	/// sorted by compare), emptying x. Previously an unimplemented silent no-op
+	/// ("To do"). Elements are copy-inserted because nodes cannot be relinked
+	/// across distinct fixed pools; T must be copy-constructible and iterators
+	/// into x are invalidated.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	template <typename Compare>
+	void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::merge(this_type& x, Compare compare)
+	{
+		if(this != &x)
+		{
+			iterator       position(base_type::begin());
+			const iterator positionEnd(base_type::end());
+
+			for(iterator xi(x.begin()), xiEnd(x.end()); xi != xiEnd; ++xi)
+			{
+				// Stable merge: equivalent elements already in *this come first.
+				while((position != positionEnd) && !compare(*xi, *position))
+					++position;
+				base_type::insert(position, *xi);
+			}
+			x.clear();
+		}
+	}
+
+	/// splice
+	/// Moves all of x's elements to position within this container, emptying x.
+	/// Previously an unimplemented silent no-op ("To do"). Unlike list::splice
+	/// we cannot transfer nodes between containers, because each fixed_list owns
+	/// its own node pool; elements are copy-inserted and x is then cleared, so T
+	/// must be copy-constructible and iterators into x are invalidated.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::splice(iterator position, this_type& x)
+	{
+		if(this != &x) // Splicing an entire list into itself is undefined, as with list::splice.
+		{
+			base_type::insert(position, x.begin(), x.end());
+			x.clear();
+		}
+	}
+
+	/// splice
+	/// Moves the element at i (in x) to position within this container.
+	/// Previously an unimplemented silent no-op ("To do"). Implemented as a
+	/// copy-insert followed by erase, since nodes cannot be relinked across
+	/// distinct fixed pools; works for the x == *this case as well.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::splice(iterator position, this_type& x, iterator i)
+	{
+		base_type::insert(position, *i);
+		x.erase(i);
+	}
+
+	/// splice
+	/// Moves the range [first, last) (in x) to position within this container.
+	/// Previously an unimplemented silent no-op ("To do"). Implemented as a
+	/// copy-insert of the range followed by erase, since nodes cannot be
+	/// relinked across distinct fixed pools. As with list::splice, position
+	/// must not be within [first, last) when x is this container.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	void fixed_list<T, nodeCount, bEnableOverflow, Allocator>::splice(iterator position, this_type& x, iterator first, iterator last)
+	{
+		base_type::insert(position, first, last);
+		x.erase(first, last);
+	}
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	/// swap
+	/// Global swap for fixed_list; forwards to eastl::fixed_swap.
+	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+	inline void swap(fixed_list<T, nodeCount, bEnableOverflow, Allocator>& a,
+					 fixed_list<T, nodeCount, bEnableOverflow, Allocator>& b)
+	{
+		// Fixed containers use a special swap that can deal with excessively large buffers.
+		eastl::fixed_swap(a, b);
+	}
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_map.h b/UnknownVersion/include/EASTL/fixed_map.h
new file mode 100644
index 0000000..75a83c6
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_map.h
@@ -0,0 +1,358 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_map.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a map and multimap which use a fixed size memory
+// pool for their nodes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_MAP_H
+#define EASTL_FIXED_MAP_H
+
+
+#include <EASTL/map.h>
+#include <EASTL/fixed_set.h> // Included because fixed_rbtree_base resides here.
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_MAP_DEFAULT_NAME
+ #define EASTL_FIXED_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_map" // Unless the user overrides something, this is "EASTL fixed_map".
+ #endif
+
+ #ifndef EASTL_FIXED_MULTIMAP_DEFAULT_NAME
+ #define EASTL_FIXED_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multimap" // Unless the user overrides something, this is "EASTL fixed_multimap".
+ #endif
+
+
+
+ /// fixed_map
+ ///
+ /// Implements a map with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Key The key object (key in the key/value pair).
+ /// T The mapped object (value in the key/value pair).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+	template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+	class fixed_map : public map<Key, T, Compare, fixed_node_allocator<sizeof(typename map<Key, T>::node_type),
+						nodeCount, map<Key, T>::kValueAlignment, map<Key, T>::kValueAlignmentOffset, bEnableOverflow, Allocator> >
+	{
+	public:
+		typedef fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, Allocator> this_type;
+		typedef fixed_node_allocator<sizeof(typename map<Key, T>::node_type), nodeCount,
+					map<Key, T>::kValueAlignment, map<Key, T>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+		typedef map<Key, T, Compare, fixed_allocator_type> base_type;
+		typedef typename base_type::value_type value_type;
+		typedef typename base_type::node_type node_type;
+		typedef typename base_type::size_type size_type;
+
+		enum
+		{
+			kMaxSize = nodeCount // Fixed capacity; reported by max_size().
+		};
+
+		using base_type::insert;
+
+	protected:
+		char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+		using base_type::mAllocator;
+
+	public:
+		/// fixed_map
+		/// Default constructor. The allocator is constructed with a NULL pool
+		/// and immediately rebound to the member buffer.
+		fixed_map()
+			: base_type(fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mBuffer);
+		}
+
+
+		/// fixed_map
+		/// Constructs an empty fixed_map with the given comparison functor.
+		explicit fixed_map(const Compare& compare)
+			: base_type(compare, fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mBuffer);
+		}
+
+
+		/// fixed_map
+		/// Copy constructor. Copies x's comparison functor (via the protected
+		/// base member mCompare) and its elements; the elements land in this
+		/// container's own node pool.
+		fixed_map(const this_type& x)
+			: base_type(x.mCompare, fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(x.mAllocator.get_name());
+			#endif
+
+			mAllocator.reset(mBuffer);
+			base_type::operator=(x);
+		}
+
+
+		/// fixed_map
+		/// Constructs a fixed_map from the input range [first, last).
+		template <typename InputIterator>
+		fixed_map(InputIterator first, InputIterator last)
+			: base_type(fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_MAP_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mBuffer);
+			insert(first, last);
+		}
+
+
+		/// operator=
+		/// Forwards to the base assignment; the fixed allocator and its local
+		/// buffer remain this container's own.
+		this_type& operator=(const this_type& x)
+		{
+			base_type::operator=(x);
+			return *this;
+		}
+
+
+		/// swap
+		/// Swaps the contents of two fixed_maps of identical capacity.
+		void swap(this_type& x)
+		{
+			// Fixed containers use a special swap that can deal with excessively large buffers.
+			eastl::fixed_swap(*this, x);
+		}
+
+
+		/// reset
+		/// Empties the container and rebinds the allocator to the local buffer.
+		/// NOTE(review): presumably a fast unilateral reset with no per-element
+		/// teardown — confirm against rbtree::reset.
+		void reset()
+		{
+			base_type::reset();
+			base_type::get_allocator().reset(mBuffer);
+		}
+
+
+		/// max_size
+		/// Returns the fixed capacity (the nodeCount template parameter).
+		size_type max_size() const
+		{
+			return kMaxSize;
+		}
+
+	}; // fixed_map
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	/// swap
+	/// Global swap for fixed_map; forwards to eastl::fixed_swap.
+	template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename Allocator>
+	inline void swap(fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, Allocator>& a,
+					 fixed_map<Key, T, nodeCount, bEnableOverflow, Compare, Allocator>& b)
+	{
+		// Fixed containers use a special swap that can deal with excessively large buffers.
+		eastl::fixed_swap(a, b);
+	}
+
+
+
+
+
+ /// fixed_multimap
+ ///
+ /// Implements a multimap with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Key The key object (key in the key/value pair).
+ /// T The mapped object (value in the key/value pair).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+	template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+	class fixed_multimap : public multimap<Key, T, Compare, fixed_node_allocator<sizeof(typename multimap<Key, T>::node_type),
+						nodeCount, multimap<Key, T>::kValueAlignment, multimap<Key, T>::kValueAlignmentOffset, bEnableOverflow, Allocator> >
+	{
+	public:
+		typedef fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, Allocator> this_type;
+		typedef fixed_node_allocator<sizeof(typename multimap<Key, T>::node_type), nodeCount,
+					multimap<Key, T>::kValueAlignment, multimap<Key, T>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+		typedef multimap<Key, T, Compare, fixed_allocator_type> base_type;
+		typedef typename base_type::value_type value_type;
+		typedef typename base_type::node_type node_type;
+		typedef typename base_type::size_type size_type;
+
+		enum
+		{
+			kMaxSize = nodeCount // Fixed capacity; reported by max_size().
+		};
+
+		using base_type::insert;
+
+	protected:
+		char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+		using base_type::mAllocator;
+
+	public:
+		/// fixed_multimap
+		/// Default constructor. The allocator is constructed with a NULL pool
+		/// and immediately rebound to the member buffer.
+		fixed_multimap()
+			: base_type(fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mBuffer);
+		}
+
+
+		/// fixed_multimap
+		/// Constructs an empty fixed_multimap with the given comparison functor.
+		/// Fix: previously this constructor alone passed fixed_allocator_type(mBuffer),
+		/// inconsistent with every other fixed_map/fixed_multimap constructor,
+		/// all of which pass NULL and then rebind via mAllocator.reset(mBuffer).
+		/// The NULL form is used for consistency; behavior is unchanged because
+		/// reset(mBuffer) follows before any allocation can occur.
+		explicit fixed_multimap(const Compare& compare)
+			: base_type(compare, fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mBuffer);
+		}
+
+
+		/// fixed_multimap
+		/// Copy constructor. Copies x's comparison functor (via the protected
+		/// base member mCompare) and its elements; the elements land in this
+		/// container's own node pool.
+		fixed_multimap(const this_type& x)
+			: base_type(x.mCompare, fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(x.mAllocator.get_name());
+			#endif
+
+			mAllocator.reset(mBuffer);
+			base_type::operator=(x);
+		}
+
+
+		/// fixed_multimap
+		/// Constructs a fixed_multimap from the input range [first, last).
+		template <typename InputIterator>
+		fixed_multimap(InputIterator first, InputIterator last)
+			: base_type(fixed_allocator_type(NULL))
+		{
+			#if EASTL_NAME_ENABLED
+				mAllocator.set_name(EASTL_FIXED_MULTIMAP_DEFAULT_NAME);
+			#endif
+
+			mAllocator.reset(mBuffer);
+			insert(first, last);
+		}
+
+
+		/// operator=
+		/// Forwards to the base assignment; the fixed allocator and its local
+		/// buffer remain this container's own.
+		this_type& operator=(const this_type& x)
+		{
+			base_type::operator=(x);
+			return *this;
+		}
+
+
+		/// swap
+		/// Swaps the contents of two fixed_multimaps of identical capacity.
+		void swap(this_type& x)
+		{
+			// Fixed containers use a special swap that can deal with excessively large buffers.
+			eastl::fixed_swap(*this, x);
+		}
+
+
+		/// reset
+		/// Empties the container and rebinds the allocator to the local buffer.
+		/// NOTE(review): presumably a fast unilateral reset with no per-element
+		/// teardown — confirm against rbtree::reset.
+		void reset()
+		{
+			base_type::reset();
+			base_type::get_allocator().reset(mBuffer);
+		}
+
+
+		/// max_size
+		/// Returns the fixed capacity (the nodeCount template parameter).
+		size_type max_size() const
+		{
+			return kMaxSize;
+		}
+
+	}; // fixed_multimap
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	/// swap
+	/// Global swap for fixed_multimap; forwards to eastl::fixed_swap.
+	template <typename Key, typename T, size_t nodeCount, bool bEnableOverflow, typename Compare, typename Allocator>
+	inline void swap(fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, Allocator>& a,
+					 fixed_multimap<Key, T, nodeCount, bEnableOverflow, Compare, Allocator>& b)
+	{
+		// Fixed containers use a special swap that can deal with excessively large buffers.
+		eastl::fixed_swap(a, b);
+	}
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_set.h b/UnknownVersion/include/EASTL/fixed_set.h
new file mode 100644
index 0000000..de676fb
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_set.h
@@ -0,0 +1,360 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_set.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a set and multiset which use a fixed size memory
+// pool for their nodes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_FIXED_SET_H
+#define EASTL_FIXED_SET_H
+
+
+#include <EASTL/set.h>
+#include <EASTL/internal/fixed_pool.h>
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_SET_DEFAULT_NAME
+ #define EASTL_FIXED_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_set" // Unless the user overrides something, this is "EASTL fixed_set".
+ #endif
+
+ #ifndef EASTL_FIXED_MULTISET_DEFAULT_NAME
+ #define EASTL_FIXED_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_multiset" // Unless the user overrides something, this is "EASTL fixed_multiset".
+ #endif
+
+
+
+ /// fixed_set
+ ///
+ /// Implements a set with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Template parameters:
+ /// Key The type of object the set holds (a.k.a. value).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename Key, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class fixed_set : public set<Key, Compare, fixed_node_allocator<sizeof(typename set<Key>::node_type),
+ nodeCount, set<Key>::kValueAlignment, set<Key>::kValueAlignmentOffset, bEnableOverflow, Allocator> >
+ {
+ public:
+ typedef fixed_set<Key, nodeCount, bEnableOverflow, Compare, Allocator> this_type;
+ typedef fixed_node_allocator<sizeof(typename set<Key>::node_type), nodeCount,
+ set<Key>::kValueAlignment, set<Key>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+ typedef set<Key, Compare, fixed_allocator_type> base_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum
+ {
+ kMaxSize = nodeCount // Hard node capacity; beyond this the overflow allocator is used (if bEnableOverflow).
+ };
+
+ using base_type::insert;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::mAllocator; // The fixed_node_allocator inherited from the base set.
+
+ public:
+ /// fixed_set
+ ///
+ fixed_set()
+ : base_type(fixed_allocator_type(NULL)) // NULL pool for now; mBuffer is attached below.
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mBuffer); // Point the allocator at our local node buffer.
+ }
+
+
+ /// fixed_set
+ ///
+ explicit fixed_set(const Compare& compare)
+ : base_type(compare, fixed_allocator_type(NULL))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mBuffer);
+ }
+
+
+ /// fixed_set
+ ///
+ fixed_set(const this_type& x)
+ : base_type(x.mCompare, fixed_allocator_type(NULL))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name()); // Copy the source container's debug name.
+ #endif
+
+ mAllocator.reset(mBuffer);
+ base_type::operator=(x); // Deep-copy elements; nodes are drawn from our own mBuffer.
+ }
+
+
+ /// fixed_set
+ ///
+ template <typename InputIterator>
+ fixed_set(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(NULL))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_SET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mBuffer);
+ insert(first, last); // Populate from [first, last) once the pool is ready.
+ }
+
+
+ /// operator=
+ ///
+ this_type& operator=(const this_type& x)
+ {
+ base_type::operator=(x); // Element-wise copy; the allocator/buffer are not copied.
+ return *this;
+ }
+
+
+ void swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ void reset() // Unilaterally empties the container and rewinds the node pool.
+ {
+ base_type::reset();
+ base_type::get_allocator().reset(mBuffer);
+ }
+
+
+ size_type max_size() const // Reports the fixed capacity rather than the overflow heap's limit.
+ {
+ return kMaxSize;
+ }
+
+ }; // fixed_set
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename Allocator>
+ inline void swap(fixed_set<Key, nodeCount, bEnableOverflow, Compare, Allocator>& a,
+ fixed_set<Key, nodeCount, bEnableOverflow, Compare, Allocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b); // ADL-visible overload so unqualified swap(a, b) dispatches here.
+ }
+
+
+
+
+
+
+
+
+ /// fixed_multiset
+ ///
+ /// Implements a multiset with a fixed block of memory identified by the
+ /// nodeCount template parameter.
+ ///
+ /// Template parameters:
+ /// Key The type of object the set holds (a.k.a. value).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the global heap if our object pool is exhausted.
+ /// Compare Compare function/object for set ordering.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ template <typename Key, size_t nodeCount, bool bEnableOverflow = true, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+ class fixed_multiset : public multiset<Key, Compare, fixed_node_allocator<sizeof(typename multiset<Key>::node_type),
+ nodeCount, multiset<Key>::kValueAlignment, multiset<Key>::kValueAlignmentOffset, bEnableOverflow, Allocator> >
+ {
+ public:
+ typedef fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, Allocator> this_type;
+ typedef fixed_node_allocator<sizeof(typename multiset<Key>::node_type), nodeCount,
+ multiset<Key>::kValueAlignment, multiset<Key>::kValueAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+ typedef multiset<Key, Compare, fixed_allocator_type> base_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::size_type size_type;
+
+ enum
+ {
+ kMaxSize = nodeCount // Hard node capacity; beyond this the overflow allocator is used (if bEnableOverflow).
+ };
+
+ using base_type::insert;
+
+ protected:
+ char mBuffer[fixed_allocator_type::kBufferSize]; // kBufferSize will take into account alignment requirements.
+
+ using base_type::mAllocator; // The fixed_node_allocator inherited from the base multiset.
+
+ public:
+ /// fixed_multiset
+ ///
+ fixed_multiset()
+ : base_type(fixed_allocator_type(NULL)) // NULL pool for now; mBuffer is attached below.
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mBuffer); // Point the allocator at our local node buffer.
+ }
+
+
+ /// fixed_multiset
+ ///
+ explicit fixed_multiset(const Compare& compare)
+ : base_type(compare, fixed_allocator_type(NULL))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mBuffer);
+ }
+
+
+ /// fixed_multiset
+ ///
+ fixed_multiset(const this_type& x)
+ : base_type(x.mCompare, fixed_allocator_type(NULL))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name()); // Copy the source container's debug name.
+ #endif
+
+ mAllocator.reset(mBuffer);
+ base_type::operator=(x); // Deep-copy elements; nodes are drawn from our own mBuffer.
+ }
+
+
+ /// fixed_multiset
+ ///
+ template <typename InputIterator>
+ fixed_multiset(InputIterator first, InputIterator last)
+ : base_type(fixed_allocator_type(NULL))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_MULTISET_DEFAULT_NAME);
+ #endif
+
+ mAllocator.reset(mBuffer);
+ insert(first, last); // Populate from [first, last) once the pool is ready.
+ }
+
+
+ /// operator=
+ ///
+ this_type& operator=(const this_type& x)
+ {
+ base_type::operator=(x); // Element-wise copy; the allocator/buffer are not copied.
+ return *this;
+ }
+
+
+ void swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x);
+ }
+
+
+ void reset() // Unilaterally empties the container and rewinds the node pool.
+ {
+ base_type::reset();
+ base_type::get_allocator().reset(mBuffer);
+ }
+
+
+ size_type max_size() const // Reports the fixed capacity rather than the overflow heap's limit.
+ {
+ return kMaxSize;
+ }
+
+ }; // fixed_multiset
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Key, size_t nodeCount, bool bEnableOverflow, typename Compare, typename Allocator>
+ inline void swap(fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, Allocator>& a,
+ fixed_multiset<Key, nodeCount, bEnableOverflow, Compare, Allocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b); // ADL-visible overload so unqualified swap(a, b) dispatches here.
+ }
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_string.h b/UnknownVersion/include/EASTL/fixed_string.h
new file mode 100644
index 0000000..54cd90a
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_string.h
@@ -0,0 +1,539 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_string.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a string which uses a fixed size memory pool.
+// The bEnableOverflow template parameter allows the container to resort to
+// heap allocations if the memory pool is exhausted.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_STRING_H
+#define EASTL_FIXED_STRING_H
+
+
+#include <EASTL/internal/config.h>
+#if EASTL_ABSTRACT_STRING_ENABLED
+ #include <EASTL/bonus/fixed_string_abstract.h>
+#else // 'else' encompasses the entire rest of this file.
+#include <EASTL/string.h>
+#include <EASTL/internal/fixed_pool.h>
+
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_STRING_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_STRING_DEFAULT_NAME
+ #define EASTL_FIXED_STRING_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_string" // Unless the user overrides something, this is "EASTL fixed_string".
+ #endif
+
+
+
+ /// fixed_string
+ ///
+ /// A fixed_string with bEnableOverflow == true is identical to a regular
+ /// string in terms of its behavior. All the expectations of regular string
+ /// apply to it and no additional expectations come from it. When bEnableOverflow
+ /// is false, fixed_string behaves like regular string with the exception that
+ /// its capacity can never increase. All operations you do on such a fixed_string
+ /// which require a capacity increase will result in undefined behavior or a
+ /// C++ allocation exception, depending on the configuration of EASTL.
+ ///
+ /// Note: The nodeCount value is the amount of characters to allocate, which needs to
+ /// take into account a terminating zero. Thus if you want to store strings with a strlen
+ /// of 30, the nodeCount value must be at least 31.
+ ///
+ /// Template parameters:
+ /// T The type of object the string holds (char, wchar_t, char8_t, char16_t, char32_t).
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ /// Notes:
+ /// The nodeCount value must be at least 2, one for a character and one for a terminating 0.
+ ///
+ /// As of this writing, the string class necessarily reallocates when an insert of
+ /// self is done into self. As a result, the fixed_string class doesn't support
+ /// inserting self into self unless the bEnableOverflow template parameter is true.
+ ///
+ /// Example usage:
+ /// fixed_string<char, 128 + 1, true> fixedString("hello world"); // Can hold up to a strlen of 128.
+ ///
+ /// fixedString = "hola mundo";
+ /// fixedString.clear();
+ /// fixedString.resize(200);
+ /// fixedString.sprintf("%f", 1.5f);
+ ///
+ template <typename T, int nodeCount, bool bEnableOverflow = true, typename Allocator = EASTLAllocatorType>
+ class fixed_string : public basic_string<T, fixed_vector_allocator<sizeof(T), nodeCount, basic_string<T>::kAlignment, basic_string<T>::kAlignmentOffset, bEnableOverflow, Allocator> >
+ {
+ public:
+ typedef fixed_vector_allocator<sizeof(T), nodeCount, basic_string<T>::kAlignment,
+ basic_string<T>::kAlignmentOffset, bEnableOverflow, Allocator> fixed_allocator_type;
+ typedef typename fixed_allocator_type::overflow_allocator_type overflow_allocator_type;
+ typedef basic_string<T, fixed_allocator_type> base_type;
+ typedef fixed_string<T, nodeCount, bEnableOverflow, Allocator> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::CtorDoNotInitialize CtorDoNotInitialize;
+ typedef typename base_type::CtorSprintf CtorSprintf;
+ typedef aligned_buffer<nodeCount * sizeof(T), basic_string<T>::kAlignment> aligned_buffer_type;
+
+ enum
+ {
+ kMaxSize = nodeCount - 1 // -1 because we need to save one element for the silent terminating null.
+ };
+
+ using base_type::mAllocator;
+ using base_type::npos;
+ using base_type::mpBegin;
+ using base_type::mpEnd;
+ using base_type::mpCapacity;
+ using base_type::append;
+ using base_type::resize;
+ using base_type::clear;
+ using base_type::size;
+ using base_type::sprintf_va_list;
+
+ protected:
+ union // We define a union in order to avoid strict pointer aliasing issues with compilers like GCC.
+ {
+ value_type mArray[1]; // Typed alias of mBuffer; the constructors point mpBegin here.
+ aligned_buffer_type mBuffer; // Question: Why are we doing this aligned_buffer thing? Why not just do an array of value_type, given that we are using just strings of char types.
+ };
+
+ public:
+ fixed_string();
+ fixed_string(const base_type& x, size_type position, size_type n = base_type::npos);
+ fixed_string(const value_type* p, size_type n);
+ fixed_string(const value_type* p);
+ fixed_string(size_type n, const value_type& value);
+ fixed_string(const this_type& x);
+ fixed_string(const base_type& x);
+ fixed_string(const value_type* pBegin, const value_type* pEnd);
+ fixed_string(CtorDoNotInitialize, size_type n);
+ fixed_string(CtorSprintf, const value_type* pFormat, ...);
+
+ this_type& operator=(const this_type& x);
+ this_type& operator=(const base_type& x);
+ this_type& operator=(const value_type* p);
+ this_type& operator=(const value_type c);
+
+ void swap(this_type& x);
+
+ void set_capacity(size_type n);
+ void reset();
+ size_type max_size() const;
+
+ // The inherited versions of substr/left/right call the basic_string constructor,
+ // which will call the overflow allocator and fail if bEnableOverflow == false
+ this_type substr(size_type position, size_type n) const;
+ this_type left(size_type n) const;
+ this_type right(size_type n) const;
+
+ // Allocator
+ overflow_allocator_type& get_overflow_allocator(); // Access to the heap allocator used when the fixed buffer is exhausted.
+ void set_overflow_allocator(const overflow_allocator_type& allocator);
+
+ }; // fixed_string
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_string
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string()
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray; // Empty string stored in the local buffer.
+ mpCapacity = mpBegin + nodeCount; // Capacity includes the slot reserved for the terminating 0.
+ *mpBegin = 0; // Write the terminating null.
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(const this_type& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray;
+ mpCapacity = mpBegin + nodeCount;
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name()); // Copy the source string's debug name.
+ #endif
+
+ append(x); // Copy the source characters into our local buffer.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(const base_type& x)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray; // Start empty, then copy from x below.
+ mpCapacity = mpBegin + nodeCount;
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.get_allocator().get_name());
+ #endif
+
+ append(x);
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(const base_type& x, size_type position, size_type n)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray;
+ mpCapacity = mpBegin + nodeCount;
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.get_allocator().get_name());
+ #endif
+
+ append(x, position, n); // Copy the substring x[position, position + n).
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(const value_type* p, size_type n)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray;
+ mpCapacity = mpBegin + nodeCount;
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ append(p, n); // Copy exactly n characters from p.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(const value_type* p)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray;
+ mpCapacity = mpBegin + nodeCount; // Room for nodeCount - 1 chars plus the terminating 0.
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ append(p); // There better be enough space to hold the assigned string.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(size_type n, const value_type& value)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray; // Start empty, then fill below.
+ mpCapacity = mpBegin + nodeCount;
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ append(n, value); // There better be enough space to hold the assigned string.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(const value_type* pBegin, const value_type* pEnd)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray;
+ mpCapacity = mpBegin + nodeCount;
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ append(pBegin, pEnd); // Copy the half-open range [pBegin, pEnd).
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(CtorDoNotInitialize, size_type n)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ mpBegin = mArray;
+ mpCapacity = mpBegin + nodeCount;
+
+ if((mpBegin + n) < mpCapacity) // Fits in the fixed buffer: size without initializing the characters.
+ {
+ mpEnd = mpBegin + n;
+ *mpEnd = 0;
+ }
+ else // Too big for the fixed buffer: fall back to resize(), which may use the overflow allocator.
+ {
+ mpEnd = mArray;
+ *mpEnd = 0;
+ resize(n);
+ }
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ fixed_string(CtorSprintf, const value_type* pFormat, ...)
+ : base_type(fixed_allocator_type(mBuffer.buffer))
+ {
+ mpBegin = mpEnd = mArray;
+ mpCapacity = mpBegin + nodeCount;
+ *mpBegin = 0;
+
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(EASTL_FIXED_STRING_DEFAULT_NAME);
+ #endif
+
+ va_list arguments;
+ va_start(arguments, pFormat);
+ sprintf_va_list(pFormat, arguments); // Format directly into this string.
+ va_end(arguments); // Always pair va_start with va_end.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::this_type&
+ fixed_string<T, nodeCount, bEnableOverflow, Allocator>::operator=(const this_type& x)
+ {
+ if(this != &x) // Guard against self-assignment.
+ {
+ clear();
+ append(x);
+ }
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, Allocator>::operator=(const base_type& x)
+ {
+ if(static_cast<base_type*>(this) != &x) // Guard against self-assignment via the base-class slice.
+ {
+ clear();
+ append(x);
+ }
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, Allocator>::operator=(const value_type* p)
+ {
+ if(mpBegin != p) // Guard against assigning our own buffer to ourselves.
+ {
+ clear();
+ append(p);
+ }
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ this_type& fixed_string<T, nodeCount, bEnableOverflow, Allocator>::operator=(const value_type c)
+ {
+ clear();
+ append((size_type)1, c); // Become a one-character string.
+ return *this;
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline void fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ swap(this_type& x)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(*this, x); // Element exchange rather than pointer exchange, since buffers are embedded.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline void fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ set_capacity(size_type n)
+ {
+ // We act consistently with vector::set_capacity and reduce our
+ // size if the new capacity is smaller than our size.
+ if(n < size())
+ resize(n); // Shrink only; the fixed buffer cannot grow.
+ // To consider: If bEnableOverflow is true, then perhaps we should
+ // switch to the overflow allocator and set the capacity.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline void fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ reset() // Unilaterally reverts to an empty string using the local buffer.
+ {
+ mpBegin = mpEnd = mArray;
+ mpCapacity = mpBegin + nodeCount;
+ // NOTE(review): unlike the constructors, this does not write a terminating 0 at mArray[0] — confirm c_str() is not called before the next mutation.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ size_type fixed_string<T, nodeCount, bEnableOverflow, Allocator>::max_size() const
+ {
+ return kMaxSize; // nodeCount - 1: one slot is reserved for the terminating null.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ this_type fixed_string<T, nodeCount, bEnableOverflow, Allocator>::substr(size_type position, size_type n) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(position > (size_type)(mpEnd - mpBegin)) // Out-of-range start position.
+ ThrowRangeException();
+ #endif
+
+ return fixed_string(mpBegin + position, mpBegin + position + eastl::min_alt(n, (size_type)(mpEnd - mpBegin) - position)); // Clamp n to the available tail.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ this_type fixed_string<T, nodeCount, bEnableOverflow, Allocator>::left(size_type n) const
+ {
+ const size_type nLength = size();
+ if(n < nLength) // Requesting fewer characters than we have: return the prefix.
+ return fixed_string(mpBegin, mpBegin + n);
+ return *this; // n >= length: return the whole string.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ this_type fixed_string<T, nodeCount, bEnableOverflow, Allocator>::right(size_type n) const
+ {
+ const size_type nLength = size();
+ if(n < nLength) // Requesting fewer characters than we have: return the suffix.
+ return fixed_string(mpEnd - n, mpEnd);
+ return *this; // n >= length: return the whole string.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_string<T, nodeCount, bEnableOverflow, Allocator>::
+ overflow_allocator_type& fixed_string<T, nodeCount, bEnableOverflow, Allocator>::get_overflow_allocator()
+ {
+ return mAllocator.get_overflow_allocator(); // The allocator used once the fixed buffer is exhausted.
+ }
+
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline void
+ fixed_string<T, nodeCount, bEnableOverflow, Allocator>::set_overflow_allocator(const overflow_allocator_type& allocator)
+ {
+ mAllocator.set_overflow_allocator(allocator); // Replace the heap allocator used for overflow (only if bEnableOverflow).
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ // operator ==, !=, <, >, <=, >= come from the string implementations.
+
+ template <typename T, int nodeCount, bool bEnableOverflow, typename Allocator>
+ inline void swap(fixed_string<T, nodeCount, bEnableOverflow, Allocator>& a,
+ fixed_string<T, nodeCount, bEnableOverflow, Allocator>& b)
+ {
+ // Fixed containers use a special swap that can deal with excessively large buffers.
+ eastl::fixed_swap(a, b); // ADL-visible overload so unqualified swap(a, b) dispatches here.
+ }
+
+
+} // namespace eastl
+
+
+#endif // EASTL_ABSTRACT_STRING_ENABLED
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_substring.h b/UnknownVersion/include/EASTL/fixed_substring.h
new file mode 100644
index 0000000..be40d37
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_substring.h
@@ -0,0 +1,300 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_substring.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_SUBSTRING_H
+#define EASTL_FIXED_SUBSTRING_H
+
+
+#include <EASTL/string.h>
+
+
+namespace eastl
+{
+
+ /// fixed_substring
+ ///
+ /// Implements a string which is a reference to a segment of characters.
+ /// This class is efficient because it allocates no memory and copies no
+ /// memory during construction and assignment, but rather refers directly
+	/// to the segment of characters. A common use of this is to have a
+ /// fixed_substring efficiently refer to a substring within another string.
+ ///
+ /// You cannot directly resize a fixed_substring (e.g. via resize, insert,
+ /// append, erase), but you can assign a different substring to it.
+ /// You can modify the characters within a substring in place.
+ /// As of this writing, in the name of being lean and simple it is the
+ /// user's responsibility to not call unsupported resizing functions
+ /// such as those listed above. A detailed listing of the functions which
+ /// are not supported is given below in the class declaration.
+ ///
+ /// The c_str function doesn't act as one might hope, as it simply
+ /// returns the pointer to the beginning of the string segment and the
+ /// 0-terminator may be beyond the end of the segment. If you want to
+ /// always be able to use c_str as expected, use the fixed string solution
+ /// we describe below.
+ ///
+ /// Another use of fixed_substring is to provide C++ string-like functionality
+ /// with a C character array. This allows you to work on a C character array
+	/// as if it were a C++ string as opposed to using the C string API. Thus you
+ /// can do this:
+ ///
+ /// void DoSomethingForUser(char* timeStr, size_t timeStrCapacity)
+ /// {
+	///        fixed_substring<char> tmp(timeStr, timeStrCapacity);
+ /// tmp = "hello ";
+ /// tmp += "world";
+ /// }
+ ///
+ /// Note that this class constructs and assigns from const string pointers
+ /// and const string objects, yet this class does not declare its member
+ /// data as const. This is a concession in order to allow this implementation
+ /// to be simple and lean. It is the user's responsibility to make sure
+ /// that strings that should not or can not be modified are either not
+ /// used by fixed_substring or are not modified by fixed_substring.
+ ///
+ /// A more flexible alternative to fixed_substring is fixed_string.
+ /// fixed_string has none of the functional limitations that fixed_substring
+ /// has and like fixed_substring it doesn't allocate memory. However,
+ /// fixed_string makes a *copy* of the source string and uses local
+ /// memory to store that copy. Also, fixed_string objects on the stack
+ /// are going to have a limit as to their maximum size.
+ ///
+ /// Notes:
+ /// As of this writing, the string class necessarily reallocates when
+ /// an insert of self is done into self. As a result, the fixed_substring
+ /// class doesn't support inserting self into self.
+ ///
+ /// Example usage:
+ /// basic_string<char> str("hello world");
+ /// fixed_substring<char> sub(str, 2, 5); // sub == "llo w"
+ ///
+ template <typename T>
+ class fixed_substring : public basic_string<T>
+ {
+ public:
+ typedef basic_string<T> base_type;
+ typedef fixed_substring<T> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+
+ using base_type::npos;
+ using base_type::mpBegin;
+ using base_type::mpEnd;
+ using base_type::mpCapacity;
+ using base_type::reset;
+ using base_type::mAllocator;
+
+ public:
+ fixed_substring()
+ : base_type()
+ {
+ }
+
+ fixed_substring(const base_type& x)
+ : base_type()
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.get_allocator().get_name());
+ #endif
+
+ assign(x);
+ }
+
+ fixed_substring(const base_type& x, size_type position, size_type n = base_type::npos)
+ : base_type()
+ {
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.get_allocator().get_name());
+ #endif
+
+ assign(x, position, n);
+ }
+
+ fixed_substring(const value_type* p, size_type n)
+ : base_type()
+ {
+ assign(p, n);
+ }
+
+ fixed_substring(const value_type* p)
+ : base_type()
+ {
+ assign(p);
+ }
+
+ fixed_substring(const value_type* pBegin, const value_type* pEnd)
+ : base_type()
+ {
+ assign(pBegin, pEnd);
+ }
+
+ ~fixed_substring()
+ {
+ // We need to reset, as otherwise the parent destructor will
+ // attempt to free our memory.
+ reset();
+ }
+
+ this_type& operator=(const base_type& x)
+ {
+ assign(x);
+ return *this;
+ }
+
+ this_type& operator=(const value_type* p)
+ {
+ assign(p);
+ return *this;
+ }
+
+ this_type& assign(const base_type& x)
+ {
+ // By design, we need to cast away const-ness here.
+ mpBegin = const_cast<value_type*>(x.data());
+ mpEnd = mpBegin + x.size();
+ mpCapacity = mpEnd;
+ return *this;
+ }
+
+ this_type& assign(const base_type& x, size_type position, size_type n)
+ {
+ // By design, we need to cast away const-ness here.
+ mpBegin = const_cast<value_type*>(x.data()) + position;
+ mpEnd = mpBegin + n;
+ mpCapacity = mpEnd;
+ return *this;
+ }
+
+ this_type& assign(const value_type* p, size_type n)
+ {
+ // By design, we need to cast away const-ness here.
+ mpBegin = const_cast<value_type*>(p);
+ mpEnd = mpBegin + n;
+ mpCapacity = mpEnd;
+ return *this;
+ }
+
+ this_type& assign(const value_type* p)
+ {
+ // By design, we need to cast away const-ness here.
+ mpBegin = const_cast<value_type*>(p);
+ mpEnd = mpBegin + CharStrlen(p);
+ mpCapacity = mpEnd;
+ return *this;
+ }
+
+ this_type& assign(const value_type* pBegin, const value_type* pEnd)
+ {
+ // By design, we need to cast away const-ness here.
+ mpBegin = const_cast<value_type*>(pBegin);
+ mpEnd = const_cast<value_type*>(pEnd);
+ mpCapacity = mpEnd;
+ return *this;
+ }
+
+
+ // Partially supported functionality
+ //
+ // When using fixed_substring on a character sequence that is within another
+ // string, the following functions may do one of two things:
+ // 1 Attempt to reallocate
+ // 2 Write a 0 char at the end of the fixed_substring
+ //
+ // Item #1 will result in a crash, due to the attempt by the underlying
+ // string class to free the substring memory. Item #2 will result in a 0
+ // char being written to the character array. Item #2 may or may not be
+ // a problem, depending on how you use fixed_substring. Thus the following
+ // functions should be used carefully.
+ //
+ // basic_string& operator=(const basic_string& x);
+ // basic_string& operator=(value_type c);
+ // void resize(size_type n, value_type c);
+ // void resize(size_type n);
+ // void reserve(size_type = 0);
+ // void set_capacity(size_type n);
+ // void clear();
+ // basic_string& operator+=(const basic_string& x);
+ // basic_string& operator+=(const value_type* p);
+ // basic_string& operator+=(value_type c);
+ // basic_string& append(const basic_string& x);
+ // basic_string& append(const basic_string& x, size_type position, size_type n);
+ // basic_string& append(const value_type* p, size_type n);
+ // basic_string& append(const value_type* p);
+ // basic_string& append(size_type n);
+ // basic_string& append(size_type n, value_type c);
+ // basic_string& append(const value_type* pBegin, const value_type* pEnd);
+ // basic_string& append_sprintf_va_list(const value_type* pFormat, va_list arguments);
+ // basic_string& append_sprintf(const value_type* pFormat, ...);
+ // void push_back(value_type c);
+ // void pop_back();
+ // basic_string& assign(const value_type* p, size_type n);
+ // basic_string& assign(size_type n, value_type c);
+ // basic_string& insert(size_type position, const basic_string& x);
+ // basic_string& insert(size_type position, const basic_string& x, size_type beg, size_type n);
+ // basic_string& insert(size_type position, const value_type* p, size_type n);
+ // basic_string& insert(size_type position, const value_type* p);
+ // basic_string& insert(size_type position, size_type n, value_type c);
+ // iterator insert(iterator p, value_type c);
+ // void insert(iterator p, size_type n, value_type c);
+ // void insert(iterator p, const value_type* pBegin, const value_type* pEnd);
+ // basic_string& erase(size_type position = 0, size_type n = npos);
+ // iterator erase(iterator p);
+ // iterator erase(iterator pBegin, iterator pEnd);
+ // void swap(basic_string& x);
+ // basic_string& sprintf_va_list(const value_type* pFormat, va_list arguments);
+ // basic_string& sprintf(const value_type* pFormat, ...);
+
+
+ }; // fixed_substring
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/fixed_vector.h b/UnknownVersion/include/EASTL/fixed_vector.h
new file mode 100644
index 0000000..39687e8
--- /dev/null
+++ b/UnknownVersion/include/EASTL/fixed_vector.h
@@ -0,0 +1,333 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/fixed_vector.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a vector which uses a fixed size memory pool.
+// The bEnableOverflow template parameter allows the container to resort to
+// heap allocations if the memory pool is exhausted.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FIXED_VECTOR_H
+#define EASTL_FIXED_VECTOR_H
+
+
+#include <EASTL/vector.h>
+#include <EASTL/internal/fixed_pool.h>
+
+
+namespace eastl
+{
+ /// EASTL_FIXED_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ /// In the case of fixed-size containers, the allocator name always refers
+ /// to overflow allocations.
+ ///
+ #ifndef EASTL_FIXED_VECTOR_DEFAULT_NAME
+ #define EASTL_FIXED_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_vector" // Unless the user overrides something, this is "EASTL fixed_vector".
+ #endif
+
+
+ /// fixed_vector
+ ///
+ /// A fixed_vector with bEnableOverflow == true is identical to a regular
+ /// vector in terms of its behavior. All the expectations of regular vector
+ /// apply to it and no additional expectations come from it. When bEnableOverflow
+ /// is false, fixed_vector behaves like regular vector with the exception that
+ /// its capacity can never increase. All operations you do on such a fixed_vector
+	/// which require a capacity increase will result in undefined behavior or a
+	/// C++ allocation exception, depending on the configuration of EASTL.
+ ///
+ /// Template parameters:
+ /// T The type of object the vector holds.
+ /// nodeCount The max number of objects to contain.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+ /// Note: The nodeCount value must be at least 1.
+ ///
+ /// Example usage:
+	///     fixed_vector<Widget, 128, true> fixedVector;
+ ///
+ /// fixedVector.push_back(Widget());
+ /// fixedVector.resize(200);
+ /// fixedVector.clear();
+ ///
	template <typename T, size_t nodeCount, bool bEnableOverflow = true, typename Allocator = EASTLAllocatorType>
	class fixed_vector : public vector<T, fixed_vector_allocator<sizeof(T), nodeCount, vector<T>::kAlignment, vector<T>::kAlignmentOffset, bEnableOverflow, Allocator> >
	{
	public:
		// The allocator serves elements from the local fixed-size buffer and,
		// if bEnableOverflow is true, falls back to Allocator for overflow.
		typedef fixed_vector_allocator<sizeof(T), nodeCount, vector<T>::kAlignment,
							vector<T>::kAlignmentOffset, bEnableOverflow, Allocator>  fixed_allocator_type;
		typedef vector<T, fixed_allocator_type>                                       base_type;
		typedef fixed_vector<T, nodeCount, bEnableOverflow, Allocator>                this_type;
		typedef typename base_type::size_type                                         size_type;
		typedef typename base_type::value_type                                        value_type;
		// Raw storage for nodeCount elements, aligned for T.
		typedef aligned_buffer<nodeCount * sizeof(T), vector<T>::kAlignment>          aligned_buffer_type;

		enum
		{
			kMaxSize = nodeCount // The fixed (non-overflow) capacity.
		};

		using base_type::mAllocator;
		using base_type::mpBegin;
		using base_type::mpEnd;
		using base_type::mpCapacity;
		using base_type::resize;
		using base_type::clear;
		using base_type::size;
		using base_type::assign;

	protected:
		aligned_buffer_type mBuffer; // Local storage used until/unless overflow occurs.

	public:
		fixed_vector();
		explicit fixed_vector(size_type n);
		fixed_vector(size_type n, const value_type& value);
		fixed_vector(const this_type& x);

		template <typename InputIterator>
		fixed_vector(InputIterator first, InputIterator last);

		this_type& operator=(const this_type& x);

		void      swap(this_type& x);

		void      set_capacity(size_type n);
		void      reset();
		size_type max_size() const;       // Returns the max fixed size, which is the user-supplied nodeCount parameter.
		bool      has_overflowed() const; // Returns true if the fixed space is fully allocated. Note that if overflow is enabled, the container size can be greater than nodeCount but full() could return true because the fixed space may have a recently freed slot.

		void*     push_back_uninitialized();

		// Deprecated:
		bool      full() const { return has_overflowed(); }

	protected:
		// Tag-dispatched implementations of push_back_uninitialized, selected
		// on bEnableOverflow at compile time.
		void*     DoPushBackUninitialized(true_type);
		void*     DoPushBackUninitialized(false_type);

	}; // fixed_vector
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // fixed_vector
+ ///////////////////////////////////////////////////////////////////////
+
	/// fixed_vector
	/// Default constructor: creates an empty vector whose storage is the
	/// local fixed-size buffer.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::fixed_vector()
		: base_type(fixed_allocator_type(mBuffer.buffer)) // NOTE: mBuffer is referenced before it is constructed; only its address is taken here.
	{
		#if EASTL_NAME_ENABLED
			mAllocator.set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
		#endif

		// Point the base vector's internal pointers at the local buffer.
		mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
		mpCapacity = mpBegin + nodeCount;
	}
+
	/// fixed_vector
	/// Constructs a vector of n default-constructed elements.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::fixed_vector(size_type n)
		: base_type(fixed_allocator_type(mBuffer.buffer)) // NOTE: mBuffer is referenced before it is constructed; only its address is taken here.
	{
		#if EASTL_NAME_ENABLED
			mAllocator.set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
		#endif

		// Point the base vector's internal pointers at the local buffer,
		// then grow to the requested size.
		mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
		mpCapacity = mpBegin + nodeCount;
		resize(n);
	}
+
+
	/// fixed_vector
	/// Constructs a vector of n copies of value.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::fixed_vector(size_type n, const value_type& value)
		: base_type(fixed_allocator_type(mBuffer.buffer)) // NOTE: mBuffer is referenced before it is constructed; only its address is taken here.
	{
		#if EASTL_NAME_ENABLED
			mAllocator.set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
		#endif

		mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
		mpCapacity = mpBegin + nodeCount;
		resize(n, value);
	}
+
+
	/// fixed_vector
	/// Copy constructor: copies x's elements into this container's own
	/// (local) storage; the buffer itself is never shared.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::fixed_vector(const this_type& x)
		: base_type(fixed_allocator_type(mBuffer.buffer)) // NOTE: mBuffer is referenced before it is constructed; only its address is taken here.
	{
		#if EASTL_NAME_ENABLED
			mAllocator.set_name(x.mAllocator.get_name()); // Copy the source container's debug name.
		#endif

		mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
		mpCapacity = mpBegin + nodeCount;
		assign(x.begin(), x.end());
	}
+
+
	/// fixed_vector
	/// Range constructor: copies [first, last) into the container.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	template <typename InputIterator>
	fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::fixed_vector(InputIterator first, InputIterator last)
		: base_type(fixed_allocator_type(mBuffer.buffer)) // NOTE: mBuffer is referenced before it is constructed; only its address is taken here.
	{
		#if EASTL_NAME_ENABLED
			mAllocator.set_name(EASTL_FIXED_VECTOR_DEFAULT_NAME);
		#endif

		mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
		mpCapacity = mpBegin + nodeCount;
		// Dispatch on whether InputIterator is actually an integral type
		// (in which case this is really the (n, value) constructor).
		//assign(first, last); // Metrowerks gets confused by this.
		base_type::DoAssign(first, last, is_integral<InputIterator>());
	}
+
+
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
+ inline typename fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::this_type&
+ fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::operator=(const this_type& x)
+ {
+ if(this != &x)
+ {
+ clear();
+ assign(x.begin(), x.end());
+ }
+ return *this;
+ }
+
+
	/// swap
	/// Exchanges the contents of this container with x.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline void fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::swap(this_type& x)
	{
		// Fixed containers use a special swap that can deal with excessively large buffers.
		eastl::fixed_swap(*this, x);
	}
+
+
	/// set_capacity
	/// The fixed capacity cannot grow; all this can do is shrink the size.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline void fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::set_capacity(size_type n)
	{
		// We act consistently with vector::set_capacity and reduce our
		// size if the new capacity is smaller than our size.
		if(n < size())
			resize(n);
		// To consider: If bEnableOverflow is true, then perhaps we should
		// switch to the overflow allocator and set the capacity.
	}
+
+
	/// reset
	/// Unilaterally points the container back at its local buffer, empty.
	/// Note: does NOT destroy existing elements or free overflow memory;
	/// the caller is responsible for that.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline void fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::reset()
	{
		mpBegin = mpEnd = (value_type*)&mBuffer.buffer[0];
		mpCapacity = mpBegin + nodeCount;
	}
+
+
	/// max_size
	/// Returns the fixed capacity (the user-supplied nodeCount), regardless
	/// of whether overflow is enabled.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline typename fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::size_type
	fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::max_size() const
	{
		return kMaxSize;
	}
+
+
	/// has_overflowed
	/// Returns true if the fixed local buffer is fully used or has been
	/// abandoned in favor of overflow (heap) memory.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline bool fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::has_overflowed() const
	{
		// If size >= capacity, then we are definitely full.
		// Also, if our size is smaller but we've switched away from mBuffer due to a previous overflow, then we are considered full.
		return ((size_t)(mpEnd - mpBegin) >= kMaxSize) || ((void*)mpBegin != (void*)mBuffer.buffer);
	}
+
+
	/// push_back_uninitialized
	/// Reserves space for one more element and returns a pointer to the raw
	/// (uninitialized) slot; the caller must construct the element in place.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline void* fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::push_back_uninitialized()
	{
		// Compile-time tag dispatch on bEnableOverflow.
		return DoPushBackUninitialized(typename type_select<bEnableOverflow, true_type, false_type>::type());
	}

	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline void* fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::DoPushBackUninitialized(true_type)
	{
		// Overflow enabled: defer to the base vector, which can reallocate.
		return base_type::push_back_uninitialized();
	}

	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline void* fixed_vector<T, nodeCount, bEnableOverflow, Allocator>::DoPushBackUninitialized(false_type)
	{
		// Overflow disabled: no capacity check is done here — the caller must
		// ensure size() < nodeCount before calling.
		return mpEnd++;
	}
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ // operator ==, !=, <, >, <=, >= come from the vector implementations.
+
	/// swap
	/// Swaps two fixed_vectors of identical template parameters.
	template <typename T, size_t nodeCount, bool bEnableOverflow, typename Allocator>
	inline void swap(fixed_vector<T, nodeCount, bEnableOverflow, Allocator>& a,
					 fixed_vector<T, nodeCount, bEnableOverflow, Allocator>& b)
	{
		// Fixed containers use a special swap that can deal with excessively large buffers.
		eastl::fixed_swap(a, b);
	}
+
+
+
+} // namespace eastl
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/functional.h b/UnknownVersion/include/EASTL/functional.h
new file mode 100644
index 0000000..4c7fea6
--- /dev/null
+++ b/UnknownVersion/include/EASTL/functional.h
@@ -0,0 +1,936 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/functional.h
+// Written and maintained by Paul Pedriana - 2005
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_FUNCTIONAL_H
+#define EASTL_FUNCTIONAL_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // Primary C++ functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename Argument, typename Result>
+ struct unary_function
+ {
+ typedef Argument argument_type;
+ typedef Result result_type;
+ };
+
+
+ template <typename Argument1, typename Argument2, typename Result>
+ struct binary_function
+ {
+ typedef Argument1 first_argument_type;
+ typedef Argument2 second_argument_type;
+ typedef Result result_type;
+ };
+
+
+ template <typename T>
+ struct plus : public binary_function<T, T, T>
+ {
+ T operator()(const T& a, const T& b) const
+ { return a + b; }
+ };
+
+ template <typename T>
+ struct minus : public binary_function<T, T, T>
+ {
+ T operator()(const T& a, const T& b) const
+ { return a - b; }
+ };
+
+ template <typename T>
+ struct multiplies : public binary_function<T, T, T>
+ {
+ T operator()(const T& a, const T& b) const
+ { return a * b; }
+ };
+
+ template <typename T>
+ struct divides : public binary_function<T, T, T>
+ {
+ T operator()(const T& a, const T& b) const
+ { return a / b; }
+ };
+
+ template <typename T>
+ struct modulus : public binary_function<T, T, T>
+ {
+ T operator()(const T& a, const T& b) const
+ { return a % b; }
+ };
+
+ template <typename T>
+ struct negate : public unary_function<T, T>
+ {
+ T operator()(const T& a) const
+ { return -a; }
+ };
+
+ template <typename T>
+ struct equal_to : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a == b; }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_equal_to(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) == compare(b, a);
+ }
+
+ template <typename T>
+ struct not_equal_to : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a != b; }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_not_equal_to(const T& a, const T& b, Compare compare)
+ {
+ return compare(a, b) == compare(b, a); // We want the not equal comparison results to be equal.
+ }
+
+ /// str_equal_to
+ ///
+ /// Compares two 0-terminated string types.
+ /// The T types are expected to be iterators or act like iterators.
+ ///
+ /// Example usage:
+ /// hash_set<const char*, hash<const char*>, str_equal_to<const char*> > stringHashSet;
+ ///
+ /// Note:
+ /// You couldn't use str_equal_to like this:
+ /// bool result = equal("hi", "hi" + 2, "ho", str_equal_to<const char*>());
+ /// This is because equal tests an array of something, with each element by
+ /// the comparison function. But str_equal_to tests an array of something itself.
+ ///
+ template <typename T>
+ struct str_equal_to : public binary_function<T, T, bool>
+ {
+ bool operator()(T a, T b) const
+ {
+ while(*a && (*a == *b))
+ {
+ ++a;
+ ++b;
+ }
+ return (*a == *b);
+ }
+ };
+
+ template <typename T>
+ struct greater : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a > b; }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_greater(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a > b), then !(b > a)
+ }
+
+ template <typename T>
+ struct less : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a < b; }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_less(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a < b), then !(b < a)
+ }
+
+ template <typename T>
+ struct greater_equal : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a >= b; }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_greater_equal(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a >= b), then !(b >= a)
+ }
+
+ template <typename T>
+ struct less_equal : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a <= b; }
+ };
+
+ template <typename T, typename Compare>
+ bool validate_less_equal(const T& a, const T& b, Compare compare)
+ {
+ return !compare(a, b) || !compare(b, a); // If (a <= b), then !(b <= a)
+ }
+
+ template <typename T>
+ struct logical_and : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a && b; }
+ };
+
+ template <typename T>
+ struct logical_or : public binary_function<T, T, bool>
+ {
+ bool operator()(const T& a, const T& b) const
+ { return a || b; }
+ };
+
+ template <typename T>
+ struct logical_not : public unary_function<T, bool>
+ {
+ bool operator()(const T& a) const
+ { return !a; }
+ };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // Dual type functions
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename U>
+ struct equal_to_2 : public binary_function<T, U, bool>
+ {
+ bool operator()(const T& a, const U& b) const
+ { return a == b; }
+ };
+
+ template <typename T, typename U>
+ struct not_equal_to_2 : public binary_function<T, U, bool>
+ {
+ bool operator()(const T& a, const U& b) const
+ { return a != b; }
+ };
+
+ template <typename T, typename U>
+ struct less_2 : public binary_function<T, U, bool>
+ {
+ bool operator()(const T& a, const U& b) const
+ { return a < b; }
+ };
+
+
+
+
+ /// unary_negate
+ ///
+ template <typename Predicate>
+ class unary_negate : public unary_function<typename Predicate::argument_type, bool>
+ {
+ protected:
+ Predicate mPredicate;
+ public:
+ explicit unary_negate(const Predicate& a)
+ : mPredicate(a) {}
+ bool operator()(const typename Predicate::argument_type& a) const
+ { return !mPredicate(a); }
+ };
+
+ template <typename Predicate>
+ inline unary_negate<Predicate> not1(const Predicate& predicate)
+ { return unary_negate<Predicate>(predicate); }
+
+
+
+ /// binary_negate
+ ///
+ template <typename Predicate>
+ class binary_negate : public binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>
+ {
+ protected:
+ Predicate mPredicate;
+ public:
+ explicit binary_negate(const Predicate& a)
+ : mPredicate(a) { }
+ bool operator()(const typename Predicate::first_argument_type& a, const typename Predicate::second_argument_type& b) const
+ { return !mPredicate(a, b); }
+ };
+
+ template <typename Predicate>
+ inline binary_negate<Predicate> not2(const Predicate& predicate)
+ { return binary_negate<Predicate>(predicate); }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // bind
+ ///////////////////////////////////////////////////////////////////////
+
+ /// bind1st
+ ///
+ template <typename Operation>
+ class binder1st : public unary_function<typename Operation::second_argument_type, typename Operation::result_type>
+ {
+ protected:
+ typename Operation::first_argument_type value;
+ Operation op;
+
+ public:
+ binder1st(const Operation& x, const typename Operation::first_argument_type& y)
+ : value(y), op(x) { }
+
+ typename Operation::result_type operator()(const typename Operation::second_argument_type& x) const
+ { return op(value, x); }
+
+ typename Operation::result_type operator()(typename Operation::second_argument_type& x) const
+ { return op(value, x); }
+ };
+
+
+ template <typename Operation, typename T>
+ inline binder1st<Operation> bind1st(const Operation& op, const T& x)
+ {
+ typedef typename Operation::first_argument_type value;
+ return binder1st<Operation>(op, value(x));
+ }
+
+
+ /// bind2nd
+ ///
+ template <typename Operation>
+ class binder2nd : public unary_function<typename Operation::first_argument_type, typename Operation::result_type>
+ {
+ protected:
+ Operation op;
+ typename Operation::second_argument_type value;
+
+ public:
+ binder2nd(const Operation& x, const typename Operation::second_argument_type& y)
+ : op(x), value(y) { }
+
+ typename Operation::result_type operator()(const typename Operation::first_argument_type& x) const
+ { return op(x, value); }
+
+ typename Operation::result_type operator()(typename Operation::first_argument_type& x) const
+ { return op(x, value); }
+ };
+
+
+ template <typename Operation, typename T>
+ inline binder2nd<Operation> bind2nd(const Operation& op, const T& x)
+ {
+ typedef typename Operation::second_argument_type value;
+ return binder2nd<Operation>(op, value(x));
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pointer_to_unary_function
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pointer_to_unary_function
+ ///
+ /// This is an adapter template which converts a pointer to a standalone
+ /// function to a function object. This allows standalone functions to
+ /// work in many cases where the system requires a function object.
+ ///
+ /// Example usage:
+ /// ptrdiff_t Rand(ptrdiff_t n) { return rand() % n; } // Note: The C rand function is poor and slow.
+ /// pointer_to_unary_function<ptrdiff_t, ptrdiff_t> randInstance(Rand);
+ /// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
+ ///
+ template <typename Arg, typename Result>
+ class pointer_to_unary_function : public unary_function<Arg, Result>
+ {
+ protected:
+ Result (*mpFunction)(Arg);
+
+ public:
+ pointer_to_unary_function()
+ { }
+
+ explicit pointer_to_unary_function(Result (*pFunction)(Arg))
+ : mpFunction(pFunction) { }
+
+ Result operator()(Arg x) const
+ { return mpFunction(x); }
+ };
+
+
+ /// ptr_fun
+ ///
+ /// This ptr_fun is simply shorthand for usage of pointer_to_unary_function.
+ ///
+ /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway):
+ /// int factorial(int x) { return (x > 1) ? (x * factorial(x - 1)) : x; }
+ /// transform(pIntArrayBegin, pIntArrayEnd, pIntArrayBegin, ptr_fun(factorial));
+ ///
+ template <typename Arg, typename Result>
+ inline pointer_to_unary_function<Arg, Result> ptr_fun(Result (*pFunction)(Arg))
+ { return pointer_to_unary_function<Arg, Result>(pFunction); }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pointer_to_binary_function
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pointer_to_binary_function
+ ///
+ /// This is an adapter template which converts a pointer to a standalone
+ /// function to a function object. This allows standalone functions to
+ /// work in many cases where the system requires a function object.
+ ///
+ template <typename Arg1, typename Arg2, typename Result>
+ class pointer_to_binary_function : public binary_function<Arg1, Arg2, Result>
+ {
+ protected:
+ Result (*mpFunction)(Arg1, Arg2);
+
+ public:
+ pointer_to_binary_function()
+ { }
+
+ explicit pointer_to_binary_function(Result (*pFunction)(Arg1, Arg2))
+ : mpFunction(pFunction) {}
+
+ Result operator()(Arg1 x, Arg2 y) const
+ { return mpFunction(x, y); }
+ };
+
+
+ /// This ptr_fun is simply shorthand for usage of pointer_to_binary_function.
+ ///
+ /// Example usage (actually, you don't need to use ptr_fun here, but it works anyway):
+ /// int multiply(int x, int y) { return x * y; }
+ /// transform(pIntArray1Begin, pIntArray1End, pIntArray2Begin, pIntArray1Begin, ptr_fun(multiply));
+ ///
+ template <typename Arg1, typename Arg2, typename Result>
+ inline pointer_to_binary_function<Arg1, Arg2, Result> ptr_fun(Result (*pFunction)(Arg1, Arg2))
+ { return pointer_to_binary_function<Arg1, Arg2, Result>(pFunction); }
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // mem_fun
+ // mem_fun1
+ //
+ // Note that mem_fun calls member functions via *pointers* to classes
+ // and not instances of classes. mem_fun_ref is for calling functions
+ // via instances of classes or references to classes.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ /// mem_fun_t
+ ///
+ /// Member function with no arguments.
+ ///
+ template <typename Result, typename T>
+ class mem_fun_t : public unary_function<T*, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)();
+
+ EA_FORCE_INLINE explicit mem_fun_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(T* pT) const
+ {
+ return (pT->*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun1_t
+ ///
+ /// Member function with one argument.
+ ///
+ template <typename Result, typename T, typename Argument>
+ class mem_fun1_t : public binary_function<T*, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument);
+
+ EA_FORCE_INLINE explicit mem_fun1_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(T* pT, Argument arg) const
+ {
+ return (pT->*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun_t
+ ///
+ /// Const member function with no arguments.
+ /// Note that we inherit from unary_function<const T*, Result>
+ /// instead of what the C++ standard specifies: unary_function<T*, Result>.
+ /// The C++ standard is in error and this has been recognized by the defect group.
+ ///
+ template <typename Result, typename T>
+ class const_mem_fun_t : public unary_function<const T*, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)() const;
+
+ EA_FORCE_INLINE explicit const_mem_fun_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(const T* pT) const
+ {
+ return (pT->*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun1_t
+ ///
+ /// Const member function with one argument.
+ /// Note that we inherit from unary_function<const T*, Result>
+ /// instead of what the C++ standard specifies: unary_function<T*, Result>.
+ /// The C++ standard is in error and this has been recognized by the defect group.
+ ///
+ template <typename Result, typename T, typename Argument>
+ class const_mem_fun1_t : public binary_function<const T*, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument) const;
+
+ EA_FORCE_INLINE explicit const_mem_fun1_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(const T* pT, Argument arg) const
+ {
+ return (pT->*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun
+ ///
+ /// This is the high level interface to the mem_fun_t family.
+ ///
+ /// Example usage:
+ /// struct TestClass { void print() { puts("hello"); } }
+ /// TestClass* pTestClassArray[3] = { ... };
+ /// for_each(pTestClassArray, pTestClassArray + 3, &TestClass::print);
+ ///
+ template <typename Result, typename T>
+ EA_FORCE_INLINE mem_fun_t<Result, T>
+ mem_fun(Result (T::*MemberFunction)())
+ {
+ return eastl::mem_fun_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ EA_FORCE_INLINE mem_fun1_t<Result, T, Argument>
+ mem_fun(Result (T::*MemberFunction)(Argument))
+ {
+ return eastl::mem_fun1_t<Result, T, Argument>(MemberFunction);
+ }
+
+ template <typename Result, typename T>
+ EA_FORCE_INLINE const_mem_fun_t<Result, T>
+ mem_fun(Result (T::*MemberFunction)() const)
+ {
+ return eastl::const_mem_fun_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ EA_FORCE_INLINE const_mem_fun1_t<Result, T, Argument>
+ mem_fun(Result (T::*MemberFunction)(Argument) const)
+ {
+ return eastl::const_mem_fun1_t<Result, T, Argument>(MemberFunction);
+ }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // mem_fun_ref
+ // mem_fun1_ref
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ /// mem_fun_ref_t
+ ///
+ template <typename Result, typename T>
+ class mem_fun_ref_t : public unary_function<T, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)();
+
+ EA_FORCE_INLINE explicit mem_fun_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(T& t) const
+ {
+ return (t.*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun1_ref_t
+ ///
+ template <typename Result, typename T, typename Argument>
+ class mem_fun1_ref_t : public binary_function<T, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument);
+
+ EA_FORCE_INLINE explicit mem_fun1_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(T& t, Argument arg) const
+ {
+ return (t.*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun_ref_t
+ ///
+ template <typename Result, typename T>
+ class const_mem_fun_ref_t : public unary_function<T, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)() const;
+
+ EA_FORCE_INLINE explicit const_mem_fun_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(const T& t) const
+ {
+ return (t.*mpMemberFunction)();
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// const_mem_fun1_ref_t
+ ///
+ template <typename Result, typename T, typename Argument>
+ class const_mem_fun1_ref_t : public binary_function<T, Argument, Result>
+ {
+ public:
+ typedef Result (T::*MemberFunction)(Argument) const;
+
+ EA_FORCE_INLINE explicit const_mem_fun1_ref_t(MemberFunction pMemberFunction)
+ : mpMemberFunction(pMemberFunction)
+ {
+ // Empty
+ }
+
+ EA_FORCE_INLINE Result operator()(const T& t, Argument arg) const
+ {
+ return (t.*mpMemberFunction)(arg);
+ }
+
+ protected:
+ MemberFunction mpMemberFunction;
+ };
+
+
+ /// mem_fun_ref
+ /// Example usage:
+ /// struct TestClass { void print() { puts("hello"); } }
+ /// TestClass testClassArray[3];
+ /// for_each(testClassArray, testClassArray + 3, &TestClass::print);
+ ///
+ template <typename Result, typename T>
+ EA_FORCE_INLINE mem_fun_ref_t<Result, T>
+ mem_fun_ref(Result (T::*MemberFunction)())
+ {
+ return eastl::mem_fun_ref_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ EA_FORCE_INLINE mem_fun1_ref_t<Result, T, Argument>
+ mem_fun_ref(Result (T::*MemberFunction)(Argument))
+ {
+ return eastl::mem_fun1_ref_t<Result, T, Argument>(MemberFunction);
+ }
+
+ template <typename Result, typename T>
+ EA_FORCE_INLINE const_mem_fun_ref_t<Result, T>
+ mem_fun_ref(Result (T::*MemberFunction)() const)
+ {
+ return eastl::const_mem_fun_ref_t<Result, T>(MemberFunction);
+ }
+
+ template <typename Result, typename T, typename Argument>
+ EA_FORCE_INLINE const_mem_fun1_ref_t<Result, T, Argument>
+ mem_fun_ref(Result (T::*MemberFunction)(Argument) const)
+ {
+ return eastl::const_mem_fun1_ref_t<Result, T, Argument>(MemberFunction);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hash
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T> struct hash;
+
+ template <typename T> struct hash<T*> // Note that we use the pointer as-is and don't divide by sizeof(T*). This is because the table is of a prime size and this division doesn't benefit distribution.
+ { size_t operator()(T* p) const { return size_t(uintptr_t(p)); } };
+
+ template <> struct hash<bool>
+ { size_t operator()(bool val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<char>
+ { size_t operator()(char val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed char>
+ { size_t operator()(signed char val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned char>
+ { size_t operator()(unsigned char val) const { return static_cast<size_t>(val); } };
+
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+ template <> struct hash<wchar_t>
+ { size_t operator()(wchar_t val) const { return static_cast<size_t>(val); } };
+ #endif
+
+ template <> struct hash<signed short>
+ { size_t operator()(short val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned short>
+ { size_t operator()(unsigned short val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed int>
+ { size_t operator()(signed int val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned int>
+ { size_t operator()(unsigned int val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed long>
+ { size_t operator()(signed long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned long>
+ { size_t operator()(unsigned long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<signed long long>
+ { size_t operator()(signed long long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<unsigned long long>
+ { size_t operator()(unsigned long long val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<float>
+ { size_t operator()(float val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<double>
+ { size_t operator()(double val) const { return static_cast<size_t>(val); } };
+
+ template <> struct hash<long double>
+ { size_t operator()(long double val) const { return static_cast<size_t>(val); } };
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // string hashes
+ //
+ // Note that our string hashes here intentionally are slow for long strings.
+ // The reasoning for this is so:
+ // - The large majority of hashed strings are only a few bytes long.
+ // - The hash function is significantly more efficient if it can make this assumption.
+ // - The user is welcome to make a custom hash for those uncommon cases where
+ // long strings need to be hashed. Indeed, the user can probably make a
+ // special hash customized for such strings that's better than what we provide.
+ ///////////////////////////////////////////////////////////////////////////
+
+ template <> struct hash<char8_t*>
+ {
+ size_t operator()(const char8_t* p) const
+ {
+ size_t c, result = 2166136261U; // FNV1 hash. Perhaps the best string hash.
+ while((c = (::uint8_t)*p++) != 0) // Using '!=' disables compiler warnings.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<const char8_t*>
+ {
+ size_t operator()(const char8_t* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (::uint8_t)*p++) != 0) // cast to unsigned 8 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<char16_t*>
+ {
+ size_t operator()(const char16_t* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (::uint16_t)*p++) != 0) // cast to unsigned 16 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<const char16_t*>
+ {
+ size_t operator()(const char16_t* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (::uint16_t)*p++) != 0) // cast to unsigned 16 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<char32_t*>
+ {
+ size_t operator()(const char32_t* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (::uint32_t)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ template <> struct hash<const char32_t*>
+ {
+ size_t operator()(const char32_t* p) const
+ {
+ size_t c, result = 2166136261U;
+ while((c = (::uint32_t)*p++) != 0) // cast to unsigned 32 bit.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ /// string_hash
+ ///
+ /// Defines a generic string hash for an arbitrary EASTL basic_string container.
+ ///
+ /// Example usage:
+ /// eastl::hash_set<MyString, eastl::string_hash<MyString> > hashSet;
+ ///
+ template <typename String>
+ struct string_hash
+ {
+ typedef String string_type;
+ typedef typename String::value_type value_type;
+ typedef typename eastl::add_unsigned<value_type>::type unsigned_value_type;
+
+ size_t operator()(const string_type& s) const
+ {
+ const unsigned_value_type* p = (const unsigned_value_type*)s.c_str();
+ size_t c, result = 2166136261U;
+ while((c = *p++) != 0)
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/hash_map.h b/UnknownVersion/include/EASTL/hash_map.h
new file mode 100644
index 0000000..3d7661d
--- /dev/null
+++ b/UnknownVersion/include/EASTL/hash_map.h
@@ -0,0 +1,337 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/hash_map.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is based on the TR1 (technical report 1) reference implementation
+// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely
+// many or all C++ library vendors' implementations of this classes will be
+// based off of the reference version and so will look pretty similar to this
+// file as well as other vendors' versions.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_HASH_MAP_H
+#define EASTL_HASH_MAP_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+
+
+namespace eastl
+{
+
+ /// EASTL_HASH_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_MAP_DEFAULT_NAME
+ #define EASTL_HASH_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_map" // Unless the user overrides something, this is "EASTL hash_map".
+ #endif
+
+
+ /// EASTL_HASH_MULTIMAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_MULTIMAP_DEFAULT_NAME
+ #define EASTL_HASH_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multimap" // Unless the user overrides something, this is "EASTL hash_multimap".
+ #endif
+
+
+ /// EASTL_HASH_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MAP_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// hash_map
+ ///
+ /// Implements a hash_map, which is a hashed associative container.
+ /// Lookups are O(1) (that is, they are fast) but the container is
+ /// not sorted.
+ ///
+ /// set_max_load_factor
+ /// If you want to make a hashtable never increase its bucket usage,
+ /// call set_max_load_factor with a very high value such as 100000.f.
+ ///
+ /// bCacheHashCode
+ /// We provide the boolean bCacheHashCode template parameter in order
+ /// to allow the storing of the hash code of the key within the map.
+ /// When this option is disabled, the rehashing of the table will
+ /// call the hash function on the key. Setting bCacheHashCode to true
+ /// is useful for cases whereby the calculation of the hash value for
+ /// a contained object is very expensive.
+ ///
+ /// find_as
+ /// In order to support the ability to have a hashtable of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the hashtable key type.
+ ///
+ /// Example find_as usage:
+ /// hash_map<string, int> hashMap;
+ /// i = hashMap.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example find_as usage (namespaces omitted for brevity):
+ /// hash_map<string, int> hashMap;
+ /// i = hashMap.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename Key, typename T, typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_map
+ : public hashtable<Key, eastl::pair<const Key, T>, Allocator, eastl::use_first<eastl::pair<const Key, T> >, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, true>
+ {
+ public:
+ typedef hashtable<Key, eastl::pair<const Key, T>, Allocator,
+ eastl::use_first<eastl::pair<const Key, T> >,
+ Predicate, Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, true, true> base_type;
+ typedef hash_map<Key, T, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename base_type::value_type value_type; // Note that this is pair<const key_type, mapped_type>.
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::insert_return_type insert_return_type;
+ typedef typename base_type::iterator iterator;
+
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x has a bug which we work around.
+ using base_type::insert;
+ #endif
+ using base_type::end;
+ using base_type::size;
+
+ public:
+ /// hash_map
+ ///
+ /// Default constructor.
+ ///
+ explicit hash_map(const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(),
+ Predicate(), eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_map
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_map(size_type nBucketCount, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_map
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename ForwardIterator>
+ hash_map(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MAP_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// insert
+ ///
+ /// This is an extension to the C++ standard. We insert a default-constructed
+ /// element with the given key. The reason for this is that we can avoid the
+ /// potentially expensive operation of creating and/or copying a mapped_type
+ /// object on the stack.
+ insert_return_type insert(const key_type& key)
+ {
+ return base_type::DoInsertKey(key, true_type());
+ }
+
+
+ #if defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last) { return base_type::insert(first, last); }
+ insert_return_type insert(const value_type& value) { return base_type::insert(value); }
+ iterator insert(const_iterator it, const value_type& value) { return base_type::insert(it, value); }
+ #endif
+
+
+ mapped_type& operator[](const key_type& key)
+ {
+ const typename base_type::iterator it = base_type::find(key);
+ if(it != base_type::end())
+ return (*it).second;
+ return (*base_type::insert(value_type(key, mapped_type())).first).second;
+ }
+
+ }; // hash_map
+
+
+
+
+
+
+ /// hash_multimap
+ ///
+ /// Implements a hash_multimap, which is the same thing as a hash_map
+ /// except that contained elements need not be unique. See the
+ /// documentation for hash_set for details.
+ ///
+ template <typename Key, typename T, typename Hash = eastl::hash<Key>, typename Predicate = eastl::equal_to<Key>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_multimap
+ : public hashtable<Key, eastl::pair<const Key, T>, Allocator, eastl::use_first<eastl::pair<const Key, T> >, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash, prime_rehash_policy, bCacheHashCode, true, false>
+ {
+ public:
+ typedef hashtable<Key, eastl::pair<const Key, T>, Allocator,
+ eastl::use_first<eastl::pair<const Key, T> >,
+ Predicate, Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, true, false> base_type;
+ typedef hash_multimap<Key, T, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::key_type key_type;
+ typedef T mapped_type;
+ typedef typename base_type::value_type value_type; // Note that this is pair<const key_type, mapped_type>.
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+ typedef typename base_type::insert_return_type insert_return_type;
+ typedef typename base_type::iterator iterator;
+
+ #if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x has a bug which we work around.
+ using base_type::insert;
+ #endif
+
+ public:
+ /// hash_multimap
+ ///
+ /// Default constructor.
+ ///
+ explicit hash_multimap(const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(),
+ Predicate(), eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multimap
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_multimap(size_type nBucketCount, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multimap
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename ForwardIterator>
+ hash_multimap(ForwardIterator first, ForwardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTIMAP_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(),
+ predicate, eastl::use_first<eastl::pair<const Key, T> >(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// insert
+ ///
+ /// This is an extension to the C++ standard. We insert a default-constructed
+ /// element with the given key. The reason for this is that we can avoid the
+ /// potentially expensive operation of creating and/or copying a mapped_type
+ /// object on the stack.
+ insert_return_type insert(const key_type& key)
+ {
+ return base_type::DoInsertKey(key, false_type());
+ }
+
+
+ #if defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last) { return base_type::insert(first, last); }
+ insert_return_type insert(const value_type& value) { return base_type::insert(value); }
+ iterator insert(const_iterator it, const value_type& value) { return base_type::insert(it, value); }
+ #endif
+
+
+ }; // hash_multimap
+
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/hash_set.h b/UnknownVersion/include/EASTL/hash_set.h
new file mode 100644
index 0000000..7df8400
--- /dev/null
+++ b/UnknownVersion/include/EASTL/hash_set.h
@@ -0,0 +1,273 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/hash_set.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is based on the TR1 (technical report 1) reference implementation
+// of the unordered_set/unordered_map C++ classes as of about 4/2005. Most likely
+// many or all C++ library vendors' implementations of these classes will be
+// based off of the reference version and so will look pretty similar to this
+// file as well as other vendors' versions.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_HASH_SET_H
+#define EASTL_HASH_SET_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/hashtable.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+
+
+namespace eastl
+{
+
+ /// EASTL_HASH_SET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_SET_DEFAULT_NAME
+ #define EASTL_HASH_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_set" // Unless the user overrides something, this is "EASTL hash_set".
+ #endif
+
+
+ /// EASTL_HASH_MULTISET_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASH_MULTISET_DEFAULT_NAME
+ #define EASTL_HASH_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hash_multiset" // Unless the user overrides something, this is "EASTL hash_multiset".
+ #endif
+
+
+ /// EASTL_HASH_SET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_SET_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_SET_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR
+ #define EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_HASH_MULTISET_DEFAULT_NAME)
+ #endif
+
+
+
+ /// hash_set
+ ///
+ /// Implements a hash_set, which is a hashed unique-item container.
+ /// Lookups are O(1) (that is, they are fast) but the container is
+ /// not sorted.
+ ///
+ /// set_max_load_factor
+ /// If you want to make a hashtable never increase its bucket usage,
+ /// call set_max_load_factor with a very high value such as 100000.f.
+ ///
+ /// bCacheHashCode
+ /// We provide the boolean bCacheHashCode template parameter in order
+ /// to allow the storing of the hash code of the key within the map.
+ /// When this option is disabled, the rehashing of the table will
+ /// call the hash function on the key. Setting bCacheHashCode to true
+ /// is useful for cases whereby the calculation of the hash value for
+ /// a contained object is very expensive.
+ ///
+ /// find_as
+ /// In order to support the ability to have a hashtable of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the hashtable key type.
+ ///
+ /// Example find_as usage:
+ /// hash_set<string> hashSet;
+ /// i = hashSet.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example find_as usage (namespaces omitted for brevity):
+ /// hash_set<string> hashSet;
+ /// i = hashSet.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename Value, typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_set
+ : public hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, true>
+ {
+ public:
+ typedef hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, true> base_type;
+ typedef hash_set<Value, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+
+ using base_type::end;
+ using base_type::size;
+
+ public:
+ /// hash_set
+ ///
+ /// Default constructor.
+ ///
+ explicit hash_set(const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_set
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_set(size_type nBucketCount, const Hash& hashFunction = Hash(), const Predicate& predicate = Predicate(),
+ const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_set
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename FowardIterator>
+ hash_set(FowardIterator first, FowardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_SET_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+ }; // hash_set
+
+
+
+
+
+
+ /// hash_multiset
+ ///
+ /// Implements a hash_multiset, which is the same thing as a hash_set
+ /// except that contained elements need not be unique. See the documentation
+ /// for hash_set for details.
+ ///
+ template <typename Value, typename Hash = eastl::hash<Value>, typename Predicate = eastl::equal_to<Value>,
+ typename Allocator = EASTLAllocatorType, bool bCacheHashCode = false>
+ class hash_multiset
+ : public hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, false>
+ {
+ public:
+ typedef hashtable<Value, Value, Allocator, eastl::use_self<Value>, Predicate,
+ Hash, mod_range_hashing, default_ranged_hash,
+ prime_rehash_policy, bCacheHashCode, false, false> base_type;
+ typedef hash_multiset<Value, Hash, Predicate, Allocator, bCacheHashCode> this_type;
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::value_type value_type;
+ typedef typename base_type::allocator_type allocator_type;
+ typedef typename base_type::node_type node_type;
+
+ public:
+ /// hash_multiset
+ ///
+ /// Default constructor.
+ ///
+ explicit hash_multiset(const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+ : base_type(0, Hash(), mod_range_hashing(), default_ranged_hash(), Predicate(), eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multiset
+ ///
+ /// Constructor which creates an empty container, but start with nBucketCount buckets.
+ /// We default to a small nBucketCount value, though the user really should manually
+ /// specify an appropriate value in order to prevent memory from being reallocated.
+ ///
+ explicit hash_multiset(size_type nBucketCount, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+ : base_type(nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ /// hash_multiset
+ ///
+ /// An input bucket count of <= 1 causes the bucket count to be equal to the number of
+ /// elements in the input range.
+ ///
+ template <typename FowardIterator>
+ hash_multiset(FowardIterator first, FowardIterator last, size_type nBucketCount = 0, const Hash& hashFunction = Hash(),
+ const Predicate& predicate = Predicate(), const allocator_type& allocator = EASTL_HASH_MULTISET_DEFAULT_ALLOCATOR)
+ : base_type(first, last, nBucketCount, hashFunction, mod_range_hashing(), default_ranged_hash(), predicate, eastl::use_self<Value>(), allocator)
+ {
+ // Empty
+ }
+
+
+ }; // hash_multiset
+
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/heap.h b/UnknownVersion/include/EASTL/heap.h
new file mode 100644
index 0000000..c7566f3
--- /dev/null
+++ b/UnknownVersion/include/EASTL/heap.h
@@ -0,0 +1,592 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/heap.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements heap functionality much like the std C++ heap algorithms.
+// Such heaps are not the same thing as memory heaps or pools, but rather are
+// semi-sorted random access containers which have the primary purpose of
+// supporting the implementation of priority_queue and similar data structures.
+//
+// The primary distinctions between this heap functionality and std::heap are:
+// - This heap exposes some extra functionality such as is_heap and change_heap.
+//    - This heap is more efficient than versions found in typical STL
+//      implementations. This comes about due to better use of array
+//      dereferencing and branch prediction. You should expect a performance
+//      improvement of 5-30%, depending on the usage and platform.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// The publicly usable functions we define are:
+// push_heap -- Adds an entry to a heap. Same as C++ std::push_heap.
+// pop_heap -- Removes the top entry from a heap. Same as C++ std::pop_heap.
+// make_heap -- Converts an array to a heap. Same as C++ std::make_heap.
+// sort_heap -- Sorts a heap in place. Same as C++ std::sort_heap.
+// remove_heap -- Removes an arbitrary entry from a heap.
+// change_heap -- Changes the priority of an entry in the heap.
+// is_heap -- Returns true if an array appears is in heap format.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_HEAP_H
+#define EASTL_HEAP_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <stddef.h>
+
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // promote_heap (internal function)
+ ///////////////////////////////////////////////////////////////////////
+
+ /// promote_heap
+ ///
+ /// Moves a value in the heap from a given position upward until
+ /// it is sorted correctly. It's kind of like bubble-sort, except that
+ /// instead of moving linearly from the back of a list to the front,
+ /// it moves from the bottom of the tree up the branches towards the
+ /// top. But otherwise is just like bubble-sort.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T>
+ inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value)
+ {
+ for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+ (position > topPosition) && (*(first + parentPosition) < value);
+ parentPosition = (position - 1) >> 1)
+ {
+ *(first + position) = *(first + parentPosition); // Swap the node with its parent.
+ position = parentPosition;
+ }
+
+ *(first + position) = value;
+ }
+
+ /// promote_heap
+ ///
+ /// Takes a Compare(a, b) function (or function object) which returns true if a < b.
+ /// For example, you could use the standard 'less' comparison object.
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ /// This function requires that the value argument refer to a value
+ /// that is currently not within the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename T, typename Compare>
+ inline void promote_heap(RandomAccessIterator first, Distance topPosition, Distance position, const T& value, Compare compare)
+ {
+ for(Distance parentPosition = (position - 1) >> 1; // This formula assumes that (position > 0). // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+ (position > topPosition) && compare(*(first + parentPosition), value);
+ parentPosition = (position - 1) >> 1)
+ {
+ *(first + position) = *(first + parentPosition); // Swap the node with its parent.
+ position = parentPosition;
+ }
+
+ *(first + position) = value;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // adjust_heap (internal function)
+ ///////////////////////////////////////////////////////////////////////
+
	/// adjust_heap
	///
	/// Given a position that has just been vacated, this function moves
	/// new values into that vacated position appropriately. The value
	/// argument is an entry which will be inserted into the heap after
	/// we move nodes into the positions that were vacated.
	///
	/// This function requires that the value argument refer to a value
	/// that is currently not within the heap.
	///
	template <typename RandomAccessIterator, typename Distance, typename T>
	void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value)
	{
		// We do the conventional approach of moving the position down to the
		// bottom then inserting the value at the back and moving it up.
		Distance childPosition = (2 * position) + 2; // Index of the right child of 'position'.

		for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2)
		{
			if(*(first + childPosition) < *(first + (childPosition - 1))) // Choose the larger of the two children.
				--childPosition;
			*(first + position) = *(first + childPosition); // Swap positions with this child.
			position = childPosition;
		}

		if(childPosition == heapSize) // If we are at the very last index of the bottom... (the final node has a left child but no right child)
		{
			*(first + position) = *(first + (childPosition - 1));
			position = childPosition - 1;
		}

		// The hole has reached the bottom level; sift the value back up to its proper place.
		eastl::promote_heap<RandomAccessIterator, Distance, T>(first, topPosition, position, value);
	}
+
+
	/// adjust_heap
	///
	/// Overload taking a Compare(a, b) function (or function object) which
	/// returns true if a < b.
	///
	/// The Compare function must work equivalently to the compare function used
	/// to make and maintain the heap.
	///
	/// This function requires that the value argument refer to a value
	/// that is currently not within the heap.
	///
	template <typename RandomAccessIterator, typename Distance, typename T, typename Compare>
	void adjust_heap(RandomAccessIterator first, Distance topPosition, Distance heapSize, Distance position, const T& value, Compare compare)
	{
		// We do the conventional approach of moving the position down to the
		// bottom then inserting the value at the back and moving it up.
		Distance childPosition = (2 * position) + 2; // Index of the right child of 'position'.

		for(; childPosition < heapSize; childPosition = (2 * childPosition) + 2)
		{
			if(compare(*(first + childPosition), *(first + (childPosition - 1)))) // Choose the larger of the two children.
				--childPosition;
			*(first + position) = *(first + childPosition); // Swap positions with this child.
			position = childPosition;
		}

		if(childPosition == heapSize) // If we are at the bottom... (the final node has a left child but no right child)
		{
			*(first + position) = *(first + (childPosition - 1));
			position = childPosition - 1;
		}

		// The hole has reached the bottom level; sift the value back up to its proper place.
		eastl::promote_heap<RandomAccessIterator, Distance, T, Compare>(first, topPosition, position, value, compare);
	}
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // push_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// push_heap
+ ///
+ /// Adds an item to a heap (which is an array). The item necessarily
+ /// comes from the back of the heap (array). Thus, the insertion of a
+ /// new item in a heap is a two step process: push_back and push_heap.
+ ///
+ /// Example usage:
+ /// vector<int> heap;
+ ///
+ /// heap.push_back(3);
+ /// push_heap(heap.begin(), heap.end()); // Places '3' appropriately.
+ ///
+ template <typename RandomAccessIterator>
+ inline void push_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(last - 1));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(last - first - 1), tempBottom);
+ }
+
+
+ /// push_heap
+ ///
+ /// This version is useful for cases where your object comparison is unusual
+ /// or where you want to have the heap store pointers to objects instead of
+ /// storing the objects themselves (often in order to improve cache coherency
+ /// while doing sorting).
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline void push_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(last - 1));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(last - first - 1), tempBottom, compare);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pop_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// pop_heap
+ ///
+ /// Removes the first item from the heap (which is an array), and adjusts
+ /// the heap so that the highest priority item becomes the new first item.
+ ///
+ /// Example usage:
+ /// vector<int> heap;
+ ///
+ /// heap.push_back(2);
+ /// heap.push_back(3);
+ /// heap.push_back(1);
+ /// <use heap[0], which is the highest priority item in the heap>
+ /// pop_heap(heap.begin(), heap.end()); // Moves heap[0] to the back of the heap and adjusts the heap.
+ /// heap.pop_back(); // Remove value that was just at the top of the heap
+ ///
+ template <typename RandomAccessIterator>
+ inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(last - 1));
+ *(last - 1) = *first;
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(last - first - 1), 0, tempBottom);
+ }
+
+
+
+ /// pop_heap
+ ///
+ /// This version is useful for cases where your object comparison is unusual
+ /// or where you want to have the heap store pointers to objects instead of
+ /// storing the objects themselves (often in order to improve cache coherency
+ /// while doing sorting).
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline void pop_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(last - 1));
+ *(last - 1) = *first;
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(last - first - 1), 0, tempBottom, compare);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // make_heap
+ ///////////////////////////////////////////////////////////////////////
+
+
+ /// make_heap
+ ///
+ /// Given an array, this function converts it into heap format.
+ /// The complexity is O(n), where n is count of the range.
+ /// The input range is not required to be in any order.
+ ///
+ template <typename RandomAccessIterator>
+ void make_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ // We do bottom-up heap construction as per Sedgewick. Such construction is O(n).
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const difference_type heapSize = last - first;
+
+ if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below).
+ {
+ difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+
+ do{
+ --parentPosition;
+ const value_type temp(*(first + parentPosition));
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, parentPosition, heapSize, parentPosition, temp);
+ } while(parentPosition != 0);
+ }
+ }
+
+
+ template <typename RandomAccessIterator, typename Compare>
+ void make_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const difference_type heapSize = last - first;
+
+ if(heapSize >= 2) // If there is anything to do... (we need this check because otherwise the math fails below).
+ {
+ difference_type parentPosition = ((heapSize - 2) >> 1) + 1; // We use '>> 1' instead of '/ 2' because we have seen VC++ generate better code with >>.
+
+ do{
+ --parentPosition;
+ const value_type temp(*(first + parentPosition));
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, parentPosition, heapSize, parentPosition, temp, compare);
+ } while(parentPosition != 0);
+ }
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // sort_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// sort_heap
+ ///
+ /// After the application if this algorithm, the range it was applied to
+ /// is no longer a heap, though it will be a reverse heap (smallest first).
+ /// The item with the lowest priority will be first, and the highest last.
+ /// This is not a stable sort because the relative order of equivalent
+ /// elements is not necessarily preserved.
+ /// The range referenced must be valid; all pointers must be dereferenceable
+ /// and within the sequence the last position is reachable from the first
+ /// by incrementation.
+ /// The complexity is at most O(n * log(n)), where n is count of the range.
+ ///
+ template <typename RandomAccessIterator>
+ inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ for(; (last - first) > 1; --last) // We simply use the heap to sort itself.
+ eastl::pop_heap<RandomAccessIterator>(first, last);
+ }
+
+
+ /// sort_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline void sort_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ for(; (last - first) > 1; --last) // We simply use the heap to sort itself.
+ eastl::pop_heap<RandomAccessIterator, Compare>(first, last, compare);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// remove_heap
+ ///
+ /// Removes an arbitrary entry from the heap and adjusts the heap appropriately.
+ /// This function is unlike pop_heap in that pop_heap moves the top item
+ /// to the back of the heap, whereas remove_heap moves an arbitrary item to
+ /// the back of the heap.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ template <typename RandomAccessIterator, typename Distance>
+ inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(first + heapSize - 1));
+ *(first + heapSize - 1) = *(first + position);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom);
+ }
+
+
+ /// remove_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ /// Note: Since this function moves the element to the back of the heap and
+ /// doesn't actually remove it from the given container, the user must call
+ /// the container erase function if the user wants to erase the element
+ /// from the container.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename Compare>
+ inline void remove_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ const value_type tempBottom(*(first + heapSize - 1));
+ *(first + heapSize - 1) = *(first + position);
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), (difference_type)position, tempBottom, compare);
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // change_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// change_heap
+ ///
+ /// Given a value in the heap that has changed in priority, this function
+ /// adjusts the heap appropriately. The heap size remains unchanged after
+ /// this operation.
+ ///
+ template <typename RandomAccessIterator, typename Distance>
+ inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::remove_heap<RandomAccessIterator, Distance>(first, heapSize, position);
+
+ value_type tempBottom(*(first + heapSize - 1));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom);
+ }
+
+
+ /// change_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Distance, typename Compare>
+ inline void change_heap(RandomAccessIterator first, Distance heapSize, Distance position, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::remove_heap<RandomAccessIterator, Distance, Compare>(first, heapSize, position, compare);
+
+ value_type tempBottom(*(first + heapSize - 1));
+
+ eastl::promote_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, (difference_type)0, (difference_type)(heapSize - 1), tempBottom, compare);
+ }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_heap
+ ///////////////////////////////////////////////////////////////////////
+
+ /// is_heap
+ ///
+ /// This is a useful debugging algorithm for verifying that a random
+ /// access container is in heap format.
+ ///
+ template <typename RandomAccessIterator>
+ inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ int counter = 0;
+
+ for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1)
+ {
+ if(*first < *child)
+ return false;
+ first += counter; // counter switches between 0 and 1 every time through.
+ }
+
+ return true;
+ }
+
+
+ /// is_heap
+ ///
+ /// The Compare function must work equivalently to the compare function used
+ /// to make and maintain the heap.
+ ///
+ template <typename RandomAccessIterator, typename Compare>
+ inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ int counter = 0;
+
+ for(RandomAccessIterator child = first + 1; child < last; ++child, counter ^= 1)
+ {
+ if(compare(*first, *child))
+ return false;
+ first += counter; // counter switches between 0 and 1 every time through.
+ }
+
+ return true;
+ }
+
+
+
+ // Faster implementation for most cases:
+ //
+ // template <typename RandomAccessIterator>
+ // inline bool is_heap(RandomAccessIterator first, RandomAccessIterator last)
+ // {
+ // if(((uintptr_t)(last - first) & 1) == 0)
+ // --last;
+ //
+ // RandomAccessIterator parent = first, child = first + 1;
+ //
+ // for(; child < last; child += 2, ++parent)
+ // {
+ // if((*parent < *child) || (*parent < *(child + 1)))
+ // return false;
+ // }
+ //
+ // if((((uintptr_t)(last - first) & 1) == 0) && (*parent < *child))
+ // return false;
+ //
+ // return true;
+ // }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/config.h b/UnknownVersion/include/EASTL/internal/config.h
new file mode 100644
index 0000000..9313418
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/config.h
@@ -0,0 +1,1205 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/config.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_CONFIG_H
+#define EASTL_INTERNAL_CONFIG_H
+
+
+///////////////////////////////////////////////////////////////////////////////
+// ReadMe
+//
+// This is the EASTL configuration file. All configurable parameters of EASTL
+// are controlled through this file. However, all the settings here can be
+// manually overridden by the user. There are three ways for a user to override
+// the settings in this file:
+//
+// - Simply edit this file.
+// - Define EASTL_USER_CONFIG_HEADER.
+// - Predefine individual defines (e.g. EASTL_ASSERT).
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_USER_CONFIG_HEADER
+//
+// This allows the user to define a header file to be #included before the
+// EASTL config.h contents are compiled. A primary use of this is to override
+// the contents of this config.h file. Note that all the settings below in
+// this file are user-overridable.
+//
+// Example usage:
+// #define EASTL_USER_CONFIG_HEADER "MyConfigOverrides.h"
+// #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef EASTL_USER_CONFIG_HEADER
+ #include EASTL_USER_CONFIG_HEADER
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EABASE_DISABLED
+//
+// The user can disable EABase usage and manually supply the configuration
+// via defining EASTL_EABASE_DISABLED and defining the appropriate entities
+// globally or via the above EASTL_USER_CONFIG_HEADER.
+//
+// Example usage:
+// #define EASTL_EABASE_DISABLED
+// #include <EASTL/vector.h>
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EABASE_DISABLED
+ #include <EABase/eabase.h>
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// VC++ bug fix.
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef _MSC_VER
+ // VC8 has a bug whereby it generates a warning when malloc.h is #included
+ // by its headers instead of by yours. There is no practical solution but
+ // to pre-empt the #include of malloc.h with our own inclusion of it.
+ // The only other alternative is to disable the warning globally, which is
+ // something we try to avoid as much as possible.
+ #pragma warning(push, 0)
+ #include <malloc.h>
+ #pragma warning(pop)
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VERSION
+//
+// We more or less follow the conventional EA packaging approach to versioning
+// here. A primary distinction here is that minor versions are defined as two
+// digit entities (e.g. ".03") instead of minimal digit entities (".3"). The logic
+// here is that the value is a counter and not a floating point fraction.
+// Note that the major version doesn't have leading zeros.
+//
+// Example version strings:
+// "0.91.00" // Major version 0, minor version 91, patch version 0.
+// "1.00.00" // Major version 1, minor and patch version 0.
+// "3.10.02" // Major version 3, minor version 10, patch version 02.
+// "12.03.01" // Major version 12, minor version 03, patch version 01.
+//
+// Example usage:
+// printf("EASTL version: %s", EASTL_VERSION);
+// printf("EASTL version: %d.%d.%d", EASTL_VERSION_N / 10000 % 100, EASTL_VERSION_N / 100 % 100, EASTL_VERSION_N % 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VERSION
+ #define EASTL_VERSION "1.11.03"
+ #define EASTL_VERSION_N 11103
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_PLATFORM_MICROSOFT
+//
+// Defined as 1 or undefined.
+// Implements support for the definition of EA_PLATFORM_MICROSOFT for the case
+// of using EABase versions prior to the addition of its EA_PLATFORM_MICROSOFT support.
+//
+#if (EABASE_VERSION_N < 20022) && !defined(EA_PLATFORM_MICROSOFT)
+ #if defined(EA_PLATFORM_WINDOWS) || defined(EA_PLATFORM_XENON)
+ #define EA_PLATFORM_MICROSOFT 1
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_COMPILER_NO_STANDARD_CPP_LIBRARY
+//
+// Defined as 1 or undefined.
+// Implements support for the definition of EA_COMPILER_NO_STANDARD_CPP_LIBRARY for the case
+// of using EABase versions prior to the addition of its EA_COMPILER_NO_STANDARD_CPP_LIBRARY support.
+//
+#if (EABASE_VERSION_N < 20022) && !defined(EA_COMPILER_NO_STANDARD_CPP_LIBRARY)
+ #if defined(EA_PLATFORM_ANDROID)
+ #define EA_COMPILER_NO_STANDARD_CPP_LIBRARY 1
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EA_COMPILER_NO_RTTI
+//
+// Defined as 1 or undefined.
+// Implements support for the definition of EA_COMPILER_NO_RTTI for the case
+// of using EABase versions prior to the addition of its EA_COMPILER_NO_RTTI support.
+//
+#if (EABASE_VERSION_N < 20022) && !defined(EA_COMPILER_NO_RTTI)
+ #if defined(__SNC__) && !defined(__RTTI)
+ #define EA_COMPILER_NO_RTTI
+ #elif defined(__GXX_ABI_VERSION) && !defined(__GXX_RTTI)
+ #define EA_COMPILER_NO_RTTI
+ #elif defined(_MSC_VER) && !defined(_CPPRTTI)
+ #define EA_COMPILER_NO_RTTI
+ #elif defined(__MWERKS__)
+ #if !__option(RTTI)
+ #define EA_COMPILER_NO_RTTI
+ #endif
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL namespace
+//
+// We define this so that users that #include this config file can reference
+// these namespaces without seeing any other files that happen to use them.
+///////////////////////////////////////////////////////////////////////////////
+
+/// EA Standard Template Library
+namespace eastl
+{
+ // Intentionally empty.
+}
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG
+//
+// Defined as an integer >= 0. Default is 1 for debug builds and 0 for
+// release builds. This define is also a master switch for the default value
+// of some other settings.
+//
+// Example usage:
+// #if EASTL_DEBUG
+// ...
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG
+ #if defined(EA_DEBUG) || defined(_DEBUG)
+ #define EASTL_DEBUG 1
+ #else
+ #define EASTL_DEBUG 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUGPARAMS_LEVEL
+//
+// EASTL_DEBUGPARAMS_LEVEL controls what debug information is passed through to
+// the allocator by default.
+// This value may be defined by the user ... if not it will default to 1 for
+// EA_DEBUG builds, otherwise 0.
+//
+// 0 - no debug information is passed through to allocator calls.
+// 1 - 'name' is passed through to allocator calls.
+// 2 - 'name', __FILE__, and __LINE__ are passed through to allocator calls.
+//
+// This parameter mirrors the equivalent parameter in the CoreAllocator package.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUGPARAMS_LEVEL
+ #if EASTL_DEBUG
+ #define EASTL_DEBUGPARAMS_LEVEL 2
+ #else
+ #define EASTL_DEBUGPARAMS_LEVEL 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DLL
+//
+// Defined as 0 or 1. The default is dependent on the definition of EA_DLL.
+// If EA_DLL is defined, then EASTL_DLL is 1, else EASTL_DLL is 0.
+// EA_DLL is a define that controls DLL builds within the EAConfig build system.
+// EASTL_DLL controls whether EASTL is built and used as a DLL.
+// Normally you wouldn't do such a thing, but there are use cases for such
+// a thing, particularly in the case of embedding C++ into C# applications.
+//
+#ifndef EASTL_DLL
+ #if defined(EA_DLL)
+ #define EASTL_DLL 1
+ #else
+ #define EASTL_DLL 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_API
+//
+// This is used to label functions as DLL exports under Microsoft platforms.
+// If EA_DLL is defined, then the user is building EASTL as a DLL and EASTL's
+// non-templated functions will be exported. EASTL template functions are not
+// labelled as EASTL_API (and are thus not exported in a DLL build). This is
+// because it's not possible (or at least unsafe) to implement inline templated
+// functions in a DLL.
+//
+// Example usage of EASTL_API:
+// EASTL_API int someVariable = 10; // Export someVariable in a DLL build.
+//
+// struct EASTL_API SomeClass{ // Export SomeClass and its member functions in a DLL build.
+// };
+//
+// EASTL_API void SomeFunction(); // Export SomeFunction in a DLL build.
+//
+//
+#if defined(EA_DLL) && !defined(EASTL_DLL)
+ #define EASTL_DLL 1
+#endif
+
+#ifndef EASTL_API // If the build file hasn't already defined this to be dllexport...
+ #if EASTL_DLL && defined(_MSC_VER)
+ #define EASTL_API __declspec(dllimport)
+ #define EASTL_TEMPLATE_API // Not sure if there is anything we can do here.
+ #else
+ #define EASTL_API
+ #define EASTL_TEMPLATE_API
+ #endif
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NAME_ENABLED / EASTL_NAME / EASTL_NAME_VAL
+//
+// Used to wrap debug string names. In a release build, the definition
+// goes away. These are present to avoid release build compiler warnings
+// and to make code simpler.
+//
+// Example usage of EASTL_NAME:
+// // pName will be defined away in a release build and thus prevent compiler warnings.
+// void allocator::set_name(const char* EASTL_NAME(pName))
+// {
+// #if EASTL_NAME_ENABLED
+// mpName = pName;
+// #endif
+// }
+//
+// Example usage of EASTL_NAME_VAL:
+// // "xxx" is defined to NULL in a release build.
+// vector<T, Allocator>::vector(const allocator_type& allocator = allocator_type(EASTL_NAME_VAL("xxx")));
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NAME_ENABLED
+ #define EASTL_NAME_ENABLED EASTL_DEBUG
+#endif
+
+#ifndef EASTL_NAME
+ #if EASTL_NAME_ENABLED
+ #define EASTL_NAME(x) x
+ #define EASTL_NAME_VAL(x) x
+ #else
+ #define EASTL_NAME(x)
+ #define EASTL_NAME_VAL(x) ((const char*)NULL)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEFAULT_NAME_PREFIX
+//
+// Defined as a string literal. Defaults to "EASTL".
+// This define is used as the default name for EASTL where such a thing is
+// referenced in EASTL. For example, if the user doesn't specify an allocator
+// name for their deque, it is named "EASTL deque". However, you can override
+// this to say "SuperBaseball deque" by changing EASTL_DEFAULT_NAME_PREFIX.
+//
+// Example usage (which is simply taken from how deque.h uses this define):
+// #ifndef EASTL_DEQUE_DEFAULT_NAME
+// #define EASTL_DEQUE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " deque"
+// #endif
+//
+#ifndef EASTL_DEFAULT_NAME_PREFIX
+ #define EASTL_DEFAULT_NAME_PREFIX "EASTL"
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_DEBUG.
+// If EASTL_ASSERT_ENABLED is non-zero, then asserts will be executed via
+// the assertion mechanism.
+//
+// Example usage:
+// #if EASTL_ASSERT_ENABLED
+// EASTL_ASSERT(v.size() > 17);
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+//
+// Defined as 0 or non-zero. Default is same as EASTL_ASSERT_ENABLED.
+// This is like EASTL_ASSERT_ENABLED, except it is for empty container
+// references. Sometime people like to be able to take a reference to
+// the front of the container, but not use it if the container is empty.
+// In practice it's often easier and more efficient to do this than to write
+// extra code to check if the container is empty.
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// inline typename vector<T, Allocator>::reference
+// vector<T, Allocator>::front()
+// {
+// #if EASTL_ASSERT_ENABLED
+// EASTL_ASSERT(mpEnd > mpBegin);
+// #endif
+//
+// return *mpBegin;
+// }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+ #define EASTL_EMPTY_REFERENCE_ASSERT_ENABLED EASTL_ASSERT_ENABLED
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SetAssertionFailureFunction
+//
+// Allows the user to set a custom assertion failure mechanism.
+//
+// Example usage:
+// void Assert(const char* pExpression, void* pContext);
+// SetAssertionFailureFunction(Assert, this);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERTION_FAILURE_DEFINED
+ #define EASTL_ASSERTION_FAILURE_DEFINED
+
+ namespace eastl
+ {
+ typedef void (*EASTL_AssertionFailureFunction)(const char* pExpression, void* pContext);
+ EASTL_API void SetAssertionFailureFunction(EASTL_AssertionFailureFunction pFunction, void* pContext);
+
+ // These are the internal default functions that implement asserts.
+ EASTL_API void AssertionFailure(const char* pExpression);
+ EASTL_API void AssertionFailureFunctionDefault(const char* pExpression, void* pContext);
+ }
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ASSERT
+//
+// Assertion macro. Can be overridden by user with a different value.
+//
+// Example usage:
+// EASTL_ASSERT(intVector.size() < 100);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ASSERT
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_ASSERT(expression) (void)((expression) || (eastl::AssertionFailure(#expression), 0))
+ #else
+ #define EASTL_ASSERT(expression)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FAIL_MSG
+//
+// Failure macro. Can be overridden by user with a different value.
+//
+// Example usage:
+// EASTL_FAIL_MSG("detected error condition!");
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FAIL_MSG
+ #if EASTL_ASSERT_ENABLED
+ #define EASTL_FAIL_MSG(message) (eastl::AssertionFailure(message))
+ #else
+ #define EASTL_FAIL_MSG(message)
+ #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_CT_ASSERT / EASTL_CT_ASSERT_NAMED
+//
+// EASTL_CT_ASSERT is a macro for compile time assertion checks, useful for
+// validating *constant* expressions. The advantage over using EASTL_ASSERT
+// is that errors are caught at compile time instead of runtime.
+//
+// Example usage:
+// EASTL_CT_ASSERT(sizeof(uint32_t) == 4);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#if defined(EASTL_DEBUG) && !defined(EASTL_CT_ASSERT)
+ template <bool> struct EASTL_CT_ASSERTION_FAILURE;
+ template <> struct EASTL_CT_ASSERTION_FAILURE<true>{ enum { value = 1 }; }; // We create a specialization for true, but not for false.
+ template <int x> struct EASTL_CT_ASSERTION_TEST{};
+
+ #define EASTL_PREPROCESSOR_JOIN(a, b) EASTL_PREPROCESSOR_JOIN1(a, b)
+ #define EASTL_PREPROCESSOR_JOIN1(a, b) EASTL_PREPROCESSOR_JOIN2(a, b)
+ #define EASTL_PREPROCESSOR_JOIN2(a, b) a##b
+
+ #if defined(_MSC_VER)
+ #define EASTL_CT_ASSERT(expression) typedef EASTL_CT_ASSERTION_TEST< sizeof(EASTL_CT_ASSERTION_FAILURE< (bool)(expression) >)> EASTL_CT_ASSERT_FAILURE
+ #elif defined(__ICL) || defined(__ICC)
+ #define EASTL_CT_ASSERT(expression) typedef char EASTL_PREPROCESSOR_JOIN(EASTL_CT_ASSERT_FAILURE_, __LINE__) [EASTL_CT_ASSERTION_FAILURE< (bool)(expression) >::value]
+ #elif defined(__MWERKS__)
+ #define EASTL_CT_ASSERT(expression) enum { EASTL_PREPROCESSOR_JOIN(EASTL_CT_ASSERT_FAILURE_, __LINE__) = sizeof(EASTL_CT_ASSERTION_FAILURE< (bool)(expression) >) }
+ #else // GCC, Clang etc.
+ #define EASTL_CT_ASSERT(expression) static_assert(expression, "EASTL compile time assert failed")
+ #endif
+#else
+ #define EASTL_CT_ASSERT(expression)
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_DEBUG_BREAK
+//
+// This function causes an app to immediately stop under the debugger.
+// It is implemented as a macro in order to allow stopping at the site
+// of the call.
+//
+//
+// Example usage:
+// EASTL_DEBUG_BREAK();
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_DEBUG_BREAK
+ #if defined(_MSC_VER) && (_MSC_VER >= 1300)
+ #define EASTL_DEBUG_BREAK() __debugbreak() // This is a compiler intrinsic which will map to appropriate inlined asm for the platform.
+ #elif defined(EA_PROCESSOR_MIPS) //
+ #define EASTL_DEBUG_BREAK() asm("break")
+ #elif defined(__SNC__)
+ #define EASTL_DEBUG_BREAK() *(int*)(0) = 0
+ #elif defined(EA_PLATFORM_PS3)
+ #define EASTL_DEBUG_BREAK() asm volatile("tw 31,1,1")
+ #elif defined(EA_PROCESSOR_POWERPC) // Generic PowerPC.
+ #define EASTL_DEBUG_BREAK() asm(".long 0") // This triggers an exception by executing opcode 0x00000000.
+ #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && defined(EA_ASM_STYLE_INTEL)
+ #define EASTL_DEBUG_BREAK() { __asm int 3 }
+ #elif (defined(EA_PROCESSOR_X86) || defined(EA_PROCESSOR_X86_64)) && (defined(EA_ASM_STYLE_ATT) || defined(__GNUC__))
+ #define EASTL_DEBUG_BREAK() asm("int3")
+ #else
+ void EASTL_DEBUG_BREAK(); // User must define this externally.
+ #endif
+#else
+ void EASTL_DEBUG_BREAK(); // User must define this externally.
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 0 (disabled) until some future date.
+// If enabled (1) then container operator= copies the allocator from the
+// source container. It ideally should be set to enabled but for backwards
+// compatibility with older versions of EASTL it is currently set to 0.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_COPY_ENABLED
+ #define EASTL_ALLOCATOR_COPY_ENABLED 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FIXED_SIZE_TRACKING_ENABLED
+//
+// Defined as an integer >= 0. Default is same as EASTL_DEBUG.
+// If EASTL_FIXED_SIZE_TRACKING_ENABLED is enabled, then fixed
+// containers in debug builds track the max count of objects
+// that have been in the container. This allows for the tuning
+// of fixed container sizes to their minimum required size.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FIXED_SIZE_TRACKING_ENABLED
+ #define EASTL_FIXED_SIZE_TRACKING_ENABLED EASTL_DEBUG
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_RTTI_ENABLED
+//
+// Defined as 0 or 1. Default is 1 if RTTI is supported by the compiler.
+// This define exists so that we can use some dynamic_cast operations in the
+// code without warning. dynamic_cast is only used if the user specifically refers
+// to it; EASTL won't do dynamic_cast behind your back.
+//
+// Example usage:
+// #if EASTL_RTTI_ENABLED
+// pChildClass = dynamic_cast<ChildClass*>(pParentClass);
+// #endif
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_RTTI_ENABLED
+ #if defined(EA_COMPILER_NO_RTTI)
+ #define EASTL_RTTI_ENABLED 0
+ #else
+ #define EASTL_RTTI_ENABLED 1
+ #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_EXCEPTIONS_ENABLED
+//
+// Defined as 0 or 1. Default is to follow what the compiler settings are.
+// The user can predefine EASTL_EXCEPTIONS_ENABLED to 0 or 1; however, if the
+// compiler is set to disable exceptions then EASTL_EXCEPTIONS_ENABLED is
+// forced to a value of 0 regardless of the user predefine.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#if !defined(EASTL_EXCEPTIONS_ENABLED) || ((EASTL_EXCEPTIONS_ENABLED == 1) && defined(EA_COMPILER_NO_EXCEPTIONS))
+ #define EASTL_EXCEPTIONS_ENABLED 0
+#endif
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_OPT_XXXX
+//
+// Enables some options / optimizations options that cause the string class
+// to behave slightly different from the C++ standard basic_string. These are
+// options whereby you can improve performance by avoiding operations that
+// in practice may never occur for you.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STRING_OPT_CHAR_INIT
+ // Defined as 0 or 1. Default is 1.
+ // Defines if newly created characters are initialized to 0 or left
+ // as random values.
+ // The C++ string standard is to initialize chars to 0.
+ #define EASTL_STRING_OPT_CHAR_INIT 1
+#endif
+
+#ifndef EASTL_STRING_OPT_EXPLICIT_CTORS
+ // Defined as 0 or 1. Default is 0.
+ // Defines if we should implement explicitness in constructors where the C++
+ // standard string does not. The advantage of not enabling explicit constructors
+ // is that you can do this: string s = "hello"; in addition to string s("hello");
+ // The disadvantage of not enabling explicit constructors is that there can be
+ // silent conversions done which impede performance if the user isn't paying
+ // attention.
+ // C++ standard string ctors are not explicit.
+ #define EASTL_STRING_OPT_EXPLICIT_CTORS 0
+#endif
+
+#ifndef EASTL_STRING_OPT_LENGTH_ERRORS
+ // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+ // Defines if we check for string values going beyond kMaxSize
+ // (a very large value) and throw exceptions if so.
+ // C++ standard strings are expected to do such checks.
+ #define EASTL_STRING_OPT_LENGTH_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_RANGE_ERRORS
+ // Defined as 0 or 1. Default is equal to EASTL_EXCEPTIONS_ENABLED.
+ // Defines if we check for out-of-bounds references to string
+ // positions and throw exceptions if so. Well-behaved code shouldn't
+ // reference out-of-bounds positions and so shouldn't need these checks.
+ // C++ standard strings are expected to do such range checks.
+ #define EASTL_STRING_OPT_RANGE_ERRORS EASTL_EXCEPTIONS_ENABLED
+#endif
+
+#ifndef EASTL_STRING_OPT_ARGUMENT_ERRORS
+ // Defined as 0 or 1. Default is 0.
+ // Defines if we check for NULL ptr arguments passed to string
+ // functions by the user and throw exceptions if so. Well-behaved code
+ // shouldn't pass bad arguments and so shouldn't need these checks.
+ // Also, some users believe that strings should check for NULL pointers
+ // in all their arguments and do no-ops if so. This is very debatable.
+ // C++ standard strings are not required to check for such argument errors.
+ #define EASTL_STRING_OPT_ARGUMENT_ERRORS 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ABSTRACT_STRING_ENABLED
+//
+// Defined as 0 or 1. Default is 0 until abstract string is fully tested.
+// Defines whether the proposed replacement for the string module is enabled.
+// See bonus/abstract_string.h for more information.
+//
+#ifndef EASTL_ABSTRACT_STRING_ENABLED
+ #define EASTL_ABSTRACT_STRING_ENABLED 0
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_BITSET_SIZE_T
+//
+// Defined as 0 or 1. Default is 1.
+// Controls whether bitset uses size_t or eastl_size_t.
+//
+#ifndef EASTL_BITSET_SIZE_T
+ #define EASTL_BITSET_SIZE_T 1
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_SIZE_CACHE
+//
+// Defined as 0 or 1. Default is 0.
+// If defined as 1, the list and slist containers (and possibly any additional
+// containers as well) keep a member mSize (or similar) variable which allows
+// the size() member function to execute in constant time (a.k.a. O(1)).
+// There are debates on both sides as to whether it is better to have this
+// cached value or not, as having it entails some cost (memory and code).
+// To consider: Make list size caching an optional template parameter.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIST_SIZE_CACHE
+ #define EASTL_LIST_SIZE_CACHE 0
+#endif
+
+#ifndef EASTL_SLIST_SIZE_CACHE
+ #define EASTL_SLIST_SIZE_CACHE 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAX_STACK_USAGE
+//
+// Defined as an integer greater than zero. Default is 4000.
+// There are some places in EASTL where temporary objects are put on the
+// stack. A common example of this is in the implementation of container
+// swap functions whereby a temporary copy of the container is made.
+// There is a problem, however, if the size of the item created on the stack
+// is very large. This can happen with fixed-size containers, for example.
+// The EASTL_MAX_STACK_USAGE define specifies the maximum amount of memory
+// (in bytes) that the given platform/compiler will safely allow on the stack.
+// Platforms such as Windows will generally allow larger values than embedded
+// systems or console machines, but it is usually a good idea to stick with
+// a max usage value that is portable across all platforms, lest the user be
+// surprised when something breaks as it is ported to another platform.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_MAX_STACK_USAGE
+ #define EASTL_MAX_STACK_USAGE 4000
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VA_COPY_ENABLED
+//
+// Defined as 0 or 1. Default is 1 for compilers that need it, 0 for others.
+// Some compilers on some platforms implement va_list whereby its contents
+// are destroyed upon usage, even if passed by value to another function.
+// With these compilers you can use va_copy to restore a va_list.
+// Known compiler/platforms that destroy va_list contents upon usage include:
+// CodeWarrior on PowerPC
+// GCC on x86-64
+// However, va_copy is part of the C99 standard and not part of earlier C and
+// C++ standards. So not all compilers support it. VC++ doesn't support va_copy,
+// but it turns out that VC++ doesn't need it on the platforms it supports.
+// For example usage, see the EASTL string.h file.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VA_COPY_ENABLED
+ #if defined(__MWERKS__) || (defined(__GNUC__) && (__GNUC__ >= 3) && (!defined(__i386__) || defined(__x86_64__)) && !defined(__ppc__) && !defined(__PPC__) && !defined(__PPC64__))
+ #define EASTL_VA_COPY_ENABLED 1
+ #else
+ #define EASTL_VA_COPY_ENABLED 0
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIST_PROXY_ENABLED
+//
+#if !defined(EASTL_LIST_PROXY_ENABLED)
+ // GCC with -fstrict-aliasing has bugs (or undocumented functionality) in their
+ // __may_alias__ implementation. The compiler gets confused about function signatures.
+ // VC8 (1400) doesn't need the proxy because it has built-in smart debugging capabilities.
+ #if defined(EASTL_DEBUG) && (!defined(__GNUC__) || defined(__SNC__)) && (!defined(_MSC_VER) || (_MSC_VER < 1400))
+ #define EASTL_LIST_PROXY_ENABLED 1
+ #define EASTL_LIST_PROXY_MAY_ALIAS EASTL_MAY_ALIAS
+ #else
+ #define EASTL_LIST_PROXY_ENABLED 0
+ #define EASTL_LIST_PROXY_MAY_ALIAS
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STD_ITERATOR_CATEGORY_ENABLED
+//
+// Defined as 0 or 1. Default is 1.
+// If defined as non-zero, EASTL iterator categories (iterator.h's input_iterator_tag,
+// forward_iterator_tag, etc.) are defined to be those from std C++ in the std
+// namespace. The reason for wanting to enable such a feature is that it allows
+// EASTL containers and algorithms to work with std STL containers and algorithms.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ #define EASTL_STD_ITERATOR_CATEGORY_ENABLED 0
+#endif
+
+#if EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ #define EASTL_ITC_NS std
+#else
+ #define EASTL_ITC_NS eastl
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATION_ENABLED
+//
+// Defined as an integer >= 0. Default is to be equal to EASTL_DEBUG.
+// If nonzero, then a certain amount of automatic runtime validation is done.
+// Runtime validation is not considered the same thing as asserting that user
+// input values are valid. Validation refers to internal consistency checking
+// of the validity of containers and their iterators. Validation checking is
+// something that often involves significantly more than basic assertion
+// checking, and it may sometimes be desirable to disable it.
+// This macro would generally be used internally by EASTL.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATION_ENABLED
+ #define EASTL_VALIDATION_ENABLED EASTL_DEBUG
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_COMPARE
+//
+// Defined as EASTL_ASSERT or defined away. Default is EASTL_ASSERT if EASTL_VALIDATION_ENABLED is enabled.
+// This is used to validate user-supplied comparison functions, particularly for sorting purposes.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_COMPARE_ENABLED
+ #define EASTL_VALIDATE_COMPARE_ENABLED EASTL_VALIDATION_ENABLED
+#endif
+
+#if EASTL_VALIDATE_COMPARE_ENABLED
+ #define EASTL_VALIDATE_COMPARE EASTL_ASSERT
+#else
+ #define EASTL_VALIDATE_COMPARE(expression)
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_VALIDATE_INTRUSIVE_LIST
+//
+// Defined as an integral value >= 0. Controls the amount of automatic validation
+// done by intrusive_list. A value of 0 means no automatic validation is done.
+// As of this writing, EASTL_VALIDATE_INTRUSIVE_LIST defaults to 0, as it makes
+// the intrusive_list_node become a non-POD, which may be an issue for some code.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_VALIDATE_INTRUSIVE_LIST
+ #define EASTL_VALIDATE_INTRUSIVE_LIST 0
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_FORCE_INLINE
+//
+// Defined as a "force inline" expression or defined away.
+// You generally don't need to use forced inlining with the Microsoft and
+// Metrowerks compilers, but you may need it with the GCC compiler (any version).
+//
+// Example usage:
+// template <typename T, typename Allocator>
+// EASTL_FORCE_INLINE typename vector<T, Allocator>::size_type
+// vector<T, Allocator>::size() const
+// { return mpEnd - mpBegin; }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_FORCE_INLINE
+ #define EASTL_FORCE_INLINE EA_FORCE_INLINE
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MAY_ALIAS
+//
+// Defined as a macro that wraps the GCC may_alias attribute. This attribute
+// has no significance for VC++ because VC++ doesn't support the concept of
+// strict aliasing. Users should avoid writing code that breaks strict
+// aliasing rules; EASTL_MAY_ALIAS is for cases with no alternative.
+//
+// Example usage:
+// uint32_t value EASTL_MAY_ALIAS;
+//
+// Example usage:
+// typedef uint32_t EASTL_MAY_ALIAS value_type;
+// value_type value;
+//
+#if defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 303)
+ #define EASTL_MAY_ALIAS __attribute__((__may_alias__))
+#else
+ #define EASTL_MAY_ALIAS
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_LIKELY / EASTL_UNLIKELY
+//
+// Defined as a macro which gives a hint to the compiler for branch
+// prediction. GCC gives you the ability to manually give a hint to
+// the compiler about the result of a comparison, though it's often
+// best to compile shipping code with profiling feedback under both
+// GCC (-fprofile-arcs) and VC++ (/LTCG:PGO, etc.). However, there
+// are times when you feel very sure that a boolean expression will
+// usually evaluate to either true or false and can help the compiler
+// by using an explicit directive...
+//
+// Example usage:
+// if(EASTL_LIKELY(a == 0)) // Tell the compiler that a will usually equal 0.
+// { ... }
+//
+// Example usage:
+// if(EASTL_UNLIKELY(a == 0)) // Tell the compiler that a will usually not equal 0.
+// { ... }
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_LIKELY
+ #if defined(__GNUC__) && (__GNUC__ >= 3)
+ #define EASTL_LIKELY(x) __builtin_expect(!!(x), true)
+ #define EASTL_UNLIKELY(x) __builtin_expect(!!(x), false)
+ #else
+ #define EASTL_LIKELY(x) (x)
+ #define EASTL_UNLIKELY(x) (x)
+ #endif
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_MINMAX_ENABLED
+//
+// Defined as 0 or 1; default is 1.
+// Specifies whether the min and max algorithms are available.
+// It may be useful to disable the min and max algorithms because sometimes
+// #defines for min and max exist which would collide with EASTL min and max.
+// Note that there are already alternative versions of min and max in EASTL
+// with the min_alt and max_alt functions. You can use these without colliding
+// with min/max macros that may exist.
+//
+///////////////////////////////////////////////////////////////////////////////
+#ifndef EASTL_MINMAX_ENABLED
+ #define EASTL_MINMAX_ENABLED 1
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_NOMINMAX
+//
+// Defined as 0 or 1; default is 1.
+// MSVC++ has #defines for min/max which collide with the min/max algorithm
+// declarations. If EASTL_NOMINMAX is defined as 1, then we undefine min and
+// max if they are #defined by an external library. This allows our min and
+// max definitions in algorithm.h to work as expected. An alternative to
+// the enabling of EASTL_NOMINMAX is to #define NOMINMAX in your project
+// settings if you are compiling for Windows.
+// Note that this does not control the availability of the EASTL min and max
+// algorithms; the EASTL_MINMAX_ENABLED configuration parameter does that.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_NOMINMAX
+ #define EASTL_NOMINMAX 1
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALIGN_OF
+//
+// Determines the alignment of a type.
+//
+// Example usage:
+// size_t alignment = EASTL_ALIGN_OF(int);
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALIGN_OF
+ #if defined(__MWERKS__) || defined(__ghs__)
+ #define EASTL_ALIGN_OF(type) ((size_t)__alignof__(type))
+ #elif !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x doesn't do __alignof correctly all the time.
+ #define EASTL_ALIGN_OF __alignof
+ #else
+ #define EASTL_ALIGN_OF(type) ((size_t)offsetof(struct{ char c; type m; }, m))
+ #endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// eastl_size_t
+//
+// Defined as an unsigned integer type, usually either size_t or uint32_t.
+// Defaults to uint32_t instead of size_t because the latter wastes memory
+// and is sometimes slower on 64 bit machines.
+//
+// Example usage:
+// eastl_size_t n = intVector.size();
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_SIZE_T
+ #if(EA_PLATFORM_WORD_SIZE == 4) // If (sizeof(size_t) == 4) and we can thus use size_t as-is...
+ #include <stddef.h>
+ #define EASTL_SIZE_T size_t
+ #define EASTL_SSIZE_T intptr_t
+ #else
+ #define EASTL_SIZE_T uint32_t
+ #define EASTL_SSIZE_T int32_t
+ #endif
+#endif
+
+typedef EASTL_SIZE_T eastl_size_t; // Same concept as std::size_t.
+typedef EASTL_SSIZE_T eastl_ssize_t; // Signed version of eastl_size_t. Concept is similar to Posix's ssize_t.
+
+
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// AddRef / Release
+//
+// AddRef and Release are used for "intrusive" reference counting. By the term
+// "intrusive", we mean that the reference count is maintained by the object
+// and not by the user of the object. Given that an object implements referencing
+// counting, the user of the object needs to be able to increment and decrement
+// that reference count. We do that via the venerable AddRef and Release functions
+// which the object must supply. These defines here allow us to specify the name
+// of the functions. They could just as well be defined to addref and delref or
+// IncRef and DecRef.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTLAddRef
+ #define EASTLAddRef AddRef
+#endif
+
+#ifndef EASTLRelease
+ #define EASTLRelease Release
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_ALLOCATOR_EXPLICIT_ENABLED
+//
+// Defined as 0 or 1. Default is 0 for now but ideally would be changed to
+// 1 some day. It's 0 because setting it to 1 breaks some existing code.
+// This option enables the allocator ctor to be explicit, which avoids
+// some undesirable silent conversions, especially with the string class.
+//
+// Example usage:
+// class allocator
+// {
+// public:
+// EASTL_ALLOCATOR_EXPLICIT allocator(const char* pName);
+// };
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef EASTL_ALLOCATOR_EXPLICIT_ENABLED
+ #define EASTL_ALLOCATOR_EXPLICIT_ENABLED 0
+#endif
+
+#if EASTL_ALLOCATOR_EXPLICIT_ENABLED
+ #define EASTL_ALLOCATOR_EXPLICIT explicit
+#else
+ #define EASTL_ALLOCATOR_EXPLICIT
+#endif
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL allocator
+//
+// The EASTL allocator system allows you to redefine how memory is allocated
+// via some defines that are set up here. In the container code, memory is
+// allocated via macros which expand to whatever the user has them set to
+// expand to. Given that there are multiple allocator systems available,
+// this system allows you to configure it to use whatever system you want,
+// provided your system meets the requirements of this library.
+// The requirements are:
+//
+// - Must be constructable via a const char* (name) parameter.
+// Some uses of allocators won't require this, however.
+// - Allocate a block of memory of size n and debug name string.
+// - Allocate a block of memory of size n, debug name string,
+// alignment a, and offset o.
+// - Free memory allocated via either of the allocation functions above.
+// - Provide a default allocator instance which can be used if the user
+// doesn't provide a specific one.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+// namespace eastl
+// {
+// class allocator
+// {
+// allocator(const char* pName = NULL);
+//
+// void* allocate(size_t n, int flags = 0);
+// void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0);
+// void deallocate(void* p, size_t n);
+//
+// const char* get_name() const;
+// void set_name(const char* pName);
+// };
+//
+// allocator* GetDefaultAllocator(); // This is used for anonymous allocations.
+// }
+
+#ifndef EASTLAlloc // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+    // Allocates n bytes from the given allocator, tagging the allocation with the
+    // call site. Note: no trailing semicolon in the macro body — callers terminate
+    // the statement themselves, which keeps the macro usable inside larger
+    // expressions and makes it consistent with EASTLAllocAligned and EASTLFree below.
+    // NOTE(review): these macros pass __FILE__/__LINE__, which does not match the
+    // allocate(n, flags) interface sketched in the comment block above — confirm
+    // against the project's actual allocator signature.
+    #define EASTLAlloc(allocator, n) (allocator).allocate((n), __FILE__, __LINE__)
+#endif
+
+#ifndef EASTLAllocFlags // To consider: Instead of calling through pAllocator, just go directly to operator new, since that's what allocator does.
+    // Same as EASTLAlloc, but also forwards allocation flags.
+    #define EASTLAllocFlags(allocator, n, flags) (allocator).allocate((n), __FILE__, __LINE__, (flags))
+#endif
+
+#ifndef EASTLAllocAligned
+    // Aligned allocation with an alignment offset, tagged with the call site.
+    #define EASTLAllocAligned(allocator, n, alignment, offset) (allocator).allocate((n), __FILE__, __LINE__, (alignment), (offset))
+#endif
+
+#ifndef EASTLFree
+    // Frees memory obtained via any of the allocation macros above.
+    #define EASTLFree(allocator, p, size) (allocator).deallocate((p), (size))
+#endif
+
+#ifndef EASTLAllocatorType
+    // The allocator class used when a container does not specify one.
+    #define EASTLAllocatorType eastl::allocator
+#endif
+
+#ifndef EASTLAllocatorDefault
+    // EASTLAllocatorDefault returns the default allocator instance. This is not a global
+    // allocator which implements all container allocations but is the allocator that is
+    // used when EASTL needs to allocate memory internally. There are very few cases where
+    // EASTL allocates memory internally, and in each of these it is for a sensible reason
+    // that is documented to behave as such.
+    #define EASTLAllocatorDefault eastl::GetDefaultAllocator
+#endif
+
+
+
+
+
+
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/eastl_rw.h b/UnknownVersion/include/EASTL/internal/eastl_rw.h
new file mode 100644
index 0000000..d0bf15a
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/eastl_rw.h
@@ -0,0 +1,47 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// To use this file, you can either copy and paste its contents right below
+// the EASTL_USER_CONFIG_HEADER section of EASTL's config.h or you can leave
+// config.h unmodified and instead #define EASTL_USER_CONFIG_HEADER to be
+// config_rw.h and config.h will #include this file automatically.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_RW_H
+#define EASTL_RW_H
+
+// Unused
+
+#endif // Header include guard
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/fixed_pool.h b/UnknownVersion/include/EASTL/internal/fixed_pool.h
new file mode 100644
index 0000000..1512678
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/fixed_pool.h
@@ -0,0 +1,1397 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/fixed_pool.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following
+// aligned_buffer
+// fixed_pool_base
+// fixed_pool
+// fixed_pool_with_overflow
+// fixed_hashtable_allocator
+// fixed_vector_allocator
+// fixed_swap
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_FIXED_POOL_H
+#define EASTL_INTERNAL_FIXED_POOL_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/functional.h>
+#include <EASTL/memory.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #include <new>
+ #pragma warning(pop)
+#else
+ #include <new>
+#endif
+
+
+
+namespace eastl
+{
+
+ /// EASTL_FIXED_POOL_DEFAULT_NAME
+ ///
+ /// Defines a default allocator name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_FIXED_POOL_DEFAULT_NAME
+ #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool".
+ #endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // aligned_buffer
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// aligned_buffer
+ ///
+ /// This is useful for creating a buffer of the same size and alignment
+ /// of a given struct or class. This is useful for creating memory pools
+ /// that support both size and alignment requirements of stored objects
+ /// but without wasting space in over-allocating.
+ ///
+ /// Note that we implement this via struct specializations, as some
+ /// compilers such as VC++ do not support specification of alignments
+ /// in any way other than via an integral constant.
+ ///
+ /// Example usage:
+ /// struct Widget{ }; // This class has a given size and alignment.
+ ///
+ /// Declare a char buffer of equal size and alignment to Widget.
+ /// aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> mWidgetBuffer;
+ ///
+ /// Declare an array this time.
+ /// aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> mWidgetArray[15];
+ ///
+    // may_alias: the raw bytes are legally read/written through pointers to other
+    // types without violating strict-aliasing rules.
+    typedef char EASTL_MAY_ALIAS aligned_buffer_char;
+
+    // Primary template: natural (char) alignment, no alignment attributes needed.
+    template <size_t size, size_t alignment>
+    struct aligned_buffer { aligned_buffer_char buffer[size]; };
+
+    // One specialization per supported power-of-two alignment, since (per the note
+    // above) compilers such as VC++ only accept integral alignment constants.
+    template<size_t size>
+    struct aligned_buffer<size, 2>    { EA_PREFIX_ALIGN(2) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 4>    { EA_PREFIX_ALIGN(4) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 8>    { EA_PREFIX_ALIGN(8) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 16>   { EA_PREFIX_ALIGN(16) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 32>   { EA_PREFIX_ALIGN(32) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 64>   { EA_PREFIX_ALIGN(64) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 128>  { EA_PREFIX_ALIGN(128) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128); };
+
+    #if !defined(EA_PLATFORM_PSP) // This compiler fails to compile alignment >= 256 and gives an error.
+
+    template<size_t size>
+    struct aligned_buffer<size, 256>  { EA_PREFIX_ALIGN(256) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 512>  { EA_PREFIX_ALIGN(512) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };
+
+    template<size_t size>
+    struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };
+
+    #endif // EA_PLATFORM_PSP
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool_base
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool_base
+ ///
+ /// This is a base class for the implementation of fixed-size pools.
+ /// In particular, the fixed_pool and fixed_pool_with_overflow classes
+ /// are based on fixed_pool_base.
+ ///
+    struct EASTL_API fixed_pool_base
+    {
+    public:
+        /// fixed_pool_base
+        ///
+        /// Default constructor. All three list pointers start at pMemory; the
+        /// real carving of the buffer into nodes happens later, in init().
+        fixed_pool_base(void* pMemory = NULL)
+            : mpHead((Link*)pMemory)
+            , mpNext((Link*)pMemory)
+            , mpCapacity((Link*)pMemory)
+            #if EASTL_DEBUG
+            , mnNodeSize(0) // This is normally set in the init function.
+            #endif
+        {
+            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                mnCurrentSize = 0;
+                mnPeakSize = 0;
+            #endif
+        }
+
+
+        /// operator=
+        ///
+        /// Intentionally a no-op: the pool's buffer address is specific to this
+        /// instance, so member data must not be copied across instances.
+        fixed_pool_base& operator=(const fixed_pool_base&)
+        {
+            // By design we do nothing. We don't attempt to deep-copy member data.
+            return *this;
+        }
+
+
+        /// init
+        ///
+        /// Initializes a fixed_pool with a given set of parameters.
+        /// You cannot call this function twice else the resulting
+        /// behaviour will be undefined. You can only call this function
+        /// after constructing the fixed_pool with the default constructor.
+        ///
+        void init(void* pMemory, size_t memorySize, size_t nodeSize,
+                    size_t alignment, size_t alignmentOffset = 0);
+
+
+        /// peak_size
+        ///
+        /// Returns the maximum number of outstanding allocations there have been
+        /// at any one time. This represents a high water mark for the allocation count.
+        /// Always returns 0 when EASTL_FIXED_SIZE_TRACKING_ENABLED is off.
+        ///
+        size_t peak_size() const
+        {
+            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                return mnPeakSize;
+            #else
+                return 0;
+            #endif
+        }
+
+
+        /// can_allocate
+        ///
+        /// Returns true if there are any free links: either a recycled node sits
+        /// on the free list, or virgin space remains between mpNext and mpCapacity.
+        ///
+        bool can_allocate() const
+        {
+            return (mpHead != NULL) || (mpNext != mpCapacity);
+        }
+
+    public:
+        /// Link
+        /// Implements a singly-linked list, overlaid in place on free nodes.
+        struct Link
+        {
+            Link* mpNext;
+        };
+
+        Link* mpHead;       // Head of the free (recycled) node list.
+        Link* mpNext;       // Next never-yet-used node in the buffer.
+        Link* mpCapacity;   // One past the end of the usable node area.
+        size_t mnNodeSize;  // NOTE(review): zero-initialized only under EASTL_DEBUG; otherwise unset until init() runs — confirm init() always precedes use.
+
+        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+            uint32_t mnCurrentSize; /// Current number of allocated nodes.
+            uint32_t mnPeakSize;    /// Max number of allocated nodes at any one time.
+        #endif
+
+    }; // fixed_pool_base
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool
+ ///////////////////////////////////////////////////////////////////////////
+
+    /// fixed_pool
+    ///
+    /// Implements a simple fixed pool allocator for use by fixed-size containers.
+    /// This is not a generic eastl allocator which can be plugged into an arbitrary
+    /// eastl container, as it simplifies some functions and arguments for the
+    /// purpose of efficiency.
+    ///
+    class EASTL_API fixed_pool : public fixed_pool_base
+    {
+    public:
+        /// fixed_pool
+        ///
+        /// Default constructor. User usually will want to call init() after
+        /// constructing via this constructor. The pMemory argument is for the
+        /// purposes of temporarily storing a pointer to the buffer to be used.
+        /// Even though init may have a pMemory argument, this arg is useful
+        /// for temporary storage, as per copy construction.
+        ///
+        fixed_pool(void* pMemory = NULL)
+            : fixed_pool_base(pMemory)
+        {
+        }
+
+
+        /// fixed_pool
+        ///
+        /// Constructs a fixed_pool with a given set of parameters.
+        ///
+        fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize,
+                    size_t alignment, size_t alignmentOffset = 0)
+        {
+            init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+        }
+
+
+        /// operator=
+        ///
+        /// Intentionally a no-op; the pool's buffer belongs to this instance.
+        fixed_pool& operator=(const fixed_pool&)
+        {
+            // By design we do nothing. We don't attempt to deep-copy member data.
+            return *this;
+        }
+
+
+        /// allocate
+        ///
+        /// Allocates a new object of the size specified upon class initialization.
+        /// Returns NULL if there is no more memory.
+        /// Recycled free-list nodes are preferred over virgin buffer space.
+        ///
+        void* allocate()
+        {
+            Link* pLink = mpHead;
+
+            if(pLink) // If we have space...
+            {
+                #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                    if(++mnCurrentSize > mnPeakSize)
+                        mnPeakSize = mnCurrentSize;
+                #endif
+
+                mpHead = pLink->mpNext;
+                return pLink;
+            }
+            else
+            {
+                // If there's no free node in the free list, just
+                // allocate another from the reserved memory area
+
+                if(mpNext != mpCapacity)
+                {
+                    pLink = mpNext;
+
+                    // Advance the virgin pointer by one node's worth of bytes.
+                    mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);
+
+                    #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                        if(++mnCurrentSize > mnPeakSize)
+                            mnPeakSize = mnCurrentSize;
+                    #endif
+
+                    return pLink;
+                }
+
+                // EASTL_ASSERT(false); To consider: enable this assert. However, we intentionally disable it because this isn't necessarily an assertable error.
+                return NULL;
+            }
+        }
+
+
+        /// deallocate
+        ///
+        /// Frees the given object which was allocated by allocate().
+        /// If the given node was not allocated by allocate() then the behaviour
+        /// is undefined. The node is pushed onto the intrusive free list.
+        ///
+        void deallocate(void* p)
+        {
+            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                --mnCurrentSize;
+            #endif
+
+            ((Link*)p)->mpNext = mpHead;
+            mpHead = ((Link*)p);
+        }
+
+
+        using fixed_pool_base::can_allocate;
+
+
+        /// Always returns the fixed default name; this pool stores no name.
+        const char* get_name() const
+        {
+            return EASTL_FIXED_POOL_DEFAULT_NAME;
+        }
+
+
+        void set_name(const char*)
+        {
+            // Nothing to do. We don't allocate memory.
+        }
+
+    }; // fixed_pool
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_pool_with_overflow
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_pool_with_overflow
+ ///
+    template <typename Allocator = EASTLAllocatorType>
+    class fixed_pool_with_overflow : public fixed_pool_base
+    {
+    public:
+        /// Default constructor; init() must be called before use.
+        fixed_pool_with_overflow(void* pMemory = NULL)
+            : fixed_pool_base(pMemory),
+              mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
+        {
+            // Leave mpPoolBegin uninitialized; it is assigned by init().
+        }
+
+
+        /// Constructs and fully initializes the pool in one step.
+        fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
+                                 size_t alignment, size_t alignmentOffset = 0)
+            : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
+        {
+            fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+            mpPoolBegin = pMemory;
+        }
+
+
+        /// operator=
+        ///
+        /// Copies only the overflow allocator (and only when allocator copying
+        /// is enabled); the fixed buffer itself is never copied, by design.
+        fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x)
+        {
+            #if EASTL_ALLOCATOR_COPY_ENABLED
+                mOverflowAllocator = x.mOverflowAllocator;
+            #else
+                (void)x;
+            #endif
+
+            return *this;
+        }
+
+
+        /// Initializes the base pool and records the buffer start, which
+        /// deallocate() uses to tell pool nodes from overflow allocations.
+        void init(void* pMemory, size_t memorySize, size_t nodeSize,
+                    size_t alignment, size_t alignmentOffset = 0)
+        {
+            fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
+
+            mpPoolBegin = pMemory;
+        }
+
+
+        /// allocate
+        ///
+        /// Prefers the free list, then virgin pool space, and finally falls
+        /// back to mOverflowAllocator when the fixed buffer is exhausted.
+        void* allocate()
+        {
+            void* p = NULL;
+            Link* pLink = mpHead;
+
+            if(pLink)
+            {
+                // Unlink from chain
+                p = pLink;
+                mpHead = pLink->mpNext;
+            }
+            else
+            {
+                // If there's no free node in the free list, just
+                // allocate another from the reserved memory area
+
+                if(mpNext != mpCapacity)
+                {
+                    p = pLink = mpNext;
+                    mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);
+                }
+                else
+                    p = mOverflowAllocator.allocate(mnNodeSize);
+            }
+
+            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                if(p && (++mnCurrentSize > mnPeakSize))
+                    mnPeakSize = mnCurrentSize;
+            #endif
+
+            return p;
+        }
+
+
+        /// deallocate
+        ///
+        /// Returns pool-resident nodes to the free list; anything outside
+        /// [mpPoolBegin, mpCapacity) must have come from the overflow allocator.
+        void deallocate(void* p)
+        {
+            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
+                --mnCurrentSize;
+            #endif
+
+            if((p >= mpPoolBegin) && (p < mpCapacity))
+            {
+                ((Link*)p)->mpNext = mpHead;
+                mpHead = ((Link*)p);
+            }
+            else
+                mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
+        }
+
+
+        using fixed_pool_base::can_allocate;
+
+
+        /// Forwards to the overflow allocator's name.
+        const char* get_name() const
+        {
+            return mOverflowAllocator.get_name();
+        }
+
+
+        void set_name(const char* pName)
+        {
+            mOverflowAllocator.set_name(pName);
+        }
+
+    public:
+        Allocator mOverflowAllocator;
+        void* mpPoolBegin; // Ideally we wouldn't need this member variable. The problem is that the information about the pool buffer and object size is stored in the owning container and we can't have access to it without increasing the amount of code we need and by templating more code. It may turn out that simply storing data here is smaller in the end.
+
+    }; // fixed_pool_with_overflow
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_node_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_node_allocator
+ ///
+ /// Note: This class was previously named fixed_node_pool, but was changed because this name
+ /// was inconsistent with the other allocators here which ended with _allocator.
+ ///
+ /// Implements a fixed_pool with a given node count, alignment, and alignment offset.
+ /// fixed_node_allocator is like fixed_pool except it is templated on the node type instead
+ /// of being a generic allocator. All it does is pass allocations through to
+ /// the fixed_pool base. This functionality is separate from fixed_pool because there
+ /// are other uses for fixed_pool.
+ ///
+ /// We template on kNodeSize instead of node_type because the former allows for the
+ /// two different node_types of the same size to use the same template implementation.
+ ///
+ /// Template parameters:
+ /// nodeSize The size of the object to allocate.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
+    class fixed_node_allocator
+    {
+    public:
+        // Pool implementation is chosen at compile time by bEnableOverflow.
+        typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type pool_type;
+        typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
+        typedef Allocator overflow_allocator_type;
+
+        enum
+        {
+            kNodeSize = nodeSize,
+            kNodeCount = nodeCount,
+            kNodesSize = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+            kBufferSize = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // nodeSize-1 slack is sufficient only because sizeof >= alignof per the assumption above.
+            kNodeAlignment = nodeAlignment,
+            kNodeAlignmentOffset = nodeAlignmentOffset
+        };
+
+    public:
+        // The pool that backs every allocation made through this allocator.
+        pool_type mPool;
+
+    public:
+        //fixed_node_allocator(const char* pName)
+        //{
+        //    mPool.set_name(pName);
+        //}
+
+
+        /// Constructs the pool over the caller-supplied node buffer.
+        /// NOTE(review): memorySize here is kNodesSize, while reset() below
+        /// passes kBufferSize — confirm the discrepancy is intended.
+        fixed_node_allocator(void* pNodeBuffer)
+            : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+        {
+        }
+
+
+        /// fixed_node_allocator
+        ///
+        /// Note that we are copying x.mpHead to our own fixed_pool. This at first may seem
+        /// broken, as fixed pools cannot take over ownership of other fixed pools' memory.
+        /// However, we declare that this copy ctor can only ever be safely called when
+        /// the user has intentionally pre-seeded the source with the destination pointer.
+        /// This is somewhat playing with fire, but it allows us to get around chicken-and-egg
+        /// problems with containers being their own allocators, without incurring any memory
+        /// costs or extra code costs. There's another reason for this: we very strongly want
+        /// to avoid full copying of instances of fixed_pool around, especially via the stack.
+        /// Larger pools won't even be able to fit on many machine's stacks. So this solution
+        /// is also a mechanism to prevent that situation from existing and being used.
+        /// Perhaps some day we'll find a more elegant yet costless way around this.
+        ///
+        fixed_node_allocator(const this_type& x)
+            : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+        {
+            // Problem: how do we copy mPool.mOverflowAllocator if mPool is fixed_pool_with_overflow?
+            // Probably we should use mPool = x.mPool, though it seems a little odd to do so after
+            // doing the copying above.
+            mPool = x.mPool;
+        }
+
+
+        /// Pool assignment is a designed no-op (aside from the overflow
+        /// allocator); see fixed_pool_base::operator=.
+        this_type& operator=(const this_type& x)
+        {
+            mPool = x.mPool;
+            return *this;
+        }
+
+
+        /// Allocates one node; n must equal kNodeSize (asserted in debug).
+        void* allocate(size_t n, int /*flags*/ = 0)
+        {
+            (void)n;
+            EASTL_ASSERT(n == kNodeSize);
+            return mPool.allocate();
+        }
+
+
+        /// Aligned overload; alignment/offset are ignored because the pool
+        /// was already built with kNodeAlignment/kNodeAlignmentOffset.
+        void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+        {
+            (void)n;
+            EASTL_ASSERT(n == kNodeSize);
+            return mPool.allocate();
+        }
+
+
+        void deallocate(void* p, size_t)
+        {
+            mPool.deallocate(p);
+        }
+
+
+        /// can_allocate
+        ///
+        /// Returns true if there are any free links.
+        ///
+        bool can_allocate() const
+        {
+            return mPool.can_allocate();
+        }
+
+
+        /// reset
+        ///
+        /// This function unilaterally resets the fixed pool back to a newly initialized
+        /// state. This is useful for using in tandem with container reset functionality.
+        ///
+        void reset(void* pNodeBuffer)
+        {
+            mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+        }
+
+
+        const char* get_name() const
+        {
+            return mPool.get_name();
+        }
+
+
+        void set_name(const char* pName)
+        {
+            mPool.set_name(pName);
+        }
+
+
+        // Only valid when pool_type is fixed_pool_with_overflow (bEnableOverflow == true).
+        overflow_allocator_type& get_overflow_allocator()
+        {
+            return mPool.mOverflowAllocator;
+        }
+
+
+        void set_overflow_allocator(const overflow_allocator_type& allocator)
+        {
+            mPool.mOverflowAllocator = allocator;
+        }
+
+    }; // fixed_node_allocator
+
+
+ // This is a near copy of the code above, with the only difference being
+ // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
+ // and the get_overflow_allocator / set_overflow_allocator functions.
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
+	class fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
+	{
+	public:
+		// With bEnableOverflow == false the pool_type is plain fixed_pool: when the
+		// fixed buffer is exhausted, allocation fails rather than falling back to a heap.
+		typedef fixed_pool pool_type;
+		typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
+		typedef Allocator overflow_allocator_type;
+
+		enum
+		{
+			kNodeSize            = nodeSize,
+			kNodeCount           = nodeCount,
+			kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+			kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+			kNodeAlignment       = nodeAlignment,
+			kNodeAlignmentOffset = nodeAlignmentOffset
+		};
+
+	public:
+		pool_type mPool;
+
+	public:
+		/// Constructs the allocator over a user-supplied node buffer.
+		/// NOTE(review): this ctor passes kNodesSize to the pool while reset() below
+		/// passes kBufferSize — confirm this asymmetry is intentional.
+		fixed_node_allocator(void* pNodeBuffer)
+			: mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+		{
+		}
+
+
+		/// Copy ctor. As with the primary template (see its copy-ctor comments), this
+		/// is only safe when the source has been intentionally pre-seeded so that
+		/// x.mPool.mpNext points at the destination's own buffer.
+		fixed_node_allocator(const this_type& x)
+			: mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
+		{
+		}
+
+
+		this_type& operator=(const this_type& x)
+		{
+			mPool = x.mPool;
+			return *this;
+		}
+
+
+		/// Allocates a single node from the fixed pool; n must equal kNodeSize.
+		void* allocate(size_t n, int /*flags*/ = 0)
+		{
+			(void)n;
+			EASTL_ASSERT(n == kNodeSize);
+			return mPool.allocate();
+		}
+
+
+		/// Aligned-allocation overload; alignment/offset are ignored because the pool
+		/// was configured with a fixed alignment at construction time.
+		void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+		{
+			(void)n;
+			EASTL_ASSERT(n == kNodeSize);
+			return mPool.allocate();
+		}
+
+
+		/// Returns the node at p to the pool free list.
+		void deallocate(void* p, size_t)
+		{
+			mPool.deallocate(p);
+		}
+
+
+		/// Returns true if there are any free links.
+		bool can_allocate() const
+		{
+			return mPool.can_allocate();
+		}
+
+
+		/// Unilaterally resets the fixed pool back to a newly initialized state.
+		/// Useful in tandem with container reset functionality.
+		void reset(void* pNodeBuffer)
+		{
+			mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+		}
+
+
+		const char* get_name() const
+		{
+			return mPool.get_name();
+		}
+
+
+		void set_name(const char* pName)
+		{
+			mPool.set_name(pName);
+		}
+
+
+		/// NOTE(review): must never be called on this specialization — there is no
+		/// overflow allocator, and the NULL dereference below is undefined behavior
+		/// if it executes. The EASTL_ASSERT is the only guard.
+		overflow_allocator_type& get_overflow_allocator()
+		{
+			EASTL_ASSERT(false);
+			return *(overflow_allocator_type*)NULL; // This is not pretty.
+		}
+
+
+		void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+		{
+			// We don't have an overflow allocator.
+			EASTL_ASSERT(false);
+		}
+
+	}; // fixed_node_allocator
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	// fixed_node_allocator instances are stateful (each owns its own pool), so
+	// equality is identity: two allocators compare equal only if they are the
+	// same object. This makes containers using them non-interchangeable.
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
+	inline bool operator==(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
+						   const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
+	{
+		return (&a == &b); // They are only equal if they are the same object.
+	}
+
+
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
+	inline bool operator!=(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
+						   const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
+	{
+		return (&a != &b); // They are only equal if they are the same object.
+	}
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_hashtable_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_hashtable_allocator
+ ///
+ /// Provides a base class for fixed hashtable allocations.
+ /// To consider: Have this inherit from fixed_node_allocator.
+ ///
+ /// Template parameters:
+ /// bucketCount The fixed number of hashtable buckets to provide.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+	template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
+	class fixed_hashtable_allocator
+	{
+	public:
+		// pool_type is fixed_pool_with_overflow<Allocator> when overflow is enabled,
+		// otherwise plain fixed_pool (selected at compile time via type_select).
+		typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type pool_type;
+		typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
+		typedef Allocator overflow_allocator_type;
+
+		enum
+		{
+			kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
+			kBucketsSize         = bucketCount * sizeof(void*),
+			kNodeSize            = nodeSize,
+			kNodeCount           = nodeCount,
+			kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+			kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
+			kNodeAlignment       = nodeAlignment,
+			kNodeAlignmentOffset = nodeAlignmentOffset,
+			kAllocFlagBuckets    = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
+		};
+
+	protected:
+		pool_type mPool;          // Fixed pool that serves node allocations.
+		void*     mpBucketBuffer; // Separate user-supplied buffer that serves bucket-array allocations.
+
+	public:
+		//fixed_hashtable_allocator(const char* pName)
+		//{
+		//    mPool.set_name(pName);
+		//}
+
+		/// Constructs with a node buffer only; bucket allocations will return NULL
+		/// until a bucket buffer is supplied.
+		fixed_hashtable_allocator(void* pNodeBuffer)
+			: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+			  mpBucketBuffer(NULL)
+		{
+			// EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+		}
+
+
+		fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
+			: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+			  mpBucketBuffer(pBucketBuffer)
+		{
+		}
+
+
+		/// fixed_hashtable_allocator
+		///
+		/// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool.
+		/// See the discussion above in fixed_node_allocator for important information about this:
+		/// the source must have been intentionally pre-seeded with the destination pointer.
+		/// NOTE(review): this copy ctor seeds from x.mPool.mpHead whereas fixed_node_allocator's
+		/// seeds from x.mPool.mpNext — confirm the difference is intentional.
+		fixed_hashtable_allocator(const this_type& x)
+			: mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+			  mpBucketBuffer(x.mpBucketBuffer)
+		{
+			// Problem: how do we copy mPool.mOverflowAllocator if mPool is fixed_pool_with_overflow?
+			// Probably we should use mPool = x.mPool, though it seems a little odd to do so after
+			// doing the copying above.
+			mPool = x.mPool;
+		}
+
+
+		fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
+		{
+			mPool = x.mPool;
+			return *this; // Do nothing. Ignore the source type.
+		}
+
+
+		/// Allocates either a node (from the pool) or the bucket array (the dedicated
+		/// bucket buffer), distinguished by the kAllocFlagBuckets bit in flags.
+		void* allocate(size_t n, int flags = 0)
+		{
+			// We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+			EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
+			if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+			{
+				EASTL_ASSERT(n == kNodeSize);  (void)n; // Make unused var warning go away.
+				return mPool.allocate();
+			}
+
+			EASTL_ASSERT(n <= kBucketsSize);
+			return mpBucketBuffer;
+		}
+
+
+		void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
+		{
+			// We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+			if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+			{
+				EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
+				return mPool.allocate();
+			}
+
+			// To consider: allow for bucket allocations to overflow.
+			EASTL_ASSERT(n <= kBucketsSize);
+			return mpBucketBuffer;
+		}
+
+
+		/// Pointer identity against mpBucketBuffer tells nodes apart from the bucket
+		/// array; bucket memory is user-owned and never returned to the pool.
+		void deallocate(void* p, size_t)
+		{
+			if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
+				mPool.deallocate(p);
+		}
+
+
+		bool can_allocate() const
+		{
+			return mPool.can_allocate();
+		}
+
+
+		/// Unilaterally resets the node pool to a newly initialized state.
+		void reset(void* pNodeBuffer)
+		{
+			// No need to modify mpBucketBuffer, as that is constant.
+			mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+		}
+
+
+		const char* get_name() const
+		{
+			return mPool.get_name();
+		}
+
+
+		void set_name(const char* pName)
+		{
+			mPool.set_name(pName);
+		}
+
+
+		overflow_allocator_type& get_overflow_allocator()
+		{
+			return mPool.mOverflowAllocator;
+		}
+
+
+		void set_overflow_allocator(const overflow_allocator_type& allocator)
+		{
+			mPool.mOverflowAllocator = allocator;
+		}
+
+	}; // fixed_hashtable_allocator
+
+
+ // This is a near copy of the code above, with the only difference being
+ // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
+ // and the get_overflow_allocator / set_overflow_allocator functions.
+	template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
+	class fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
+	{
+	public:
+		// With bEnableOverflow == false the pool is a plain fixed_pool: node
+		// allocation fails outright once the fixed buffer is exhausted.
+		typedef fixed_pool pool_type;
+		typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
+		typedef Allocator overflow_allocator_type;
+
+		enum
+		{
+			kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
+			kBucketsSize         = bucketCount * sizeof(void*),
+			kNodeSize            = nodeSize,
+			kNodeCount           = nodeCount,
+			kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+			kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
+			kNodeAlignment       = nodeAlignment,
+			kNodeAlignmentOffset = nodeAlignmentOffset,
+			kAllocFlagBuckets    = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
+		};
+
+	protected:
+		pool_type mPool;          // Fixed pool that serves node allocations.
+		void*     mpBucketBuffer; // Separate user-supplied buffer that serves bucket-array allocations.
+
+	public:
+		//fixed_hashtable_allocator(const char* pName)
+		//{
+		//    mPool.set_name(pName);
+		//}
+
+		/// Constructs with a node buffer only; bucket allocations will return NULL
+		/// until a bucket buffer is supplied.
+		fixed_hashtable_allocator(void* pNodeBuffer)
+			: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+			  mpBucketBuffer(NULL)
+		{
+			// EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
+		}
+
+
+		fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
+			: mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+			  mpBucketBuffer(pBucketBuffer)
+		{
+		}
+
+
+		/// fixed_hashtable_allocator
+		///
+		/// Note that we are copying x.mpHead and mpBucketBuffer to our own fixed_pool.
+		/// See the discussion above in fixed_node_allocator for important information about this:
+		/// the source must have been intentionally pre-seeded with the destination pointer.
+		fixed_hashtable_allocator(const this_type& x)
+			: mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
+			  mpBucketBuffer(x.mpBucketBuffer)
+		{
+		}
+
+
+		fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
+		{
+			mPool = x.mPool;
+			return *this; // Do nothing. Ignore the source type.
+		}
+
+
+		/// Allocates either a node (from the pool) or the bucket array (the dedicated
+		/// bucket buffer), distinguished by the kAllocFlagBuckets bit in flags.
+		void* allocate(size_t n, int flags = 0)
+		{
+			// We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+			EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
+			if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+			{
+				EASTL_ASSERT(n == kNodeSize);  (void)n; // Make unused var warning go away.
+				return mPool.allocate();
+			}
+
+			EASTL_ASSERT(n <= kBucketsSize);
+			return mpBucketBuffer;
+		}
+
+
+		void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
+		{
+			// We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
+			if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
+			{
+				EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
+				return mPool.allocate();
+			}
+
+			// To consider: allow for bucket allocations to overflow.
+			EASTL_ASSERT(n <= kBucketsSize);
+			return mpBucketBuffer;
+		}
+
+
+		/// Pointer identity against mpBucketBuffer tells nodes apart from the bucket
+		/// array; bucket memory is user-owned and never returned to the pool.
+		void deallocate(void* p, size_t)
+		{
+			if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
+				mPool.deallocate(p);
+		}
+
+
+		bool can_allocate() const
+		{
+			return mPool.can_allocate();
+		}
+
+
+		/// Unilaterally resets the node pool to a newly initialized state.
+		void reset(void* pNodeBuffer)
+		{
+			// No need to modify mpBucketBuffer, as that is constant.
+			mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
+		}
+
+
+		const char* get_name() const
+		{
+			return mPool.get_name();
+		}
+
+
+		void set_name(const char* pName)
+		{
+			mPool.set_name(pName);
+		}
+
+
+		/// NOTE(review): must never be called on this specialization — there is no
+		/// overflow allocator, and the NULL dereference below is undefined behavior
+		/// if it executes. The EASTL_ASSERT is the only guard.
+		overflow_allocator_type& get_overflow_allocator()
+		{
+			EASTL_ASSERT(false);
+			return *(overflow_allocator_type*)NULL; // This is not pretty.
+		}
+
+		void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+		{
+			// We don't have an overflow allocator.
+			EASTL_ASSERT(false);
+		}
+
+	}; // fixed_hashtable_allocator
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	// fixed_hashtable_allocator instances are stateful (each owns its own pool and
+	// bucket buffer), so equality is identity: equal only if the same object.
+	template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
+	inline bool operator==(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
+						   const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
+	{
+		return (&a == &b); // They are only equal if they are the same object.
+	}
+
+
+	template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
+	inline bool operator!=(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
+						   const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
+	{
+		return (&a != &b); // They are only equal if they are the same object.
+	}
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_vector_allocator
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_vector_allocator
+ ///
+ /// Template parameters:
+ /// nodeSize The size of individual objects.
+ /// nodeCount The number of objects the pool contains.
+ /// nodeAlignment The alignment of the objects to allocate.
+ /// nodeAlignmentOffset The alignment offset of the objects to allocate.
+ /// bEnableOverflow Whether or not we should use the overflow heap if our object pool is exhausted.
+ /// Allocator Overflow allocator, which is only used if bEnableOverflow == true. Defaults to the global heap.
+ ///
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
+	class fixed_vector_allocator
+	{
+	public:
+		typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
+		typedef Allocator overflow_allocator_type;
+
+		enum
+		{
+			kNodeSize            = nodeSize,
+			kNodeCount           = nodeCount,
+			kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+			kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+			kNodeAlignment       = nodeAlignment,
+			kNodeAlignmentOffset = nodeAlignmentOffset
+		};
+
+	public:
+		// Unlike the node-based fixed allocators there is no fixed_pool here: a vector
+		// needs one contiguous block, so allocations beyond the fixed buffer go
+		// straight to the overflow allocator.
+		overflow_allocator_type mOverflowAllocator;
+		void* mpPoolBegin;         // To consider: Find some way to make this data unnecessary, without increasing template proliferation.
+
+	public:
+		//fixed_vector_allocator(const char* pName = NULL)
+		//{
+		//    mOverflowAllocator.set_name(pName);
+		//}
+
+		fixed_vector_allocator(void* pNodeBuffer)
+			: mpPoolBegin(pNodeBuffer)
+		{
+		}
+
+		fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
+		{
+			#if EASTL_ALLOCATOR_COPY_ENABLED
+				mOverflowAllocator = x.mOverflowAllocator;
+			#else
+				(void)x;
+			#endif
+
+			return *this; // Do nothing. Ignore the source type.
+		}
+
+		/// All (re)allocations are delegated to the overflow allocator; the fixed
+		/// buffer itself is handed to the vector by its owner, not through here.
+		void* allocate(size_t n, int flags = 0)
+		{
+			return mOverflowAllocator.allocate(n, flags);
+		}
+
+		void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
+		{
+			return mOverflowAllocator.allocate(n, alignment, offset, flags);
+		}
+
+		/// Frees p via the overflow allocator unless p is the fixed buffer itself,
+		/// which is not heap memory and must never be freed.
+		void deallocate(void* p, size_t n)
+		{
+			if(p != mpPoolBegin)
+				mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation.
+		}
+
+		const char* get_name() const
+		{
+			return mOverflowAllocator.get_name();
+		}
+
+		void set_name(const char* pName)
+		{
+			mOverflowAllocator.set_name(pName);
+		}
+
+		overflow_allocator_type& get_overflow_allocator()
+		{
+			return mOverflowAllocator;
+		}
+
+		void set_overflow_allocator(const overflow_allocator_type& allocator)
+		{
+			mOverflowAllocator = allocator;
+		}
+
+	}; // fixed_vector_allocator
+
+
+	// No-overflow specialization: a fixed_vector with bEnableOverflow == false may
+	// never grow beyond its fixed buffer, so allocate() is a hard failure.
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
+	class fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
+	{
+	public:
+		typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
+		typedef Allocator overflow_allocator_type;
+
+		enum
+		{
+			kNodeSize            = nodeSize,
+			kNodeCount           = nodeCount,
+			kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
+			kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
+			kNodeAlignment       = nodeAlignment,
+			kNodeAlignmentOffset = nodeAlignmentOffset
+		};
+
+		//fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version.
+		//{
+		//}
+
+		// The buffer pointer is unused here: with no overflow there is nothing to
+		// compare against in deallocate, so no state is kept at all.
+		fixed_vector_allocator(void* /*pNodeBuffer*/)
+		{
+		}
+
+		void* allocate(size_t /*n*/, int /*flags*/ = 0)
+		{
+			EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space.
+			return NULL;
+		}
+
+		void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
+		{
+			EASTL_ASSERT(false);
+			return NULL;
+		}
+
+		// Nothing to free: the only memory the vector ever holds is its fixed buffer.
+		void deallocate(void* /*p*/, size_t /*n*/)
+		{
+		}
+
+		const char* get_name() const
+		{
+			return EASTL_FIXED_POOL_DEFAULT_NAME;
+		}
+
+		void set_name(const char* /*pName*/)
+		{
+		}
+
+		/// NOTE(review): must never be called on this specialization — the NULL
+		/// dereference below is undefined behavior if it executes; the EASTL_ASSERT
+		/// is the only guard.
+		overflow_allocator_type& get_overflow_allocator()
+		{
+			EASTL_ASSERT(false);
+			overflow_allocator_type* pNULL = NULL;
+			return *pNULL; // This is not pretty, but it should never execute.
+		}
+
+		void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
+		{
+			// We don't have an overflow allocator.
+			EASTL_ASSERT(false);
+		}
+
+	}; // fixed_vector_allocator
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	// As with the other fixed allocators, equality is identity: two
+	// fixed_vector_allocator instances are equal only if they are the same object.
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
+	inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
+						   const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
+	{
+		return (&a == &b); // They are only equal if they are the same object.
+	}
+
+
+	template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
+	inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
+						   const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
+	{
+		return (&a != &b); // They are only equal if they are the same object.
+	}
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // fixed_swap
+ ///////////////////////////////////////////////////////////////////////////
+
+ /// fixed_swap
+ ///
+ /// This function implements a swap suitable for fixed containers.
+ /// This is an issue because the size of fixed containers can be very
+ /// large, due to their having the container buffer within themselves.
+ /// Note that we are referring to sizeof(container) and not the total
+ /// sum of memory allocated by the container from the heap.
+ ///
+	template <typename Container>
+	void fixed_swap(Container& a, Container& b)
+	{
+		// We must do a brute-force swap, because fixed containers cannot share memory allocations.
+		eastl::less<size_t> compare;
+
+		// Small containers are swapped through a stack temporary; large ones through
+		// a heap-allocated temporary, to avoid blowing the stack (sizeof(Container)
+		// includes the whole fixed buffer).
+		if(compare(sizeof(a), EASTL_MAX_STACK_USAGE)) // Using compare instead of just '<' avoids a stubborn compiler warning.
+		{
+			// Note: The C++ language does not define what happens when you declare
+			// an object in too small of stack space but the object is never created.
+			// This may result in a stack overflow exception on some systems, depending
+			// on how they work and possibly depending on enabled debug functionality.
+
+			const Container temp(a); // Can't use global swap because that could
+			a = b;                   // itself call this swap function in return.
+			b = temp;
+		}
+		else
+		{
+			EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
+			void* const pMemory = allocator.allocate(sizeof(a), __FILE__, __LINE__);
+
+
+			// NOTE(review): if the allocation fails, the swap is silently skipped —
+			// confirm callers can tolerate that.
+			if(pMemory)
+			{
+				// Placement-construct the temporary in the heap block, swap via
+				// assignment, then explicitly destroy and release it.
+				Container* const pTemp = ::new(pMemory) Container(a);
+				a = b;
+				b = *pTemp;
+
+				pTemp->~Container();
+				allocator.deallocate(pMemory, sizeof(a));
+			}
+		}
+	}
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/generic_iterator.h b/UnknownVersion/include/EASTL/internal/generic_iterator.h
new file mode 100644
index 0000000..9700b06
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/generic_iterator.h
@@ -0,0 +1,242 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/generic_iterator.h
+//
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a generic iterator from a given iteratable type, such as a pointer.
+// We cannot put this file into our own iterator.h file because we need to
+// still be able to use this file when we have our iterator.h disabled.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_GENERIC_ITERATOR_H
+#define EASTL_INTERNAL_GENERIC_ITERATOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/type_traits.h>
+
+
+#ifdef _MSC_VER
+ #pragma warning(push) // VC++ generates a bogus warning that you cannot code away.
+ #pragma warning(disable: 4619) // There is no warning number 'number'.
+ #pragma warning(disable: 4217) // Member template functions cannot be used for copy-assignment or copy-construction.
+#endif
+
+
+namespace eastl
+{
+
+ /// generic_iterator
+ ///
+ /// Converts something which can be iterated into a formal iterator.
+ /// While this class' primary purpose is to allow the conversion of
+ /// a pointer to an iterator, you can convert anything else to an
+ /// iterator by defining an iterator_traits<> specialization for that
+ /// object type. See EASTL iterator.h for this.
+ ///
+ /// Example usage:
+ /// typedef generic_iterator<int*> IntArrayIterator;
+ /// typedef generic_iterator<int*, char> IntArrayIteratorOther;
+ ///
+	template <typename Iterator, typename Container = void>
+	class generic_iterator
+	{
+	protected:
+		Iterator mIterator; // The wrapped iterator (typically a raw pointer).
+
+	public:
+		// All iterator properties are forwarded from iterator_traits of the wrapped
+		// type; Container only serves to make iterators of distinct containers
+		// distinct types.
+		typedef typename eastl::iterator_traits<Iterator>::iterator_category iterator_category;
+		typedef typename eastl::iterator_traits<Iterator>::value_type        value_type;
+		typedef typename eastl::iterator_traits<Iterator>::difference_type   difference_type;
+		typedef typename eastl::iterator_traits<Iterator>::reference         reference;
+		typedef typename eastl::iterator_traits<Iterator>::pointer           pointer;
+		typedef Iterator                                                     iterator_type;
+		typedef Container                                                    container_type;
+		typedef generic_iterator<Iterator, Container>                        this_type;
+
+		generic_iterator()
+			: mIterator(iterator_type()) { }
+
+		explicit generic_iterator(const iterator_type& x)
+			: mIterator(x) { }
+
+		this_type& operator=(const iterator_type& x)
+			{ mIterator = x; return *this; }
+
+		// Converting ctor; enables e.g. iterator -> const_iterator conversion when
+		// Iterator2 converts to Iterator.
+		template <typename Iterator2>
+		generic_iterator(const generic_iterator<Iterator2, Container>& x)
+			: mIterator(x.base()) { }
+
+		reference operator*() const
+			{ return *mIterator; }
+
+		// Returns the wrapped iterator itself as 'pointer'; relies on the traits'
+		// pointer typedef matching Iterator (true for raw pointers).
+		pointer operator->() const
+			{ return mIterator; }
+
+		this_type& operator++()
+			{ ++mIterator; return *this; }
+
+		this_type operator++(int)
+			{ return this_type(mIterator++); }
+
+		this_type& operator--()
+			{ --mIterator; return *this; }
+
+		this_type operator--(int)
+			{ return this_type(mIterator--); }
+
+		reference operator[](const difference_type& n) const
+			{ return mIterator[n]; }
+
+		this_type& operator+=(const difference_type& n)
+			{ mIterator += n; return *this; }
+
+		this_type operator+(const difference_type& n) const
+			{ return this_type(mIterator + n); }
+
+		this_type& operator-=(const difference_type& n)
+			{ mIterator -= n; return *this; }
+
+		this_type operator-(const difference_type& n) const
+			{ return this_type(mIterator - n); }
+
+		// Read-only access to the wrapped iterator.
+		const iterator_type& base() const
+			{ return mIterator; }
+
+	}; // class generic_iterator
+
+
+
+
+
+	// Comparison and arithmetic operators, all delegating to the wrapped iterators.
+	// Each comparison comes in two forms: a heterogeneous one (IteratorL vs IteratorR,
+	// e.g. iterator vs const_iterator of the same Container) and a homogeneous one.
+	template <typename IteratorL, typename IteratorR, typename Container>
+	inline bool operator==(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+		{ return lhs.base() == rhs.base(); }
+
+	template <typename Iterator, typename Container>
+	inline bool operator==(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+		{ return lhs.base() == rhs.base(); }
+
+	template <typename IteratorL, typename IteratorR, typename Container>
+	inline bool operator!=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+		{ return lhs.base() != rhs.base(); }
+
+	template <typename Iterator, typename Container>
+	inline bool operator!=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+		{ return lhs.base() != rhs.base(); }
+
+	template <typename IteratorL, typename IteratorR, typename Container>
+	inline bool operator<(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+		{ return lhs.base() < rhs.base(); }
+
+	template <typename Iterator, typename Container>
+	inline bool operator<(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+		{ return lhs.base() < rhs.base(); }
+
+	template <typename IteratorL, typename IteratorR, typename Container>
+	inline bool operator>(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+		{ return lhs.base() > rhs.base(); }
+
+	template <typename Iterator, typename Container>
+	inline bool operator>(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+		{ return lhs.base() > rhs.base(); }
+
+	template <typename IteratorL, typename IteratorR, typename Container>
+	inline bool operator<=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+		{ return lhs.base() <= rhs.base(); }
+
+	template <typename Iterator, typename Container>
+	inline bool operator<=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+		{ return lhs.base() <= rhs.base(); }
+
+	template <typename IteratorL, typename IteratorR, typename Container>
+	inline bool operator>=(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+		{ return lhs.base() >= rhs.base(); }
+
+	template <typename Iterator, typename Container>
+	inline bool operator>=(const generic_iterator<Iterator, Container>& lhs, const generic_iterator<Iterator, Container>& rhs)
+		{ return lhs.base() >= rhs.base(); }
+
+	// Distance between two (possibly heterogeneous) iterators of the same Container.
+	template <typename IteratorL, typename IteratorR, typename Container>
+	inline typename generic_iterator<IteratorL, Container>::difference_type
+	operator-(const generic_iterator<IteratorL, Container>& lhs, const generic_iterator<IteratorR, Container>& rhs)
+		{ return lhs.base() - rhs.base(); }
+
+	// n + iterator, mirroring iterator + n above.
+	template <typename Iterator, typename Container>
+	inline generic_iterator<Iterator, Container>
+	operator+(typename generic_iterator<Iterator, Container>::difference_type n, const generic_iterator<Iterator, Container>& x)
+		{ return generic_iterator<Iterator, Container>(x.base() + n); }
+
+
+
+ /// is_generic_iterator
+ ///
+ /// Tells if an iterator is one of these generic_iterators. This is useful if you want to
+ /// write code that uses miscellaneous iterators but wants to tell if they are generic_iterators.
+ /// A primary reason to do so is that you can get at the pointer within the generic_iterator.
+ ///
+	// Primary template: an arbitrary iterator type is not a generic_iterator.
+	template <typename Iterator>
+	struct is_generic_iterator : public false_type { };
+
+	// Specialization: any generic_iterator instantiation answers true.
+	template <typename Iterator, typename Container>
+	struct is_generic_iterator<generic_iterator<Iterator, Container> > : public true_type { };
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/hashtable.h b/UnknownVersion/include/EASTL/internal/hashtable.h
new file mode 100644
index 0000000..86fc4f8
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/hashtable.h
@@ -0,0 +1,2261 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/hashtable.h
+//
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a hashtable, much like the C++ TR1 proposed
+// hash_set/hash_map classes.
+// The primary distinctions between this hashtable and TR1 hash tables are:
+// - hashtable is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - hashtable is slightly more space-efficient than a conventional std hashtable
+// implementation on platforms with 64 bit size_t. This is
+// because std STL uses size_t (64 bits) in data structures whereby 32 bits
+// of data would be fine.
+// - hashtable can contain objects with alignment requirements. TR1 hash tables
+// cannot do so without a bit of tedious non-portable effort.
+// - hashtable supports debug memory naming natively.
+// - hashtable provides a find function that lets you specify a type that is
+// different from the hash table key type. This is particularly useful for
+// the storing of string objects but finding them by char pointers.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file is currently partially based on the TR1 (technical report 1)
+// reference implementation of the hash_set/hash_map C++ classes
+// as of about 4/2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_HASHTABLE_H
+#define EASTL_INTERNAL_HASHTABLE_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+#include <string.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #include <new>
+ #include <stddef.h>
+ #pragma warning(pop)
+#else
+ #include <new>
+ #include <stddef.h>
+#endif
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated.
+ #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_HASHTABLE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_HASHTABLE_DEFAULT_NAME
+ #define EASTL_HASHTABLE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hashtable" // Unless the user overrides something, this is "EASTL hashtable".
+ #endif
+
+
+ /// EASTL_HASHTABLE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_HASHTABLE_DEFAULT_ALLOCATOR
+ #define EASTL_HASHTABLE_DEFAULT_ALLOCATOR allocator_type(EASTL_HASHTABLE_DEFAULT_NAME)
+ #endif
+
+
+
+ /// gpEmptyBucketArray
+ ///
+ /// A shared representation of an empty hash table. This is present so that
+ /// a new empty hashtable allocates no memory. It has two entries, one for
+ /// the first lone empty (NULL) bucket, and one for the non-NULL trailing sentinel.
+ ///
+ extern EASTL_API void* gpEmptyBucketArray[2];
+
+
+
+ /// hash_node
+ ///
+ /// A hash_node stores an element in a hash table, much like a
+ /// linked list node stores an element in a linked list.
+ /// A hash_node additionally can, via template parameter,
+ /// store a hash code in the node to speed up hash calculations
+ /// and comparisons in some cases.
+ ///
+	// Primary template is declared but never defined; only the two
+	// specializations below (cached / non-cached hash code) exist.
+	template <typename Value, bool bCacheHashCode>
+	struct hash_node;
+
+	// Specialization that stores the element's hash code in the node.
+	// Costs one eastl_size_t per node, but lets compare() reject mismatched
+	// nodes and lets rehashing reuse codes without re-hashing the value.
+	template <typename Value>
+	struct hash_node<Value, true>
+	{
+		Value        mValue;
+		hash_node*   mpNext;
+		eastl_size_t mnHashCode;      // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
+	} EASTL_MAY_ALIAS;
+
+	// Specialization without a cached hash code: just the value and the
+	// singly-linked chain pointer for the bucket.
+	template <typename Value>
+	struct hash_node<Value, false>
+	{
+		Value      mValue;
+		hash_node* mpNext;
+	} EASTL_MAY_ALIAS;
+
+
+
+ /// node_iterator_base
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+	// Shared state/logic for node_iterator and its const variant: a bare
+	// pointer to the current node within a single bucket's chain.
+	template <typename Value, bool bCacheHashCode>
+	struct node_iterator_base
+	{
+	public:
+		typedef hash_node<Value, bCacheHashCode> node_type;
+
+		node_type* mpNode;
+
+	public:
+		node_iterator_base(node_type* pNode)
+			: mpNode(pNode) { }
+
+		// Advance within the current bucket only; mpNode becomes NULL at the
+		// end of the bucket's chain (this iterator never crosses buckets).
+		void increment()
+			{ mpNode = mpNode->mpNext; }
+	};
+
+
+
+ /// node_iterator
+ ///
+ /// Node iterators iterate nodes within a given bucket.
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+	// Forward iterator over the nodes of a single bucket. bConst selects
+	// const vs mutable pointer/reference typedefs via type_select.
+	template <typename Value, bool bConst, bool bCacheHashCode>
+	struct node_iterator : public node_iterator_base<Value, bCacheHashCode>
+	{
+	public:
+		typedef node_iterator_base<Value, bCacheHashCode>                base_type;
+		typedef node_iterator<Value, bConst, bCacheHashCode>             this_type;
+		typedef typename base_type::node_type                            node_type;
+		typedef Value                                                    value_type;
+		typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+		typedef typename type_select<bConst, const Value&, Value&>::type reference;
+		typedef ptrdiff_t                                                difference_type;
+		typedef EASTL_ITC_NS::forward_iterator_tag                       iterator_category;
+
+	public:
+		explicit node_iterator(node_type* pNode = NULL)
+			: base_type(pNode) { }
+
+		// Converting constructor from the const variant (bConst == true).
+		// NOTE(review): when bConst is false this also permits a
+		// const_iterator -> iterator conversion (a const-correctness hole);
+		// when bConst is true it serves as the copy constructor. Confirm
+		// against upstream EASTL before tightening.
+		node_iterator(const node_iterator<Value, true, bCacheHashCode>& x)
+			: base_type(x.mpNode) { }
+
+		reference operator*() const
+			{ return base_type::mpNode->mValue; }
+
+		pointer operator->() const
+			{ return &(base_type::mpNode->mValue); }
+
+		node_iterator& operator++()
+			{ base_type::increment(); return *this; }
+
+		node_iterator operator++(int)
+			{ node_iterator temp(*this); base_type::increment(); return temp; }
+
+	}; // node_iterator
+
+
+
+ /// hashtable_iterator_base
+ ///
+ /// A hashtable_iterator iterates the entire hash table and not just
+ /// nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// We define a base class here because it is shared by both const and
+ /// non-const iterators.
+ ///
+	// Shared state/logic for hashtable_iterator and its const variant:
+	// the current node and the bucket that contains it.
+	template <typename Value, bool bCacheHashCode>
+	struct hashtable_iterator_base
+	{
+	public:
+		typedef hash_node<Value, bCacheHashCode> node_type;
+
+	public:
+		// We use public here because it allows the hashtable class to access
+		// these without a function call, and we are very strongly avoiding
+		// function calls in this library, as our primary goal is performance
+		// over correctness and some compilers (e.g. GCC) are terrible at
+		// inlining and so avoiding function calls is of major importance.
+		node_type*  mpNode;      // Current node within current bucket.
+		node_type** mpBucket;    // Current bucket.
+
+	public:
+		hashtable_iterator_base(node_type* pNode, node_type** pBucket)
+			: mpNode(pNode), mpBucket(pBucket) { }
+
+		// Move to the first node of the next non-empty bucket. Relies on the
+		// hashtable's invariant that the bucket array ends with a non-NULL
+		// sentinel entry, so the loop below cannot run off the end.
+		void increment_bucket()
+		{
+			++mpBucket;
+			while(*mpBucket == NULL)            // We store an extra bucket with some non-NULL value at the end
+				++mpBucket;                     // of the bucket array so that finding the end of the bucket
+			mpNode = *mpBucket;                 // array is quick and simple.
+		}
+
+		// Advance to the next node, crossing into following buckets when the
+		// current chain is exhausted. The same trailing sentinel guarantees
+		// termination of the while loop.
+		void increment()
+		{
+			mpNode = mpNode->mpNext;
+
+			while(mpNode == NULL)
+				mpNode = *++mpBucket;
+		}
+
+	}; // hashtable_iterator_base
+
+
+
+
+ /// hashtable_iterator
+ ///
+ /// A hashtable_iterator iterates the entire hash table and not just
+ /// nodes within a single bucket. Users in general will use a hash
+ /// table iterator much more often, as it is much like other container
+ /// iterators (e.g. vector::iterator).
+ ///
+ /// The bConst parameter defines if the iterator is a const_iterator
+ /// or an iterator.
+ ///
+	// Forward iterator over the whole hash table (all buckets). bConst
+	// selects const vs mutable pointer/reference typedefs via type_select.
+	template <typename Value, bool bConst, bool bCacheHashCode>
+	struct hashtable_iterator : public hashtable_iterator_base<Value, bCacheHashCode>
+	{
+	public:
+		typedef hashtable_iterator_base<Value, bCacheHashCode>           base_type;
+		typedef hashtable_iterator<Value, bConst, bCacheHashCode>        this_type;
+		typedef hashtable_iterator<Value, false, bCacheHashCode>         this_type_non_const;
+		typedef typename base_type::node_type                            node_type;
+		typedef Value                                                    value_type;
+		typedef typename type_select<bConst, const Value*, Value*>::type pointer;
+		typedef typename type_select<bConst, const Value&, Value&>::type reference;
+		typedef ptrdiff_t                                                difference_type;
+		typedef EASTL_ITC_NS::forward_iterator_tag                       iterator_category;
+
+	public:
+		hashtable_iterator(node_type* pNode = NULL, node_type** pBucket = NULL)
+			: base_type(pNode, pBucket) { }
+
+		// Positions the iterator at the head node of the given bucket
+		// (which may be NULL for an empty bucket; callers such as
+		// hashtable::begin() then call increment_bucket()).
+		hashtable_iterator(node_type** pBucket)
+			: base_type(*pBucket, pBucket) { }
+
+		// Conversion from the non-const iterator; gives the usual
+		// iterator -> const_iterator conversion when bConst is true.
+		hashtable_iterator(const this_type_non_const& x)
+			: base_type(x.mpNode, x.mpBucket) { }
+
+		reference operator*() const
+			{ return base_type::mpNode->mValue; }
+
+		pointer operator->() const
+			{ return &(base_type::mpNode->mValue); }
+
+		hashtable_iterator& operator++()
+			{ base_type::increment(); return *this; }
+
+		hashtable_iterator operator++(int)
+			{ hashtable_iterator temp(*this); base_type::increment(); return temp; }
+
+		// Exposes the underlying node, e.g. for find_by_hash-style extensions.
+		const node_type* get_node() const
+			{ return base_type::mpNode; }
+
+	}; // hashtable_iterator
+
+
+
+
+ /// ht_distance
+ ///
+ /// This function returns the same thing as distance() for
+ /// forward iterators but returns zero for input iterators.
+ /// The reason why is that input iterators can only be read
+ /// once, and calling distance() on an input iterator destroys
+ /// the ability to read it. This ht_distance is used only for
+ /// optimization and so the code will merely work better with
+	/// forward iterators than with input iterators.
+ ///
+	// Input-iterator overload: measuring would consume the single-pass
+	// range, so report 0 and let callers treat the size as unknown.
+	template <typename Iterator>
+	inline typename eastl::iterator_traits<Iterator>::difference_type
+	distance_fw_impl(Iterator /* first */, Iterator /* last */, EASTL_ITC_NS::input_iterator_tag)
+		{ return 0; }
+
+	// Forward-iterator (or stronger) overload: safe to walk the range.
+	template <typename Iterator>
+	inline typename eastl::iterator_traits<Iterator>::difference_type
+	distance_fw_impl(Iterator first, Iterator last, EASTL_ITC_NS::forward_iterator_tag)
+		{ return eastl::distance(first, last); }
+
+	// Dispatches on iterator category at compile time via tag dispatch.
+	template <typename Iterator>
+	inline typename eastl::iterator_traits<Iterator>::difference_type
+	ht_distance(Iterator first, Iterator last)
+	{
+		typedef typename eastl::iterator_traits<Iterator>::iterator_category IC;
+		return distance_fw_impl(first, last, IC());
+	}
+
+
+
+
+ /// mod_range_hashing
+ ///
+ /// Implements the algorithm for conversion of a number in the range of
+ /// [0, UINT32_MAX) to the range of [0, BucketCount).
+ ///
+	struct mod_range_hashing
+	{
+		// Defined as eastl_size_t instead of size_t because the latter
+		// wastes memory and is sometimes slower on 64 bit machines.
+		// Maps hash code r into bucket index [0, n) by simple modulo;
+		// n is the bucket count and is expected to be non-zero.
+		uint32_t operator()(uint32_t r, uint32_t n) const
+			{ return r % n; }
+	};
+
+
+ /// default_ranged_hash
+ ///
+ /// Default ranged hash function H. In principle it should be a
+ /// function object composed from objects of type H1 and H2 such that
+ /// h(k, n) = h2(h1(k), n), but that would mean making extra copies of
+ /// h1 and h2. So instead we'll just use a tag to tell class template
+ /// hashtable to do that composition.
+ ///
+ struct default_ranged_hash{ };
+
+
+ /// prime_rehash_policy
+ ///
+ /// Default value for rehash policy. Bucket size is (usually) the
+ /// smallest prime that keeps the load factor small enough.
+ ///
+	struct EASTL_API prime_rehash_policy
+	{
+	public:
+		float            mfMaxLoadFactor;   // Load factor beyond which a rehash is triggered.
+		float            mfGrowthFactor;    // Multiplier applied when growing the bucket count.
+		// mutable because the const Get* members below cache the element
+		// count at which the next resize should occur.
+		mutable uint32_t mnNextResize;
+
+	public:
+		prime_rehash_policy(float fMaxLoadFactor = 1.f)
+			: mfMaxLoadFactor(fMaxLoadFactor), mfGrowthFactor(2.f), mnNextResize(0) { }
+
+		float GetMaxLoadFactor() const
+			{ return mfMaxLoadFactor; }
+
+		/// Return a bucket count no greater than nBucketCountHint,
+		/// Don't update member variables while at it.
+		static uint32_t GetPrevBucketCountOnly(uint32_t nBucketCountHint);
+
+		/// Return a bucket count no greater than nBucketCountHint.
+		/// This function has a side effect of updating mnNextResize.
+		uint32_t GetPrevBucketCount(uint32_t nBucketCountHint) const;
+
+		/// Return a bucket count no smaller than nBucketCountHint.
+		/// This function has a side effect of updating mnNextResize.
+		uint32_t GetNextBucketCount(uint32_t nBucketCountHint) const;
+
+		/// Return a bucket count appropriate for nElementCount elements.
+		/// This function has a side effect of updating mnNextResize.
+		uint32_t GetBucketCount(uint32_t nElementCount) const;
+
+		/// nBucketCount is current bucket count, nElementCount is current element count,
+		/// and nElementAdd is number of elements to be inserted. Do we need
+		/// to increase bucket count? If so, return pair(true, n), where
+		/// n is the new bucket count. If not, return pair(false, 0).
+		eastl::pair<bool, uint32_t>
+		GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const;
+	};
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // Base classes for hashtable. We define these base classes because
+ // in some cases we want to do different things depending on the
+ // value of a policy class. In some cases the policy class affects
+ // which member functions and nested typedefs are defined; we handle that
+ // by specializing base class templates. Several of the base class templates
+ // need to access other members of class template hashtable, so we use
+ // the "curiously recurring template pattern" (parent class is templated
+ // on type of child class) for them.
+ ///////////////////////////////////////////////////////////////////////
+
+
+ /// rehash_base
+ ///
+ /// Give hashtable the get_max_load_factor functions if the rehash
+ /// policy is prime_rehash_policy.
+ ///
+	// Primary template: for an arbitrary RehashPolicy there is no
+	// load-factor interface, so the base contributes nothing.
+	template <typename RehashPolicy, typename Hashtable>
+	struct rehash_base { };
+
+	// Specialization for prime_rehash_policy: adds get/set_max_load_factor.
+	// Uses the curiously recurring template pattern — Hashtable is the
+	// derived hashtable type, so a static_cast reaches its rehash_policy().
+	template <typename Hashtable>
+	struct rehash_base<prime_rehash_policy, Hashtable>
+	{
+		// Returns the max load factor, which is the load factor beyond
+		// which we rebuild the container with a new bucket count.
+		float get_max_load_factor() const
+		{
+			const Hashtable* const pThis = static_cast<const Hashtable*>(this);
+			return pThis->rehash_policy().GetMaxLoadFactor();
+		}
+
+		// If you want to make the hashtable never rehash (resize),
+		// set the max load factor to be a very high number (e.g. 100000.f).
+		// Note: this replaces the whole policy, resetting mnNextResize to 0.
+		void set_max_load_factor(float fMaxLoadFactor)
+		{
+			Hashtable* const pThis = static_cast<Hashtable*>(this);
+			pThis->rehash_policy(prime_rehash_policy(fMaxLoadFactor));
+		}
+	};
+
+
+
+
+ /// hash_code_base
+ ///
+ /// Encapsulates two policy issues that aren't quite orthogonal.
+ /// (1) The difference between using a ranged hash function and using
+ /// the combination of a hash function and a range-hashing function.
+ /// In the former case we don't have such things as hash codes, so
+ /// we have a dummy type as placeholder.
+ /// (2) Whether or not we cache hash codes. Caching hash codes is
+ /// meaningless if we have a ranged hash function. This is because
+ /// a ranged hash function converts an object directly to its
+ /// bucket index without ostensibly using a hash code.
+ /// We also put the key extraction and equality comparison function
+ /// objects here, for convenience.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal,
+ typename H1, typename H2, typename H, bool bCacheHashCode>
+ struct hash_code_base;
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: ranged hash function, no caching hash codes.
+ /// H1 and H2 are provided but ignored. We define a dummy hash code type.
+ ///
+	// Specialization for a user-supplied ranged hash H with no hash-code
+	// caching. H maps (key, bucket_count) -> bucket directly, so there is no
+	// intermediate hash code; hash_code_t is a dummy void* placeholder.
+	template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
+	struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, false>
+	{
+	protected:
+		ExtractKey  mExtractKey;    // To do: Make this member go away entirely, as it never has any data.
+		Equal       mEqual;         // To do: Make this instance use zero space when it is zero size.
+		H           mRangedHash;    // To do: Make this instance use zero space when it is zero size
+
+	public:
+		// H1 is ignored in this specialization, so return a default-constructed one.
+		H1 hash_function() const
+			{ return H1(); }
+
+		Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+			{ return mEqual; }       // has specified in its hashtable (unordered_*) proposal.
+
+		const Equal& key_eq() const
+			{ return mEqual; }
+
+		Equal& key_eq()
+			{ return mEqual; }
+
+	protected:
+		typedef void*    hash_code_t;      // Dummy; no real hash code exists with a ranged hash.
+		typedef uint32_t bucket_index_t;
+
+		// H1 and H2 parameters are accepted for interface uniformity but ignored.
+		hash_code_base(const ExtractKey& extractKey, const Equal& eq, const H1&, const H2&, const H& h)
+			: mExtractKey(extractKey), mEqual(eq), mRangedHash(h) { }
+
+		// No meaningful hash code in this configuration; always NULL.
+		hash_code_t get_hash_code(const Key& /* key */) const
+			{ return NULL; }
+
+		// Without the key, the bucket cannot be computed; returns 0.
+		bucket_index_t bucket_index(hash_code_t, uint32_t) const
+			{ return (bucket_index_t)0; }
+
+		bucket_index_t bucket_index(const Key& key, hash_code_t, uint32_t nBucketCount) const
+			{ return (bucket_index_t)mRangedHash(key, nBucketCount); }
+
+		bucket_index_t bucket_index(const hash_node<Value, false>* pNode, uint32_t nBucketCount) const
+			{ return (bucket_index_t)mRangedHash(mExtractKey(pNode->mValue), nBucketCount); }
+
+		// Only key equality is available; the (dummy) hash code is unused.
+		bool compare(const Key& key, hash_code_t, hash_node<Value, false>* pNode) const
+			{ return mEqual(key, mExtractKey(pNode->mValue)); }
+
+		void copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
+			{ } // Nothing to do.
+
+		void set_code(hash_node<Value, false>* /* pDest */, hash_code_t /* c */) const
+			{ } // Nothing to do.
+
+		void base_swap(hash_code_base& x)
+		{
+			eastl::swap(mExtractKey, x.mExtractKey);
+			eastl::swap(mEqual, x.mEqual);
+			eastl::swap(mRangedHash, x.mRangedHash);
+		}
+
+	}; // hash_code_base
+
+
+
+ // No specialization for ranged hash function while caching hash codes.
+ // That combination is meaningless, and trying to do it is an error.
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: ranged hash function, cache hash codes.
+ /// This combination is meaningless, so we provide only a declaration
+ /// and no definition.
+ ///
+ template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
+ struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, true>;
+
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: hash function and range-hashing function,
+ /// no caching of hash codes. H is provided but ignored.
+ /// Provides typedef and accessor required by TR1.
+ ///
+	// Specialization for the default ranged hash built from H1 (hash) and
+	// H2 (range hash), without caching hash codes in the nodes. Every
+	// bucket_index computation re-hashes via h2(h1(key), n).
+	template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
+	struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, false>
+	{
+	protected:
+		ExtractKey  mExtractKey;
+		Equal       mEqual;
+		H1          m_h1;
+		H2          m_h2;
+
+	public:
+		typedef H1 hasher;
+
+		H1 hash_function() const
+			{ return m_h1; }
+
+		Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+			{ return mEqual; }       // has specified in its hashtable (unordered_*) proposal.
+
+		const Equal& key_eq() const
+			{ return mEqual; }
+
+		Equal& key_eq()
+			{ return mEqual; }
+
+	protected:
+		typedef ::uint32_t hash_code_t;
+		typedef ::uint32_t bucket_index_t;
+		typedef hash_node<Value, false> node_type;
+
+		// The default_ranged_hash argument is a tag only; it carries no state.
+		hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
+			: mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
+
+		hash_code_t get_hash_code(const Key& key) const
+			{ return (hash_code_t)m_h1(key); }
+
+		bucket_index_t bucket_index(hash_code_t c, ::uint32_t nBucketCount) const
+			{ return (bucket_index_t)m_h2(c, nBucketCount); }
+
+		bucket_index_t bucket_index(const Key&, hash_code_t c, ::uint32_t nBucketCount) const
+			{ return (bucket_index_t)m_h2(c, nBucketCount); }
+
+		// No cached code in the node, so the key must be re-hashed here.
+		bucket_index_t bucket_index(const node_type* pNode, ::uint32_t nBucketCount) const
+			{ return (bucket_index_t)m_h2((hash_code_t)m_h1(mExtractKey(pNode->mValue)), nBucketCount); }
+
+		// The hash code parameter is ignored; only key equality is checked.
+		bool compare(const Key& key, hash_code_t, node_type* pNode) const
+			{ return mEqual(key, mExtractKey(pNode->mValue)); }
+
+		void copy_code(node_type*, const node_type*) const
+			{ } // Nothing to do.
+
+		void set_code(node_type*, hash_code_t) const
+			{ } // Nothing to do.
+
+		void base_swap(hash_code_base& x)
+		{
+			eastl::swap(mExtractKey, x.mExtractKey);
+			eastl::swap(mEqual, x.mEqual);
+			eastl::swap(m_h1, x.m_h1);
+			eastl::swap(m_h2, x.m_h2);
+		}
+
+	}; // hash_code_base
+
+
+
+ /// hash_code_base
+ ///
+ /// Specialization: hash function and range-hashing function,
+ /// caching hash codes. H is provided but ignored.
+ /// Provides typedef and accessor required by TR1.
+ ///
+	// Specialization for the default ranged hash built from H1/H2, with hash
+	// codes cached in each node (hash_node<Value, true>). Bucket computation
+	// from a node and equality comparison can then use the stored code
+	// instead of re-invoking H1.
+	template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
+	struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, true>
+	{
+	protected:
+		ExtractKey  mExtractKey;
+		Equal       mEqual;
+		H1          m_h1;
+		H2          m_h2;
+
+	public:
+		typedef H1 hasher;
+
+		H1 hash_function() const
+			{ return m_h1; }
+
+		Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
+			{ return mEqual; }       // has specified in its hashtable (unordered_*) proposal.
+
+		const Equal& key_eq() const
+			{ return mEqual; }
+
+		Equal& key_eq()
+			{ return mEqual; }
+
+	protected:
+		typedef uint32_t hash_code_t;
+		typedef uint32_t bucket_index_t;
+		typedef hash_node<Value, true> node_type;
+
+		// The default_ranged_hash argument is a tag only; it carries no state.
+		hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
+			: mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
+
+		hash_code_t get_hash_code(const Key& key) const
+			{ return (hash_code_t)m_h1(key); }
+
+		bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const
+			{ return (bucket_index_t)m_h2(c, nBucketCount); }
+
+		bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const
+			{ return (bucket_index_t)m_h2(c, nBucketCount); }
+
+		// Uses the cached code; the key is never re-hashed here.
+		bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const
+			{ return (bucket_index_t)m_h2((uint32_t)pNode->mnHashCode, nBucketCount); }
+
+		// Fast reject on mismatched cached codes before the (potentially
+		// expensive) key equality test.
+		bool compare(const Key& key, hash_code_t c, node_type* pNode) const
+			{ return (pNode->mnHashCode == c) && mEqual(key, mExtractKey(pNode->mValue)); }
+
+		void copy_code(node_type* pDest, const node_type* pSource) const
+			{ pDest->mnHashCode = pSource->mnHashCode; }
+
+		void set_code(node_type* pDest, hash_code_t c) const
+			{ pDest->mnHashCode = c; }
+
+		void base_swap(hash_code_base& x)
+		{
+			eastl::swap(mExtractKey, x.mExtractKey);
+			eastl::swap(mEqual, x.mEqual);
+			eastl::swap(m_h1, x.m_h1);
+			eastl::swap(m_h2, x.m_h2);
+		}
+
+	}; // hash_code_base
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ /// hashtable
+ ///
+ /// Key and Value: arbitrary CopyConstructible types.
+ ///
+ /// ExtractKey: function object that takes a object of type Value
+ /// and returns a value of type Key.
+ ///
+ /// Equal: function object that takes two objects of type k and returns
+ /// a bool-like value that is true if the two objects are considered equal.
+ ///
+ /// H1: a hash function. A unary function object with argument type
+ /// Key and result type size_t. Return values should be distributed
+ /// over the entire range [0, numeric_limits<uint32_t>::max()].
+ ///
+ /// H2: a range-hashing function (in the terminology of Tavori and
+ /// Dreizin). This is a function which takes the output of H1 and
+ /// converts it to the range of [0, n]. Usually it merely takes the
+ /// output of H1 and mods it to n.
+ ///
+ /// H: a ranged hash function (Tavori and Dreizin). This is merely
+ /// a class that combines the functionality of H1 and H2 together,
+ /// possibly in some way that is somehow improved over H1 and H2
+ /// It is a binary function whose argument types are Key and size_t
+ /// and whose result type is uint32_t. Given arguments k and n, the
+ /// return value is in the range [0, n). Default: h(k, n) = h2(h1(k), n).
+ /// If H is anything other than the default, H1 and H2 are ignored,
+ /// as H is thus overriding H1 and H2.
+ ///
+ /// RehashPolicy: Policy class with three members, all of which govern
+ /// the bucket count. nBucket(n) returns a bucket count no smaller
+ /// than n. GetBucketCount(n) returns a bucket count appropriate
+ /// for an element count of n. GetRehashRequired(nBucketCount, nElementCount, nElementAdd)
+ /// determines whether, if the current bucket count is nBucket and the
+ /// current element count is nElementCount, we need to increase the bucket
+ /// count. If so, returns pair(true, n), where n is the new
+ /// bucket count. If not, returns pair(false, <anything>).
+ ///
+ /// Currently it is hard-wired that the number of buckets never
+ /// shrinks. Should we allow RehashPolicy to change that?
+ ///
+ /// bCacheHashCode: true if we store the value of the hash
+ /// function along with the value. This is a time-space tradeoff.
+ /// Storing it may improve lookup speed by reducing the number of
+ /// times we need to call the Equal function.
+ ///
+ /// bMutableIterators: true if hashtable::iterator is a mutable
+ /// iterator, false if iterator and const_iterator are both const
+ /// iterators. This is true for hash_map and hash_multimap,
+ /// false for hash_set and hash_multiset.
+ ///
+ /// bUniqueKeys: true if the return value of hashtable::count(k)
+ /// is always at most one, false if it may be an arbitrary number.
+ /// This is true for hash_set and hash_map and is false for
+ /// hash_multiset and hash_multimap.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// Note:
+ /// If you want to make a hashtable never increase its bucket usage,
+ /// call set_max_load_factor with a very high value such as 100000.f.
+ ///
+ /// find_as
+ /// In order to support the ability to have a hashtable of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the hashtable key type. See the find_as function
+ /// for more documentation on this.
+ ///
+ /// find_by_hash
+ /// In the interest of supporting fast operations wherever possible,
+ /// we provide a find_by_hash function which finds a node using its
+ /// hash code. This is useful for cases where the node's hash is
+ /// already known, allowing us to avoid a redundant hash operation
+ /// in the normal find path.
+ ///
+ template <typename Key, typename Value, typename Allocator, typename ExtractKey,
+ typename Equal, typename H1, typename H2, typename H,
+ typename RehashPolicy, bool bCacheHashCode, bool bMutableIterators, bool bUniqueKeys>
+ class hashtable
+ : public rehash_base<RehashPolicy, hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H, RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> >,
+ public hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode>
+ {
+ public:
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef typename ExtractKey::result_type mapped_type;
+ typedef hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode> hash_code_base_type;
+ typedef typename hash_code_base_type::hash_code_t hash_code_t;
+ typedef Allocator allocator_type;
+ typedef Equal key_equal;
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef node_iterator<value_type, !bMutableIterators, bCacheHashCode> local_iterator;
+ typedef node_iterator<value_type, true, bCacheHashCode> const_local_iterator;
+ typedef hashtable_iterator<value_type, !bMutableIterators, bCacheHashCode> iterator;
+ typedef hashtable_iterator<value_type, true, bCacheHashCode> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef hash_node<value_type, bCacheHashCode> node_type;
+ typedef typename type_select<bUniqueKeys, eastl::pair<iterator, bool>, iterator>::type insert_return_type;
+ typedef hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H,
+ RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> this_type;
+ typedef RehashPolicy rehash_policy_type;
+ typedef ExtractKey extract_key_type;
+ typedef H1 h1_type;
+ typedef H2 h2_type;
+ typedef H h_type;
+
+ using hash_code_base_type::key_eq;
+ using hash_code_base_type::hash_function;
+ using hash_code_base_type::mExtractKey;
+ using hash_code_base_type::get_hash_code;
+ using hash_code_base_type::bucket_index;
+ using hash_code_base_type::compare;
+ using hash_code_base_type::set_code;
+
+ static const bool kCacheHashCode = bCacheHashCode;
+
+ enum
+ {
+ kKeyAlignment = EASTL_ALIGN_OF(key_type),
+ kKeyAlignmentOffset = 0, // To do: Make sure this really is zero for all uses of this template.
+ kValueAlignment = EASTL_ALIGN_OF(value_type),
+ kValueAlignmentOffset = 0, // To fix: This offset is zero for sets and >0 for maps. Need to fix this.
+ kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
+ };
+
+ protected:
+ node_type** mpBucketArray;
+ size_type mnBucketCount;
+ size_type mnElementCount;
+ RehashPolicy mRehashPolicy; // To do: Use base class optimization to make this go away.
+ allocator_type mAllocator; // To do: Use base class optimization to make this go away.
+
+ public:
+ hashtable(size_type nBucketCount, const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
+ const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
+
+ template <typename FowardIterator>
+ hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
+ const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
+ const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+ hashtable(const hashtable& x);
+ ~hashtable();
+
+ allocator_type& get_allocator();
+ void set_allocator(const allocator_type& allocator);
+
+ this_type& operator=(const this_type& x);
+
+ void swap(this_type& x);
+
+ public:
+		/// begin() points at the first element in the first non-empty bucket;
+		/// the size_type overloads return local (single-bucket) iterators.
+		iterator begin()
+		{
+			iterator i(mpBucketArray);
+			if(!i.mpNode)
+				i.increment_bucket(); // First bucket is empty; skip ahead to the first occupied bucket (or the end sentinel).
+			return i;
+		}
+
+		const_iterator begin() const
+		{
+			const_iterator i(mpBucketArray);
+			if(!i.mpNode)
+				i.increment_bucket(); // Same skip as the non-const overload.
+			return i;
+		}
+
+		/// end() is the position one past the last bucket (the sentinel bucket).
+		iterator end()
+			{ return iterator(mpBucketArray + mnBucketCount); }
+
+		const_iterator end() const
+			{ return const_iterator(mpBucketArray + mnBucketCount); }
+
+		/// Local iterators walk only the chain of bucket n; its end is NULL.
+		local_iterator begin(size_type n)
+			{ return local_iterator(mpBucketArray[n]); }
+
+		local_iterator end(size_type)
+			{ return local_iterator(NULL); }
+
+		const_local_iterator begin(size_type n) const
+			{ return const_local_iterator(mpBucketArray[n]); }
+
+		const_local_iterator end(size_type) const
+			{ return const_local_iterator(NULL); }
+
+		bool empty() const
+			{ return mnElementCount == 0; }
+
+		size_type size() const
+			{ return mnElementCount; }
+
+		size_type bucket_count() const
+			{ return mnBucketCount; }
+
+		/// Number of elements in bucket n. O(chain length): walks the bucket's list.
+		size_type bucket_size(size_type n) const
+			{ return (size_type)eastl::distance(begin(n), end(n)); }
+
+		//size_type bucket(const key_type& k) const
+		//    { return bucket_index(k, (hash code here), (uint32_t)mnBucketCount); }
+
+	public:
+		/// Average number of elements per bucket.
+		float load_factor() const
+			{ return (float)mnElementCount / (float)mnBucketCount; }
+
+ // Inherited from the base class.
+ // Returns the max load factor, which is the load factor beyond
+ // which we rebuild the container with a new bucket count.
+ // get_max_load_factor comes from rehash_base.
+ // float get_max_load_factor() const;
+
+ // Inherited from the base class.
+ // If you want to make the hashtable never rehash (resize),
+ // set the max load factor to be a very high number (e.g. 100000.f).
+ // set_max_load_factor comes from rehash_base.
+ // void set_max_load_factor(float fMaxLoadFactor);
+
+		/// Generalization of get_max_load_factor. This is an extension that's
+		/// not present in TR1. Returns the policy object that decides when and
+		/// how the bucket array grows.
+		const rehash_policy_type& rehash_policy() const
+			{ return mRehashPolicy; }
+
+ /// Generalization of set_max_load_factor. This is an extension that's
+ /// not present in TR1.
+ void rehash_policy(const rehash_policy_type& rehashPolicy);
+
+ public:
+
+ insert_return_type insert(const value_type& value);
+ iterator insert(const_iterator, const value_type& value);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ public:
+ iterator erase(iterator position);
+ iterator erase(iterator first, iterator last);
+ reverse_iterator erase(reverse_iterator position);
+ reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+ size_type erase(const key_type& k);
+
+ void clear();
+ void clear(bool clearBuckets);
+ void reset();
+ void rehash(size_type nBucketCount);
+
+ public:
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ /// Implements a find whereby the user supplies a comparison of a different type
+ /// than the hashtable value_type. A useful case of this is one whereby you have
+ /// a container of string objects but want to do searches via passing in char pointers.
+ /// The problem is that without this kind of find, you need to do the expensive operation
+ /// of converting the char pointer to a string so it can be used as the argument to the
+ /// find function.
+ ///
+ /// Example usage (namespaces omitted for brevity):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello"); // Use default hash and compare.
+ ///
+ /// Example usage (note that the predicate uses string as first type and char* as second):
+ /// hash_set<string> hashSet;
+ /// hashSet.find_as("hello", hash<char*>(), equal_to_2<string, char*>());
+ ///
+ template <typename U, typename UHash, typename BinaryPredicate>
+ iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
+
+ template <typename U, typename UHash, typename BinaryPredicate>
+ const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
+
+ template <typename U>
+ iterator find_as(const U& u);
+
+ template <typename U>
+ const_iterator find_as(const U& u) const;
+
+ /// Implements a find whereby the user supplies the node's hash code.
+ ///
+ iterator find_by_hash(hash_code_t c);
+ const_iterator find_by_hash(hash_code_t c) const;
+
+ size_type count(const key_type& k) const;
+
+ eastl::pair<iterator, iterator> equal_range(const key_type& k);
+ eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
+
+ public:
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ node_type* DoAllocateNode(const value_type& value);
+ node_type* DoAllocateNodeFromKey(const key_type& key);
+ void DoFreeNode(node_type* pNode);
+ void DoFreeNodes(node_type** pBucketArray, size_type);
+
+ node_type** DoAllocateBuckets(size_type n);
+ void DoFreeBuckets(node_type** pBucketArray, size_type n);
+
+ eastl::pair<iterator, bool> DoInsertValue(const value_type& value, true_type);
+ iterator DoInsertValue(const value_type& value, false_type);
+
+ eastl::pair<iterator, bool> DoInsertKey(const key_type& key, true_type);
+ iterator DoInsertKey(const key_type& key, false_type);
+
+ void DoRehash(size_type nBucketCount);
+ node_type* DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const;
+
+ template <typename U, typename BinaryPredicate>
+ node_type* DoFindNode(node_type* pNode, const U& u, BinaryPredicate predicate) const;
+
+ node_type* DoFindNode(node_type* pNode, hash_code_t c) const;
+
+ }; // class hashtable
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // node_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+	/// Two node iterators are equal iff they refer to the same node.
+	template <typename Value, bool bCacheHashCode>
+	inline bool operator==(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
+		{ return a.mpNode == b.mpNode; }
+
+	template <typename Value, bool bCacheHashCode>
+	inline bool operator!=(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
+		{ return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable_iterator_base
+ ///////////////////////////////////////////////////////////////////////
+
+	/// Two hashtable iterators are equal iff they refer to the same node;
+	/// the bucket pointer is intentionally not compared.
+	template <typename Value, bool bCacheHashCode>
+	inline bool operator==(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
+		{ return a.mpNode == b.mpNode; }
+
+	template <typename Value, bool bCacheHashCode>
+	inline bool operator!=(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
+		{ return a.mpNode != b.mpNode; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // hashtable
+ ///////////////////////////////////////////////////////////////////////
+
+	/// Constructs an empty hashtable. nBucketCount < 2 selects the shared
+	/// zero-allocation empty state (via reset()); otherwise the count is
+	/// rounded up by the rehash policy and a real bucket array is allocated.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>
+	::hashtable(size_type nBucketCount, const H1& h1, const H2& h2, const H& h,
+				const Eq& eq, const EK& ek, const allocator_type& allocator)
+		: rehash_base<RP, hashtable>(),
+		  hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(ek, eq, h1, h2, h),
+		  mnBucketCount(0),
+		  mnElementCount(0),
+		  mRehashPolicy(),
+		  mAllocator(allocator)
+	{
+		if(nBucketCount < 2)  // If we are starting in an initially empty state, with no memory allocation done.
+			reset();
+		else // Else we are creating a potentially non-empty hashtable...
+		{
+			EASTL_ASSERT(nBucketCount < 10000000); // Sanity check against absurd bucket requests.
+			mnBucketCount = (size_type)mRehashPolicy.GetNextBucketCount((::uint32_t)nBucketCount);
+			mpBucketArray = DoAllocateBuckets(mnBucketCount);  // mnBucketCount will always be at least 2.
+		}
+	}
+
+
+
+	/// Constructs a hashtable from the range [first, last). When the caller
+	/// gives no usable bucket count (< 2), the count is derived from the
+	/// range's length via the rehash policy. On an exception while inserting,
+	/// nodes and the bucket array are freed before re-throwing.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	template <typename FowardIterator>
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
+																	 const H1& h1, const H2& h2, const H& h,
+																	 const Eq& eq, const EK& ek, const allocator_type& allocator)
+		: rehash_base<rehash_policy_type, hashtable>(),
+		  hash_code_base<key_type, value_type, extract_key_type, key_equal, h1_type, h2_type, h_type, kCacheHashCode>(ek, eq, h1, h2, h),
+		  //mnBucketCount(0), // This gets re-assigned below.
+		  mnElementCount(0),
+		  mRehashPolicy(),
+		  mAllocator(allocator)
+	{
+		if(nBucketCount < 2)
+		{
+			// Size the bucket array from the element count of the input range.
+			const size_type nElementCount = (size_type)eastl::ht_distance(first, last);
+			mnBucketCount = (size_type)mRehashPolicy.GetBucketCount((uint32_t)nElementCount);
+		}
+		else
+		{
+			EASTL_ASSERT(nBucketCount < 10000000);
+			mnBucketCount = nBucketCount;
+		}
+
+		mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2.
+
+		#if EASTL_EXCEPTIONS_ENABLED
+			try
+			{
+		#endif
+				for(; first != last; ++first)
+					insert(*first);
+		#if EASTL_EXCEPTIONS_ENABLED
+			}
+			catch(...)
+			{
+				// Roll back: free any inserted nodes and the bucket array, then re-throw.
+				clear();
+				DoFreeBuckets(mpBucketArray, mnBucketCount);
+				throw;
+			}
+		#endif
+	}
+
+
+
+	/// Copy constructor. Deep-copies every node, preserving per-bucket chain
+	/// order. Note: mpBucketArray is deliberately absent from the initializer
+	/// list; both branches of the body assign it (allocate or reset()).
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(const this_type& x)
+		: rehash_base<RP, hashtable>(x),
+		  hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
+		  mnBucketCount(x.mnBucketCount),
+		  mnElementCount(x.mnElementCount),
+		  mRehashPolicy(x.mRehashPolicy),
+		  mAllocator(x.mAllocator)
+	{
+		if(mnElementCount) // If there is anything to copy...
+		{
+			mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will be at least 2.
+
+			#if EASTL_EXCEPTIONS_ENABLED
+				try
+				{
+			#endif
+					for(size_type i = 0; i < x.mnBucketCount; ++i)
+					{
+						node_type*  pNodeSource = x.mpBucketArray[i];
+						node_type** ppNodeDest  = mpBucketArray + i;
+
+						// Append each cloned node at the chain tail so order matches the source.
+						while(pNodeSource)
+						{
+							*ppNodeDest = DoAllocateNode(pNodeSource->mValue);
+							this->copy_code(*ppNodeDest, pNodeSource); // Copies the cached hash code when the table caches one.
+							ppNodeDest = &(*ppNodeDest)->mpNext;
+							pNodeSource = pNodeSource->mpNext;
+						}
+					}
+			#if EASTL_EXCEPTIONS_ENABLED
+				}
+				catch(...)
+				{
+					// Roll back the partial copy before re-throwing.
+					clear();
+					DoFreeBuckets(mpBucketArray, mnBucketCount);
+					throw;
+				}
+			#endif
+		}
+		else
+		{
+			// In this case, instead of allocate memory and copy nothing from x,
+			// we reset ourselves to a zero allocation state.
+			reset();
+		}
+	}
+
+
+
+	/// Returns a modifiable reference to the container's allocator.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocator_type&
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::get_allocator()
+	{
+		return mAllocator;
+	}
+
+
+
+	/// Replaces the allocator. Does not reallocate existing memory; existing
+	/// nodes/buckets will later be freed through the new allocator.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::set_allocator(const allocator_type& allocator)
+	{
+		mAllocator = allocator;
+	}
+
+
+
+	/// Copy assignment: clears this table, then re-inserts x's elements.
+	/// The bucket array is reused/regrown by insert; it is not copied directly.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(const this_type& x)
+	{
+		if(this != &x) // Self-assignment guard.
+		{
+			clear();
+
+			#if EASTL_ALLOCATOR_COPY_ENABLED
+				mAllocator = x.mAllocator; // Allocator propagation is opt-in via build config.
+			#endif
+
+			insert(x.begin(), x.end());
+		}
+		return *this;
+	}
+
+
+
+	/// Destructor: frees all nodes, then the bucket array.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::~hashtable()
+	{
+		clear();
+		DoFreeBuckets(mpBucketArray, mnBucketCount);
+	}
+
+
+
+	/// Allocates a node and copy-constructs 'value' into it via placement new.
+	/// If the value's copy constructor throws, the raw memory is released
+	/// before re-throwing. The returned node has mpNext == NULL.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(const value_type& value)
+	{
+		node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), kValueAlignment, kValueAlignmentOffset);
+
+		#if EASTL_EXCEPTIONS_ENABLED
+			try
+			{
+		#endif
+				::new(&pNode->mValue) value_type(value); // Placement-construct only mValue; the node struct itself is POD-managed.
+				pNode->mpNext = NULL;
+				return pNode;
+		#if EASTL_EXCEPTIONS_ENABLED
+			}
+			catch(...)
+			{
+				EASTLFree(mAllocator, pNode, sizeof(node_type)); // Value ctor threw; give the raw memory back.
+				throw;
+			}
+		#endif
+	}
+
+
+
+	/// Like DoAllocateNode, but constructs the value from a key alone
+	/// (value_type must be constructible from key_type; used by DoInsertKey).
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(const key_type& key)
+	{
+		node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), kValueAlignment, kValueAlignmentOffset);
+
+		#if EASTL_EXCEPTIONS_ENABLED
+			try
+			{
+		#endif
+				::new(&pNode->mValue) value_type(key);
+				pNode->mpNext = NULL;
+				return pNode;
+		#if EASTL_EXCEPTIONS_ENABLED
+			}
+			catch(...)
+			{
+				EASTLFree(mAllocator, pNode, sizeof(node_type)); // Construction threw; release the raw memory.
+				throw;
+			}
+		#endif
+	}
+
+
+
+	/// Destroys a node and returns its memory to the allocator.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNode(node_type* pNode)
+	{
+		pNode->~node_type();
+		EASTLFree(mAllocator, pNode, sizeof(node_type));
+	}
+
+
+
+	/// Frees every node chain in the first n buckets of pNodeArray and nulls
+	/// out each bucket head. The bucket array itself is not freed.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNodes(node_type** pNodeArray, size_type n)
+	{
+		for(size_type i = 0; i < n; ++i)
+		{
+			node_type* pNode = pNodeArray[i];
+			while(pNode)
+			{
+				node_type* const pTempNode = pNode;
+				pNode = pNode->mpNext; // Advance before freeing; DoFreeNode invalidates pTempNode.
+				DoFreeNode(pTempNode);
+			}
+			pNodeArray[i] = NULL;
+		}
+	}
+
+
+
+	/// Allocates n+1 bucket pointers: n real buckets zero-filled, plus one
+	/// trailing sentinel set to a non-null value (~0) that iterator
+	/// increment uses to detect the end of the table.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type**
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateBuckets(size_type n)
+	{
+		// We allocate one extra bucket to hold a sentinel, an arbitrary
+		// non-null pointer. Iterator increment relies on this.
+		EASTL_ASSERT(n > 1); // We reserve an mnBucketCount of 1 for the shared gpEmptyBucketArray.
+		EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the allocator has a copy of this enum.
+		node_type** const pBucketArray = (node_type**)EASTLAllocFlags(mAllocator, (n + 1) * sizeof(node_type*), kAllocFlagBuckets);
+		//eastl::fill(pBucketArray, pBucketArray + n, (node_type*)NULL);
+		memset(pBucketArray, 0, n * sizeof(node_type*)); // Zero only the n real buckets; [n] becomes the sentinel below.
+		pBucketArray[n] = reinterpret_cast<node_type*>((uintptr_t)~0);
+		return pBucketArray;
+	}
+
+
+
+	/// Frees a bucket array previously returned by DoAllocateBuckets.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeBuckets(node_type** pBucketArray, size_type n)
+	{
+		// If n <= 1, then pBucketArray is from the shared gpEmptyBucketArray. We don't test
+		// for pBucketArray == &gpEmptyBucketArray because one library may have a different
+		// gpEmptyBucketArray than another yet pass a hashtable between them. So we go by the size.
+		if(n > 1)
+			EASTLFree(mAllocator, pBucketArray, (n + 1) * sizeof(node_type*)); // '+1' because DoAllocateBuckets allocates nBucketCount + 1 buckets in order to have a NULL sentinel at the end.
+	}
+
+
+
+	/// Swaps contents with x. O(1) pointer swaps when the allocators compare
+	/// equal; otherwise falls back to an O(n) three-way copy via operator=.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::swap(this_type& x)
+	{
+		if(mAllocator == x.mAllocator) // If allocators are equivalent...
+		{
+			// We leave mAllocator as-is.
+			hash_code_base<K, V, EK, Eq, H1, H2, H, bC>::base_swap(x); // hash_code_base has multiple implementations, so we let them handle the swap.
+			eastl::swap(mRehashPolicy,  x.mRehashPolicy);
+			eastl::swap(mpBucketArray,  x.mpBucketArray);
+			eastl::swap(mnBucketCount,  x.mnBucketCount);
+			eastl::swap(mnElementCount, x.mnElementCount);
+		}
+		else
+		{
+			const this_type temp(*this); // Can't call eastl::swap because that would
+			*this = x;                   // itself call this member swap function.
+			x     = temp;
+		}
+	}
+
+
+	/// Installs a new rehash policy, then grows the bucket array immediately
+	/// if the new policy demands more buckets for the current element count.
+	/// The table is never shrunk here.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash_policy(const rehash_policy_type& rehashPolicy)
+	{
+		mRehashPolicy = rehashPolicy;
+
+		const size_type nBuckets = rehashPolicy.GetBucketCount((uint32_t)mnElementCount);
+
+		if(nBuckets > mnBucketCount)
+			DoRehash(nBuckets);
+	}
+
+
+
+	/// Finds an element equal to k: hash, map to a bucket, then scan that
+	/// bucket's chain. Returns end() when not found.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k)
+	{
+		const hash_code_t c = get_hash_code(k);
+		const size_type   n = (size_type)bucket_index(k, c, (::uint32_t)mnBucketCount);
+
+		node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+		return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+	}
+
+
+
+	/// Const overload of find; identical lookup logic.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k) const
+	{
+		const hash_code_t c = get_hash_code(k);
+		const size_type   n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+		node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
+		return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+	}
+
+
+
+	/// Heterogeneous find: looks up 'other' (a type different from key_type)
+	/// using a caller-supplied hash and equality predicate, avoiding
+	/// construction of a temporary key.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	template <typename U, typename UHash, typename BinaryPredicate>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate)
+	{
+		const hash_code_t c = (hash_code_t)uhash(other);
+		const size_type   n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
+
+		node_type* const pNode = DoFindNode(mpBucketArray[n], other, predicate);
+		return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+	}
+
+
+
+	/// Const overload of the heterogeneous find above.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	template <typename U, typename UHash, typename BinaryPredicate>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const
+	{
+		const hash_code_t c = (hash_code_t)uhash(other);
+		const size_type   n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
+
+		node_type* const pNode = DoFindNode(mpBucketArray[n], other, predicate);
+		return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+	}
+
+
+	/// hashtable_find
+	///
+	/// Helper function that defaults to using hash<U> and equal_to_2<T, U>.
+	/// This makes it so that by default you don't need to provide these.
+	/// Note that the default hash functions may not be what you want, though.
+	///
+	/// Example usage. Instead of this:
+	///     hash_set<string> hashSet;
+	///     hashSet.find("hello", hash<char*>(), equal_to_2<string, char*>());
+	///
+	/// You can use this:
+	///     hash_set<string> hashSet;
+	///     hashtable_find(hashSet, "hello");
+	///
+	template <typename H, typename U>
+	inline typename H::iterator hashtable_find(H& hashTable, U u)
+		{ return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+	/// Const-container overload of hashtable_find.
+	template <typename H, typename U>
+	inline typename H::const_iterator hashtable_find(const H& hashTable, U u)
+		{ return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
+
+
+
+	/// Convenience find_as using the default hash<U> and equal_to_2<key_type, U>.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	template <typename U>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other)
+		{ return eastl::hashtable_find(*this, other); }
+		// VC++ doesn't appear to like the following, though it seems correct to me.
+		// So we implement the workaround above until we can straighten this out.
+		//{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+	/// Const overload of the convenience find_as above.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	template <typename U>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other) const
+		{ return eastl::hashtable_find(*this, other); }
+		// VC++ doesn't appear to like the following, though it seems correct to me.
+		// So we implement the workaround above until we can straighten this out.
+		//{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
+
+
+	/// Finds the first node whose stored hash code equals c, with no key
+	/// comparison at all. Useful when the caller has precomputed the hash.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_by_hash(hash_code_t c)
+	{
+		const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+		node_type* const pNode = DoFindNode(mpBucketArray[n], c);
+		return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+	}
+
+
+	/// Const overload of find_by_hash.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_by_hash(hash_code_t c) const
+	{
+		const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
+
+		node_type* const pNode = DoFindNode(mpBucketArray[n], c);
+		return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
+	}
+
+
+	/// Counts the elements equal to k by scanning k's entire bucket chain.
+	/// For unique-key tables the result is always 0 or 1.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::count(const key_type& k) const
+	{
+		const hash_code_t c      = get_hash_code(k);
+		const size_type   n      = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+		size_type         result = 0;
+
+		// To do: Make a specialization for bU (unique keys) == true and take
+		// advantage of the fact that the count will always be zero or one in that case.
+		for(node_type* pNode = mpBucketArray[n]; pNode; pNode = pNode->mpNext)
+		{
+			if(compare(k, c, pNode))
+				++result;
+		}
+		return result;
+	}
+
+
+
+	/// Returns the [first, last) range of elements equal to k. Relies on
+	/// equal elements being stored contiguously within a bucket chain (see
+	/// DoInsertValue's false_type overload). Returns [end(), end()) if absent.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator,
+				typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator>
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k)
+	{
+		const hash_code_t c = get_hash_code(k);
+		const size_type   n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+		node_type** head  = mpBucketArray + n;
+		node_type*  pNode = DoFindNode(*head, k, c);
+
+		if(pNode)
+		{
+			node_type* p1 = pNode->mpNext;
+
+			// Walk forward until the first node that no longer compares equal to k.
+			for(; p1; p1 = p1->mpNext)
+			{
+				if(!compare(k, c, p1))
+					break;
+			}
+
+			iterator first(pNode, head);
+			iterator last(p1, head);
+
+			if(!p1)
+				last.increment_bucket(); // Matches ran to the end of this bucket; advance to the next occupied bucket (or end).
+
+			return eastl::pair<iterator, iterator>(first, last);
+		}
+
+		return eastl::pair<iterator, iterator>(iterator(mpBucketArray + mnBucketCount),  // iterator(mpBucketArray + mnBucketCount) == end()
+											   iterator(mpBucketArray + mnBucketCount));
+	}
+
+
+
+
+	/// Const overload of equal_range; identical logic over const_iterators.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator,
+				typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator>
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k) const
+	{
+		const hash_code_t c     = get_hash_code(k);
+		const size_type   n     = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+		node_type**       head  = mpBucketArray + n;
+		node_type*        pNode = DoFindNode(*head, k, c);
+
+		if(pNode)
+		{
+			node_type* p1 = pNode->mpNext;
+
+			// Walk forward until the first node that no longer compares equal to k.
+			for(; p1; p1 = p1->mpNext)
+			{
+				if(!compare(k, c, p1))
+					break;
+			}
+
+			const_iterator first(pNode, head);
+			const_iterator last(p1, head);
+
+			if(!p1)
+				last.increment_bucket(); // Matches ran to the end of this bucket; advance to the next occupied bucket (or end).
+
+			return eastl::pair<const_iterator, const_iterator>(first, last);
+		}
+
+		return eastl::pair<const_iterator, const_iterator>(const_iterator(mpBucketArray + mnBucketCount),  // iterator(mpBucketArray + mnBucketCount) == end()
+														   const_iterator(mpBucketArray + mnBucketCount));
+	}
+
+
+
+	/// Scans a bucket chain for a node matching key k (with precomputed hash
+	/// code c, which compare() may use to short-circuit). Returns NULL if none.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const
+	{
+		for(; pNode; pNode = pNode->mpNext)
+		{
+			if(compare(k, c, pNode))
+				return pNode;
+		}
+		return NULL;
+	}
+
+
+
+	/// Heterogeneous chain scan: matches via a caller-supplied predicate
+	/// against a value of arbitrary type U. Used by find_as.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	template <typename U, typename BinaryPredicate>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, const U& other, BinaryPredicate predicate) const
+	{
+		for(; pNode; pNode = pNode->mpNext)
+		{
+			if(predicate(mExtractKey(pNode->mValue), other)) // Intentionally compare with key as first arg and other as second arg.
+				return pNode;
+		}
+		return NULL;
+	}
+
+
+
+	/// Hash-only chain scan: matches on the node's stored hash code (mnHashCode),
+	/// so this overload requires nodes that cache their hash. Used by find_by_hash.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, hash_code_t c) const
+	{
+		for(; pNode; pNode = pNode->mpNext)
+		{
+			if(pNode->mnHashCode == c)
+				return pNode;
+		}
+		return NULL;
+	}
+
+
+
+	/// Unique-key insert. If an equal element already exists, returns
+	/// (iterator-to-it, false) without inserting. Otherwise allocates the node
+	/// first, then rehashes if the policy demands it, then links the node at
+	/// the head of its bucket and returns (iterator-to-new, true).
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(const value_type& value, true_type) // true_type means bUniqueKeys is true.
+	{
+		const key_type&   k = mExtractKey(value);
+		const hash_code_t c = get_hash_code(k);
+		size_type         n = (size_type)bucket_index(k, c, (::uint32_t)mnBucketCount);
+		node_type* const  pNode = DoFindNode(mpBucketArray[n], k, c);
+
+		if(pNode == NULL)
+		{
+			const eastl::pair<bool, ::uint32_t> bRehash = mRehashPolicy.GetRehashRequired((::uint32_t)mnBucketCount, (::uint32_t)mnElementCount, (::uint32_t)1);
+
+			// Allocate the new node before doing the rehash so that we don't
+			// do a rehash if the allocation throws.
+			node_type* const pNodeNew = DoAllocateNode(value);
+			set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+			#if EASTL_EXCEPTIONS_ENABLED
+				try
+				{
+			#endif
+					if(bRehash.first)
+					{
+						// Recompute the bucket index against the post-rehash bucket count.
+						n = (size_type)bucket_index(k, c, (::uint32_t)bRehash.second);
+						DoRehash(bRehash.second);
+					}
+
+					EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+					pNodeNew->mpNext = mpBucketArray[n]; // Link at the head of bucket n.
+					mpBucketArray[n] = pNodeNew;
+					++mnElementCount;
+
+					return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+			#if EASTL_EXCEPTIONS_ENABLED
+				}
+				catch(...)
+				{
+					DoFreeNode(pNodeNew); // The rehash threw; discard the not-yet-linked node.
+					throw;
+				}
+			#endif
+		}
+
+		return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+	}
+
+
+
+	/// Multi-key insert (duplicates allowed). Rehashes first if needed, then
+	/// inserts: at the bucket head when no equal element exists, otherwise
+	/// directly after the first equal element so that equal values stay
+	/// contiguous in the chain (which equal_range depends on).
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(const value_type& value, false_type) // false_type means bUniqueKeys is false.
+	{
+		const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+		if(bRehash.first)
+			DoRehash(bRehash.second);
+
+		const key_type&   k = mExtractKey(value);
+		const hash_code_t c = get_hash_code(k);
+		const size_type   n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+
+		node_type* const pNodeNew = DoAllocateNode(value);
+		set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+		// To consider: Possibly make this insertion not make equal elements contiguous.
+		// As it stands now, we insert equal values contiguously in the hashtable.
+		// The benefit is that equal_range can work in a sensible manner and that
+		// erase(value) can more quickly find equal values. The downside is that
+		// this insertion operation taking some extra time. How important is it to
+		// us that equal_range span all equal items?
+		node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
+
+		if(pNodePrev == NULL)
+		{
+			EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+			pNodeNew->mpNext = mpBucketArray[n]; // No equal element: link at the bucket head.
+			mpBucketArray[n] = pNodeNew;
+		}
+		else
+		{
+			pNodeNew->mpNext  = pNodePrev->mpNext; // Splice in right after the first equal element.
+			pNodePrev->mpNext = pNodeNew;
+		}
+
+		++mnElementCount;
+
+		return iterator(pNodeNew, mpBucketArray + n);
+	}
+
+
+
+	/// Unique-key insert from a key alone (the value is default-constructed
+	/// from the key via DoAllocateNodeFromKey). Same structure as the
+	/// unique-key DoInsertValue: allocate first, rehash if required, link at
+	/// the bucket head; returns (iterator, inserted?).
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(const key_type& key, true_type) // true_type means bUniqueKeys is true.
+	{
+		const hash_code_t c = get_hash_code(key);
+		size_type         n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+		node_type* const  pNode = DoFindNode(mpBucketArray[n], key, c);
+
+		if(pNode == NULL)
+		{
+			const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+			// Allocate the new node before doing the rehash so that we don't
+			// do a rehash if the allocation throws.
+			node_type* const pNodeNew = DoAllocateNodeFromKey(key);
+			set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+			#if EASTL_EXCEPTIONS_ENABLED
+				try
+				{
+			#endif
+					if(bRehash.first)
+					{
+						// Recompute the bucket index against the post-rehash bucket count.
+						n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second);
+						DoRehash(bRehash.second);
+					}
+
+					EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+					pNodeNew->mpNext = mpBucketArray[n]; // Link at the head of bucket n.
+					mpBucketArray[n] = pNodeNew;
+					++mnElementCount;
+
+					return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
+			#if EASTL_EXCEPTIONS_ENABLED
+				}
+				catch(...)
+				{
+					DoFreeNode(pNodeNew); // The rehash threw; discard the not-yet-linked node.
+					throw;
+				}
+			#endif
+		}
+
+		return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
+	}
+
+
+
+	// Inserts a default value for 'key' without a uniqueness check
+	// (multiset/multimap behavior: bUniqueKeys is false). Always inserts;
+	// returns an iterator to the newly inserted node.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(const key_type& key, false_type) // false_type means bUniqueKeys is false.
+	{
+		// Rehash (if required) before computing the bucket index, so 'n' below
+		// reflects the final bucket count.
+		const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
+
+		if(bRehash.first)
+			DoRehash(bRehash.second);
+
+		const hash_code_t c = get_hash_code(key);
+		const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
+
+		node_type* const pNodeNew = DoAllocateNodeFromKey(key);
+		set_code(pNodeNew, c); // This is a no-op for most hashtables.
+
+		// To consider: Possibly make this insertion not make equal elements contiguous.
+		// As it stands now, we insert equal values contiguously in the hashtable.
+		// The benefit is that equal_range can work in a sensible manner and that
+		// erase(value) can more quickly find equal values. The downside is that
+		// this insertion operation taking some extra time. How important is it to
+		// us that equal_range span all equal items?
+		node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c);
+
+		if(pNodePrev == NULL)
+		{
+			// No equal element exists yet; push the new node onto the front of the bucket.
+			EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
+			pNodeNew->mpNext = mpBucketArray[n];
+			mpBucketArray[n] = pNodeNew;
+		}
+		else
+		{
+			// Splice after the existing equal element to keep equal keys contiguous.
+			pNodeNew->mpNext = pNodePrev->mpNext;
+			pNodePrev->mpNext = pNodeNew;
+		}
+
+		++mnElementCount;
+
+		return iterator(pNodeNew, mpBucketArray + n);
+	}
+
+
+
+	// Public insert. Dispatches at compile time on bUniqueKeys: the true_type overload
+	// (set/map) returns pair<iterator, bool>, the false_type overload (multiset/multimap)
+	// returns a plain iterator; insert_return_type is typedefed accordingly.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const value_type& value)
+	{
+		return DoInsertValue(value, integral_constant<bool, bU>());
+	}
+
+
+
+	// Hinted insert, provided for interface compatibility with ordered containers.
+	// The hint is ignored; hash tables gain nothing from an insertion position hint.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const_iterator, const value_type& value)
+	{
+		// We ignore the first argument (hint iterator). It's not likely to be useful for hashtable containers.
+
+		#ifdef __MWERKS__ // The Metrowerks compiler has a bug.
+			insert_return_type result = insert(value);
+			return result.first; // Note by Paul Pedriana while perusing this code: This code will fail to compile when bU is false (i.e. for multiset, multimap).
+
+		#elif defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
+			EASTL_ASSERT(empty()); // This function cannot return the correct return value on GCC 2.x. Unless, that is, the container is empty.
+			DoInsertValue(value, integral_constant<bool, bU>());
+			return begin(); // This is the wrong answer.
+		#else
+			insert_return_type result = DoInsertValue(value, integral_constant<bool, bU>());
+			return result.first; // Note by Paul Pedriana while perusing this code: This code will fail to compile when bU is false (i.e. for multiset, multimap).
+		#endif
+	}
+
+
+
+	// Range insert. Measures the range up front (ht_distance) so that a single
+	// rehash can accommodate the whole batch, rather than potentially rehashing
+	// multiple times during the element-by-element inserts below.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	template <typename InputIterator>
+	void
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(InputIterator first, InputIterator last)
+	{
+		const uint32_t nElementAdd = (uint32_t)eastl::ht_distance(first, last);
+		const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, nElementAdd);
+
+		if(bRehash.first)
+			DoRehash(bRehash.second);
+
+		for(; first != last; ++first)
+		{
+			#ifdef __MWERKS__ // The Metrowerks compiler has a bug.
+				insert(*first);
+			#else
+				DoInsertValue(*first, integral_constant<bool, bU>());
+			#endif
+		}
+	}
+
+
+
+	// Erases the element at 'i' and returns an iterator to the following element.
+	// Precondition: 'i' must be a valid, dereferenceable iterator into this container.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(iterator i)
+	{
+		// Compute the successor before unlinking, as unlinking invalidates 'i'.
+		iterator iNext(i);
+		++iNext;
+
+		node_type* pNode = i.mpNode;
+		node_type* pNodeCurrent = *i.mpBucket;
+
+		if(pNodeCurrent == pNode)
+			*i.mpBucket = pNodeCurrent->mpNext; // The node is the bucket head; unlink directly.
+		else
+		{
+			// We have a singly-linked list, so we have no choice but to
+			// walk down it till we find the node before the node at 'i'.
+			node_type* pNodeNext = pNodeCurrent->mpNext;
+
+			while(pNodeNext != pNode)
+			{
+				pNodeCurrent = pNodeNext;
+				pNodeNext = pNodeCurrent->mpNext;
+			}
+
+			pNodeCurrent->mpNext = pNodeNext->mpNext;
+		}
+
+		DoFreeNode(pNode);
+		--mnElementCount;
+
+		return iNext;
+	}
+
+
+
+	// Erases [first, last) one element at a time, relying on single-element erase
+	// returning the successor. Returns an iterator to the element after the last erased.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(iterator first, iterator last)
+	{
+		while(first != last)
+			first = erase(first);
+		return first;
+	}
+
+
+
+	// Erases the element referred to by the reverse_iterator 'position'.
+	// Incrementing the reverse iterator first yields a base() iterator that
+	// addresses the same underlying element.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reverse_iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(reverse_iterator position)
+	{
+		return reverse_iterator(erase((++position).base()));
+	}
+
+
+
+	// Erases the reverse range [first, last) by converting it to the equivalent
+	// forward range and delegating to the forward-iterator overload.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reverse_iterator
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(reverse_iterator first, reverse_iterator last)
+	{
+		// Version which erases in order from first to last.
+		// difference_type i(first.base() - last.base());
+		// while(i--)
+		//     first = erase(first);
+		// return first;
+
+		// Version which erases in order from last to first, but is slightly more efficient:
+		return reverse_iterator(erase((++last).base(), (++first).base()));
+	}
+
+
+
+	// Erases all elements whose key equals 'k'. Returns the number of elements erased.
+	// Relies on the insertion functions keeping equal keys contiguous within a bucket:
+	// the first loop skips to the first match, the second erases the contiguous run.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
+	hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const key_type& k)
+	{
+		// To do: Reimplement this function to do a single loop and not try to be
+		// smart about element contiguity. The mechanism here is only a benefit if the
+		// buckets are heavily overloaded; otherwise this mechanism may be slightly slower.
+
+		const hash_code_t c = get_hash_code(k);
+		const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
+		const size_type nElementCountSaved = mnElementCount;
+
+		// pBucketArray walks the bucket's chain as a pointer-to-link, so unlinking
+		// a node is a single pointer assignment regardless of its position.
+		node_type** pBucketArray = mpBucketArray + n;
+
+		while(*pBucketArray && !compare(k, c, *pBucketArray))
+			pBucketArray = &(*pBucketArray)->mpNext;
+
+		while(*pBucketArray && compare(k, c, *pBucketArray))
+		{
+			node_type* const pNode = *pBucketArray;
+			*pBucketArray = pNode->mpNext;
+			DoFreeNode(pNode);
+			--mnElementCount;
+		}
+
+		return nElementCountSaved - mnElementCount;
+	}
+
+
+
+	// Destroys and frees all elements. The bucket array itself is retained,
+	// so subsequent inserts need not reallocate buckets.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear()
+	{
+		DoFreeNodes(mpBucketArray, mnBucketCount);
+		mnElementCount = 0;
+	}
+
+
+
+	// Destroys and frees all elements; if 'clearBuckets' is true, additionally
+	// frees the bucket array and resets the container to its initial empty state
+	// (pointing at the shared gpEmptyBucketArray).
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear(bool clearBuckets)
+	{
+		DoFreeNodes(mpBucketArray, mnBucketCount);
+		if(clearBuckets)
+		{
+			DoFreeBuckets(mpBucketArray, mnBucketCount);
+			reset();
+		}
+		mnElementCount = 0;
+	}
+
+
+
+	// Unilaterally returns the container to its empty state WITHOUT freeing nodes
+	// or buckets. The caller is responsible for the memory (e.g. scratch arenas).
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reset()
+	{
+		// The reset function is a special extension function which unilaterally
+		// resets the container to an empty state without freeing the memory of
+		// the contained objects. This is useful for very quickly tearing down a
+		// container built into scratch memory.
+		mnBucketCount = 1;
+
+		#ifdef _MSC_VER
+			mpBucketArray = (node_type**)&gpEmptyBucketArray[0];
+		#else
+			void* p = &gpEmptyBucketArray[0];
+			memcpy(&mpBucketArray, &p, sizeof(mpBucketArray)); // Other compilers implement strict aliasing and casting is thus unsafe.
+		#endif
+
+		mnElementCount = 0;
+		mRehashPolicy.mnNextResize = 0;
+	}
+
+
+
+	// Public rehash: rebuilds the table with exactly 'nBucketCount' buckets.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash(size_type nBucketCount)
+	{
+		// Note that we unilaterally use the passed in bucket count; we do not attempt migrate it
+		// up to the next prime number. We leave it at the user's discretion to do such a thing.
+		DoRehash(nBucketCount);
+	}
+
+
+
+	// Rebuilds the table with 'nNewBucketCount' buckets. Existing nodes are relinked
+	// into the new bucket array (no node is reallocated), then the old bucket array
+	// is freed. Note: bucket_index here may re-invoke the hash function, which is
+	// why the catch clause below must handle a hash function throwing mid-rehash.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoRehash(size_type nNewBucketCount)
+	{
+		node_type** const pBucketArray = DoAllocateBuckets(nNewBucketCount); // nNewBucketCount should always be >= 2.
+
+		#if EASTL_EXCEPTIONS_ENABLED
+			try
+			{
+		#endif
+				node_type* pNode;
+
+				for(size_type i = 0; i < mnBucketCount; ++i)
+				{
+					while((pNode = mpBucketArray[i]) != NULL) // Using '!=' disables compiler warnings.
+					{
+						const size_type nNewBucketIndex = (size_type)bucket_index(pNode, (uint32_t)nNewBucketCount);
+
+						// Pop the node off the old bucket and push it onto the new one.
+						mpBucketArray[i] = pNode->mpNext;
+						pNode->mpNext = pBucketArray[nNewBucketIndex];
+						pBucketArray[nNewBucketIndex] = pNode;
+					}
+				}
+
+				DoFreeBuckets(mpBucketArray, mnBucketCount);
+				mnBucketCount = nNewBucketCount;
+				mpBucketArray = pBucketArray;
+		#if EASTL_EXCEPTIONS_ENABLED
+			}
+			catch(...)
+			{
+				// A failure here means that a hash function threw an exception.
+				// We can't restore the previous state without calling the hash
+				// function again, so the only sensible recovery is to delete everything.
+				// Nodes already moved live in pBucketArray; not-yet-moved nodes are
+				// still in mpBucketArray — free both sets, plus the new bucket array.
+				DoFreeNodes(pBucketArray, nNewBucketCount);
+				DoFreeBuckets(pBucketArray, nNewBucketCount);
+				DoFreeNodes(mpBucketArray, mnBucketCount);
+				mnElementCount = 0;
+				throw;
+			}
+		#endif
+	}
+
+
+	// Debug-time structural check. Returns true if the container's invariants hold:
+	// the shared empty-bucket sentinel is unmodified and used correctly, the bucket
+	// count is sane, and mnElementCount matches the number of reachable elements.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline bool hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate() const
+	{
+		// Verify our empty bucket array is unmodified.
+		if(gpEmptyBucketArray[0] != NULL)
+			return false;
+
+		if(gpEmptyBucketArray[1] != (void*)uintptr_t(~0))
+			return false;
+
+		// Verify that we have at least one bucket. Calculations can
+		// trigger division by zero exceptions otherwise.
+		if(mnBucketCount == 0)
+			return false;
+
+		// Verify that gpEmptyBucketArray is used correctly.
+		// gpEmptyBucketArray is only used when initially empty.
+		if((void**)mpBucketArray == &gpEmptyBucketArray[0])
+		{
+			if(mnElementCount) // gpEmptyBucketArray is used only for empty hash tables.
+				return false;
+
+			if(mnBucketCount != 1) // gpEmptyBucketArray is used exactly and only for mnBucketCount == 1.
+				return false;
+		}
+		else
+		{
+			if(mnBucketCount < 2) // Small bucket counts *must* use gpEmptyBucketArray.
+				return false;
+		}
+
+		// Verify that the element count matches mnElementCount.
+		size_type nElementCount = 0;
+
+		for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+			++nElementCount;
+
+		if(nElementCount != mnElementCount)
+			return false;
+
+		// To do: Verify that individual elements are in the expected buckets.
+
+		return true;
+	}
+
+
+	// Debug-time check of an iterator against this container. Returns a bitmask of
+	// isf_* flags: valid+current+dereferenceable if 'i' refers to an element here,
+	// valid+current if 'i' is this container's end(), isf_none otherwise.
+	// Note: this is O(n) because it linearly scans for a match.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	int hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate_iterator(const_iterator i) const
+	{
+		// To do: Come up with a more efficient mechanism of doing this.
+
+		for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+		{
+			if(temp == i)
+				return (isf_valid | isf_current | isf_can_dereference);
+		}
+
+		if(i == end())
+			return (isf_valid | isf_current);
+
+		return isf_none;
+	}
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	// Equality comparison. NOTE(review): this compares element-by-element in iteration
+	// order via eastl::equal, so two hashtables holding the same elements in different
+	// iteration orders compare unequal — confirm this is the intended contract for
+	// callers before relying on it for unordered containers.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline bool operator==(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+						   const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+	{
+		return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+	}
+
+
+	// Comparing hash tables for less-ness is an odd thing to do. We provide it for
+	// completeness, though the user is advised to be wary of how they use this.
+	// The result depends on iteration order, which is hash- and history-dependent.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline bool operator<(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+						  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+	{
+		// This requires hash table elements to support operator<. Since the hash table
+		// doesn't compare elements via less (it does so via equals), we must use the
+		// globally defined operator less for the elements.
+		return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+	}
+
+
+	// Inequality, defined as the negation of operator==.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline bool operator!=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+						   const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+	{
+		return !(a == b);
+	}
+
+
+	// Greater-than, defined in terms of operator< with the arguments swapped.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline bool operator>(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+						  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+	{
+		return b < a;
+	}
+
+
+	// Less-than-or-equal, defined as the negation of greater-than.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline bool operator<=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+						   const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+	{
+		return !(b < a);
+	}
+
+
+	// Greater-than-or-equal, defined as the negation of less-than.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline bool operator>=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+						   const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+	{
+		return !(a < b);
+	}
+
+
+	/// swap
+	/// Global swap for hashtable; dispatches to the member swap function.
+	/// Fix: the parameters must be taken by non-const reference — the previous
+	/// const-qualified signature was ill-formed on instantiation, because it
+	/// invoked the non-const member swap() through const references, and swap
+	/// mutates both containers by definition.
+	template <typename K, typename V, typename A, typename EK, typename Eq,
+			  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
+	inline void swap(hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
+					 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
+	{
+		a.swap(b);
+	}
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/red_black_tree.h b/UnknownVersion/include/EASTL/internal/red_black_tree.h
new file mode 100644
index 0000000..ae09bd6
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/red_black_tree.h
@@ -0,0 +1,1917 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/red_black_tree.h
+// Written by Paul Pedriana 2005.
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_RED_BLACK_TREE_H
+#define EASTL_RED_BLACK_TREE_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/utility.h>
+#include <EASTL/algorithm.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #include <new>
+ #include <stddef.h>
+ #pragma warning(pop)
+#else
+ #include <new>
+ #include <stddef.h>
+#endif
+
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated
+ #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_RBTREE_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_RBTREE_DEFAULT_NAME
+ #define EASTL_RBTREE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " rbtree" // Unless the user overrides something, this is "EASTL rbtree".
+ #endif
+
+
+ /// EASTL_RBTREE_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_RBTREE_DEFAULT_ALLOCATOR
+ #define EASTL_RBTREE_DEFAULT_ALLOCATOR allocator_type(EASTL_RBTREE_DEFAULT_NAME)
+ #endif
+
+
+
+	/// RBTreeColor
+	/// Node color used by the red-black tree balancing algorithm.
+	enum RBTreeColor
+	{
+		kRBTreeColorRed,
+		kRBTreeColorBlack
+	};
+
+
+
+	/// RBTreeSide
+	/// Identifies which child side (left or right) an insertion targets.
+	enum RBTreeSide
+	{
+		kRBTreeSideLeft,
+		kRBTreeSideRight
+	};
+
+
+
+	/// rbtree_node_base
+	///
+	/// We define a rbtree_node_base separately from rbtree_node (below), because it
+	/// allows us to have non-templated operations, and it makes it so that the
+	/// rbtree anchor node doesn't carry a T with it, which would waste space and
+	/// possibly lead to surprising the user due to extra Ts existing that the user
+	/// didn't explicitly create. The downside to all of this is that it makes debug
+	/// viewing of an rbtree harder, given that the node pointers are of type
+	/// rbtree_node_base and not rbtree_node.
+	///
+	struct rbtree_node_base
+	{
+		typedef rbtree_node_base this_type;
+
+	public:
+		this_type* mpNodeRight;  // Declared first because it is used most often.
+		this_type* mpNodeLeft;   // Left child.
+		this_type* mpNodeParent; // Parent node; for the anchor node this points to the tree root.
+		char       mColor;       // Holds an RBTreeColor value. We only need one bit here, would be nice if we could stuff that bit somewhere else.
+	};
+
+
+	/// rbtree_node
+	/// The full, value-carrying node type; the tree links are in the base class.
+	template <typename Value>
+	struct rbtree_node : public rbtree_node_base
+	{
+		Value mValue; // For set and multiset, this is the user's value, for map and multimap, this is a pair of key/value.
+	};
+
+
+
+
+	// rbtree_node_base functions
+	//
+	// These are the fundamental functions that we use to maintain the
+	// tree. The bulk of the work of the tree maintenance is done in
+	// these functions. They are non-templated and defined out-of-line
+	// (EASTL_API) so the balancing logic is compiled once.
+	//
+	EASTL_API rbtree_node_base* RBTreeIncrement    (const rbtree_node_base* pNode);     // In-order successor.
+	EASTL_API rbtree_node_base* RBTreeDecrement    (const rbtree_node_base* pNode);     // In-order predecessor.
+	EASTL_API rbtree_node_base* RBTreeGetMinChild  (const rbtree_node_base* pNode);     // Leftmost node of the subtree.
+	EASTL_API rbtree_node_base* RBTreeGetMaxChild  (const rbtree_node_base* pNode);     // Rightmost node of the subtree.
+	EASTL_API size_t            RBTreeGetBlackCount(const rbtree_node_base* pNodeTop,
+													const rbtree_node_base* pNodeBottom); // Black-node count on the pNodeBottom->pNodeTop path (used by validation).
+	EASTL_API void              RBTreeInsert       (      rbtree_node_base* pNode,
+														  rbtree_node_base* pNodeParent,
+														  rbtree_node_base* pNodeAnchor,
+														  RBTreeSide insertionSide);    // Links pNode under pNodeParent on insertionSide, then rebalances.
+	EASTL_API void              RBTreeErase        (      rbtree_node_base* pNode,
+														  rbtree_node_base* pNodeAnchor); // Unlinks pNode from the tree, then rebalances.
+
+
+
+
+
+
+
+ /// rbtree_iterator
+ ///
+	/// rbtree_iterator
+	/// Bidirectional iterator over an rbtree. The Pointer/Reference parameters
+	/// select const vs. mutable flavors; the converting constructor from
+	/// 'iterator' allows iterator -> const_iterator conversion.
+	template <typename T, typename Pointer, typename Reference>
+	struct rbtree_iterator
+	{
+		typedef rbtree_iterator<T, Pointer, Reference>      this_type;
+		typedef rbtree_iterator<T, T*, T&>                  iterator;
+		typedef rbtree_iterator<T, const T*, const T&>      const_iterator;
+		typedef eastl_size_t                                size_type;     // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
+		typedef ptrdiff_t                                   difference_type;
+		typedef T                                           value_type;
+		typedef rbtree_node_base                            base_node_type;
+		typedef rbtree_node<T>                              node_type;
+		typedef Pointer                                     pointer;
+		typedef Reference                                   reference;
+		typedef EASTL_ITC_NS::bidirectional_iterator_tag    iterator_category;
+
+	public:
+		node_type* mpNode; // The current node; the anchor node serves as end().
+
+	public:
+		rbtree_iterator();
+		explicit rbtree_iterator(const node_type* pNode);
+		rbtree_iterator(const iterator& x); // Conversion from mutable iterator.
+
+		reference operator*() const;
+		pointer   operator->() const;
+
+		rbtree_iterator& operator++();    // In-order traversal via RBTreeIncrement.
+		rbtree_iterator  operator++(int);
+
+		rbtree_iterator& operator--();    // In-order traversal via RBTreeDecrement.
+		rbtree_iterator  operator--(int);
+
+	}; // rbtree_iterator
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // rb_base
+ //
+ // This class allows us to use a generic rbtree as the basis of map, multimap,
+ // set, and multiset transparently. The vital template parameters for this are
+ // the ExtractKey and the bUniqueKeys parameters.
+ //
+ // If the rbtree has a value type of the form pair<T1, T2> (i.e. it is a map or
+ // multimap and not a set or multiset) and a key extraction policy that returns
+ // the first part of the pair, the rbtree gets a mapped_type typedef.
+ // If it satisfies those criteria and also has unique keys, then it also gets an
+ // operator[] (which only map and set have and multimap and multiset don't have).
+ //
+ ///////////////////////////////////////////////////////////////////////////////
+
+
+
+	/// rb_base
+	/// This is the primary template, selected for 'set' (bUniqueKeys == true).
+	/// In this case, Key and Value will be the same as each other and ExtractKey
+	/// will be eastl::use_self.
+	///
+	template <typename Key, typename Value, typename Compare, typename ExtractKey, bool bUniqueKeys, typename RBTree>
+	struct rb_base
+	{
+		typedef ExtractKey extract_key;
+
+	public:
+		Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.
+
+	public:
+		rb_base() : mCompare() {}
+		rb_base(const Compare& compare) : mCompare(compare) {}
+	};
+
+
+	/// rb_base
+	/// This partial specialization (bUniqueKeys == false) is used for 'multiset'.
+	/// In this case, Key and Value will be the same as each
+	/// other and ExtractKey will be eastl::use_self.
+	///
+	template <typename Key, typename Value, typename Compare, typename ExtractKey, typename RBTree>
+	struct rb_base<Key, Value, Compare, ExtractKey, false, RBTree>
+	{
+		typedef ExtractKey extract_key;
+
+	public:
+		Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.
+
+	public:
+		rb_base() : mCompare() {}
+		rb_base(const Compare& compare) : mCompare(compare) {}
+	};
+
+
+	/// rb_base
+	/// This specialization is used for 'map': the value is a pair and the key is
+	/// extracted from its first member via eastl::use_first.
+	///
+	template <typename Key, typename Pair, typename Compare, typename RBTree>
+	struct rb_base<Key, Pair, Compare, eastl::use_first<Pair>, true, RBTree>
+	{
+		typedef eastl::use_first<Pair> extract_key;
+
+	public:
+		Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.
+
+	public:
+		rb_base() : mCompare() {}
+		rb_base(const Compare& compare) : mCompare(compare) {}
+	};
+
+
+	/// rb_base
+	/// This specialization is used for 'multimap': pair values with non-unique
+	/// keys, extracted via eastl::use_first.
+	///
+	template <typename Key, typename Pair, typename Compare, typename RBTree>
+	struct rb_base<Key, Pair, Compare, eastl::use_first<Pair>, false, RBTree>
+	{
+		typedef eastl::use_first<Pair> extract_key;
+
+	public:
+		Compare mCompare; // To do: Make sure that empty Compare classes go away via empty base optimizations.
+
+	public:
+		rb_base() : mCompare() {}
+		rb_base(const Compare& compare) : mCompare(compare) {}
+	};
+
+
+
+
+
+ /// rbtree
+ ///
+ /// rbtree is the red-black tree basis for the map, multimap, set, and multiset
+ /// containers. Just about all the work of those containers is done here, and
+ /// they are merely a shell which sets template policies that govern the code
+ /// generation for this rbtree.
+ ///
+ /// This rbtree implementation is pretty much the same as all other modern
+ /// rbtree implementations, as the topic is well known and researched. We may
+ /// choose to implement a "relaxed balancing" option at some point in the
+ /// future if it is deemed worthwhile. Most rbtree implementations don't do this.
+ ///
+ /// The primary rbtree member variable is mAnchor, which is a node_type and
+ /// acts as the end node. However, like any other node, it has mpNodeLeft,
+ /// mpNodeRight, and mpNodeParent members. We do the conventional trick of
+ /// assigning begin() (left-most rbtree node) to mpNodeLeft, assigning
+ /// 'end() - 1' (a.k.a. rbegin()) to mpNodeRight, and assigning the tree root
+ /// node to mpNodeParent.
+ ///
+ /// Compare (functor): This is a comparison class which defaults to 'less'.
+ /// It is a common STL thing which takes two arguments and returns true if
+ /// the first is less than the second.
+ ///
+ /// ExtractKey (functor): This is a class which gets the key from a stored
+ /// node. With map and set, the node is a pair, whereas with set and multiset
+ /// the node is just the value. ExtractKey will be either eastl::use_first (map and multimap)
+ /// or eastl::use_self (set and multiset).
+ ///
+ /// bMutableIterators (bool): true if rbtree::iterator is a mutable
+ /// iterator, false if iterator and const_iterator are both const iterators.
+ /// It will be true for map and multimap and false for set and multiset.
+ ///
+ /// bUniqueKeys (bool): true if the keys are to be unique, and false if there
+ /// can be multiple instances of a given key. It will be true for set and map
+ /// and false for multiset and multimap.
+ ///
+ /// To consider: Add an option for relaxed tree balancing. This could result
+ /// in performance improvements but would require a more complicated implementation.
+ ///
+ ///////////////////////////////////////////////////////////////////////
+ /// find_as
+ /// In order to support the ability to have a tree of strings but
+ /// be able to do efficiently lookups via char pointers (i.e. so they
+ /// aren't converted to string objects), we provide the find_as
+ /// function. This function allows you to do a find with a key of a
+ /// type other than the tree's key type. See the find_as function
+ /// for more documentation on this.
+ ///
+ template <typename Key, typename Value, typename Compare, typename Allocator,
+ typename ExtractKey, bool bMutableIterators, bool bUniqueKeys>
+ class rbtree
+ : public rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys,
+ rbtree<Key, Value, Compare, Allocator, ExtractKey, bMutableIterators, bUniqueKeys> >
+ {
+ public:
+ typedef ptrdiff_t difference_type;
+ typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
+ typedef Key key_type;
+ typedef Value value_type;
+ typedef rbtree_node<value_type> node_type;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef typename type_select<bMutableIterators,
+ rbtree_iterator<value_type, value_type*, value_type&>,
+ rbtree_iterator<value_type, const value_type*, const value_type&> >::type iterator;
+ typedef rbtree_iterator<value_type, const value_type*, const value_type&> const_iterator;
+ typedef eastl::reverse_iterator<iterator> reverse_iterator;
+ typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ typedef Allocator allocator_type;
+ typedef Compare key_compare;
+ typedef typename type_select<bUniqueKeys, eastl::pair<iterator, bool>, iterator>::type insert_return_type; // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
+ typedef rbtree<Key, Value, Compare, Allocator,
+ ExtractKey, bMutableIterators, bUniqueKeys> this_type;
+ typedef rb_base<Key, Value, Compare, ExtractKey, bUniqueKeys, this_type> base_type;
+ typedef integral_constant<bool, bUniqueKeys> has_unique_keys_type;
+ typedef typename base_type::extract_key extract_key;
+
+ using base_type::mCompare;
+
+ enum
+ {
+ kKeyAlignment = EASTL_ALIGN_OF(key_type),
+ kKeyAlignmentOffset = 0, // To do: Make sure this really is zero for all uses of this template.
+ kValueAlignment = EASTL_ALIGN_OF(value_type),
+ kValueAlignmentOffset = 0 // To fix: This offset is zero for sets and >0 for maps. Need to fix this.
+ };
+
+ public:
+ rbtree_node_base mAnchor; /// This node acts as end() and its mpLeft points to begin(), and mpRight points to rbegin() (the last node on the right).
+ size_type mnSize; /// Stores the count of nodes in the tree (not counting the anchor node).
+ allocator_type mAllocator; // To do: Use base class optimization to make this go away.
+
+ public:
+ // ctor/dtor
+ rbtree();
+ rbtree(const allocator_type& allocator);
+ rbtree(const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);
+ rbtree(const this_type& x);
+
+ template <typename InputIterator>
+ rbtree(InputIterator first, InputIterator last, const Compare& compare, const allocator_type& allocator = EASTL_RBTREE_DEFAULT_ALLOCATOR);
+
+ ~rbtree();
+
+ public:
+ // properties
+ allocator_type& get_allocator();
+ void set_allocator(const allocator_type& allocator);
+
+ const key_compare& key_comp() const { return mCompare; }
+ key_compare& key_comp() { return mCompare; }
+
+ this_type& operator=(const this_type& x);
+
+ void swap(this_type& x);
+
+ public:
+ // iterators
+ iterator begin();
+ const_iterator begin() const;
+ iterator end();
+ const_iterator end() const;
+
+ reverse_iterator rbegin();
+ const_reverse_iterator rbegin() const;
+ reverse_iterator rend();
+ const_reverse_iterator rend() const;
+
+ public:
+ bool empty() const;
+ size_type size() const;
+
+ /// map::insert and set::insert return a pair, while multimap::insert and
+ /// multiset::insert return an iterator.
+ insert_return_type insert(const value_type& value);
+
+ // C++ standard: inserts value if and only if there is no element with
+ // key equivalent to the key of t in containers with unique keys; always
+ // inserts value in containers with equivalent keys. Always returns the
+ // iterator pointing to the element with key equivalent to the key of value.
+ // iterator position is a hint pointing to where the insert should start
+ // to search. However, there is a potential defect/improvement report on this behaviour:
+ // LWG issue #233 (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1780.html)
+ // We follow the same approach as SGI STL/STLPort and use the position as
+ // a forced insertion position for the value when possible.
+ iterator insert(iterator position, const value_type& value);
+
+ template <typename InputIterator>
+ void insert(InputIterator first, InputIterator last);
+
+ iterator erase(iterator position);
+ iterator erase(iterator first, iterator last);
+
+ reverse_iterator erase(reverse_iterator position);
+ reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+
+ // For some reason, multiple STL versions make a specialization
+ // for erasing an array of key_types. I'm pretty sure we don't
+ // need this, but just to be safe we will follow suit.
+ // The implementation is trivial. Returns void because the values
+ // could well be randomly distributed throughout the tree and thus
+ // a return value would be nearly meaningless.
+ void erase(const key_type* first, const key_type* last);
+
+ void clear();
+ void reset();
+
+ iterator find(const key_type& key);
+ const_iterator find(const key_type& key) const;
+
+ /// Implements a find whereby the user supplies a comparison of a different type
+ /// than the tree's value_type. A useful case of this is one whereby you have
+ /// a container of string objects but want to do searches via passing in char pointers.
+ /// The problem is that without this kind of find, you need to do the expensive operation
+ /// of converting the char pointer to a string so it can be used as the argument to the
+ /// find function.
+ ///
+ /// Example usage (note that the compare uses string as first type and char* as second):
+ /// set<string> strings;
+ /// strings.find_as("hello", less_2<string, const char*>());
+ ///
+ template <typename U, typename Compare2>
+ iterator find_as(const U& u, Compare2 compare2);
+
+ template <typename U, typename Compare2>
+ const_iterator find_as(const U& u, Compare2 compare2) const;
+
+ iterator lower_bound(const key_type& key);
+ const_iterator lower_bound(const key_type& key) const;
+
+ iterator upper_bound(const key_type& key);
+ const_iterator upper_bound(const key_type& key) const;
+
+ bool validate() const;
+ int validate_iterator(const_iterator i) const;
+
+ protected:
+ node_type* DoAllocateNode();
+ void DoFreeNode(node_type* pNode);
+
+ node_type* DoCreateNodeFromKey(const key_type& key);
+ node_type* DoCreateNode(const value_type& value);
+ node_type* DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent);
+
+ node_type* DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest);
+ void DoNukeSubtree(node_type* pNode);
+
+ // Intentionally return a pair and not an iterator for DoInsertValue(..., true_type)
+ // This is because the C++ standard for map and set is to return a pair and not just an iterator.
+ eastl::pair<iterator, bool> DoInsertValue(const value_type& value, true_type); // true_type means keys are unique.
+ iterator DoInsertValue(const value_type& value, false_type); // false_type means keys are not unique.
+
+ eastl::pair<iterator, bool> DoInsertKey(const key_type& key, true_type);
+ iterator DoInsertKey(const key_type& key, false_type);
+
+ iterator DoInsertValue(iterator position, const value_type& value, true_type);
+ iterator DoInsertValue(iterator position, const value_type& value, false_type);
+
+ iterator DoInsertKey(iterator position, const key_type& key, true_type);
+ iterator DoInsertKey(iterator position, const key_type& key, false_type);
+
+ iterator DoInsertValueImpl(node_type* pNodeParent, const value_type& value, bool bForceToLeft);
+ iterator DoInsertKeyImpl(node_type* pNodeParent, const key_type& key, bool bForceToLeft);
+
+ }; // rbtree
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_node_base functions
+ ///////////////////////////////////////////////////////////////////////
+
+	/// Returns the left-most (minimum) node of the subtree rooted at pNodeBase.
+	/// When called on the tree's root, the result is the node that begin() refers to.
+	/// The const_cast lets one implementation serve both const and non-const callers.
+	EASTL_API inline rbtree_node_base* RBTreeGetMinChild(const rbtree_node_base* pNodeBase)
+	{
+		while(pNodeBase->mpNodeLeft)
+			pNodeBase = pNodeBase->mpNodeLeft;
+		return const_cast<rbtree_node_base*>(pNodeBase);
+	}
+
+	/// Returns the right-most (maximum) node of the subtree rooted at pNodeBase.
+	/// When called on the tree's root, the result is the last node (rbegin()'s target).
+	EASTL_API inline rbtree_node_base* RBTreeGetMaxChild(const rbtree_node_base* pNodeBase)
+	{
+		while(pNodeBase->mpNodeRight)
+			pNodeBase = pNodeBase->mpNodeRight;
+		return const_cast<rbtree_node_base*>(pNodeBase);
+	}
+
+ // The rest of the functions are non-trivial and are found in
+ // the corresponding .cpp file to this file.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree_iterator functions
+ ///////////////////////////////////////////////////////////////////////
+
+	/// Default constructor; produces a singular iterator (mpNode == NULL) which
+	/// must be assigned before use.
+	template <typename T, typename Pointer, typename Reference>
+	rbtree_iterator<T, Pointer, Reference>::rbtree_iterator()
+		: mpNode(NULL) { }
+
+
+	/// Constructs an iterator referring to pNode. The const_cast permits
+	/// construction from a const node pointer; const-correctness is instead
+	/// carried by the Pointer/Reference template parameters.
+	template <typename T, typename Pointer, typename Reference>
+	rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const node_type* pNode)
+		: mpNode(static_cast<node_type*>(const_cast<node_type*>(pNode))) { }
+
+
+	/// Converting constructor from the mutable iterator type; this is what
+	/// allows a const_iterator to be constructed from an iterator.
+	template <typename T, typename Pointer, typename Reference>
+	rbtree_iterator<T, Pointer, Reference>::rbtree_iterator(const iterator& x)
+		: mpNode(x.mpNode) { }
+
+
+	/// Dereference: yields the stored value of the current node.
+	template <typename T, typename Pointer, typename Reference>
+	typename rbtree_iterator<T, Pointer, Reference>::reference
+	rbtree_iterator<T, Pointer, Reference>::operator*() const
+		{ return mpNode->mValue; }
+
+
+	/// Member access: yields a pointer to the stored value of the current node.
+	template <typename T, typename Pointer, typename Reference>
+	typename rbtree_iterator<T, Pointer, Reference>::pointer
+	rbtree_iterator<T, Pointer, Reference>::operator->() const
+		{ return &mpNode->mValue; }
+
+
+	/// Pre-increment: advances to the in-order successor via RBTreeIncrement.
+	template <typename T, typename Pointer, typename Reference>
+	typename rbtree_iterator<T, Pointer, Reference>::this_type&
+	rbtree_iterator<T, Pointer, Reference>::operator++()
+	{
+		mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+		return *this;
+	}
+
+
+	/// Post-increment: advances to the in-order successor, returning the
+	/// pre-increment value by copy.
+	template <typename T, typename Pointer, typename Reference>
+	typename rbtree_iterator<T, Pointer, Reference>::this_type
+	rbtree_iterator<T, Pointer, Reference>::operator++(int)
+	{
+		this_type temp(*this);
+		mpNode = static_cast<node_type*>(RBTreeIncrement(mpNode));
+		return temp;
+	}
+
+
+	/// Pre-decrement: steps back to the in-order predecessor via RBTreeDecrement.
+	template <typename T, typename Pointer, typename Reference>
+	typename rbtree_iterator<T, Pointer, Reference>::this_type&
+	rbtree_iterator<T, Pointer, Reference>::operator--()
+	{
+		mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+		return *this;
+	}
+
+
+	/// Post-decrement: steps back to the in-order predecessor, returning the
+	/// pre-decrement value by copy.
+	template <typename T, typename Pointer, typename Reference>
+	typename rbtree_iterator<T, Pointer, Reference>::this_type
+	rbtree_iterator<T, Pointer, Reference>::operator--(int)
+	{
+		this_type temp(*this);
+		mpNode = static_cast<node_type*>(RBTreeDecrement(mpNode));
+		return temp;
+	}
+
+
+ // The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
+ // Thus we provide additional template paremeters here to support this. The defect report does not
+ // require us to support comparisons between reverse_iterators and const_reverse_iterators.
+	// Equality is defined purely by node identity; two iterators are equal
+	// exactly when they refer to the same tree node.
+	template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+	inline bool operator==(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+						   const rbtree_iterator<T, PointerB, ReferenceB>& b)
+	{
+		return a.mpNode == b.mpNode;
+	}
+
+
+	// Mixed-type inequality (iterator vs const_iterator), per defect report #179.
+	template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
+	inline bool operator!=(const rbtree_iterator<T, PointerA, ReferenceA>& a,
+						   const rbtree_iterator<T, PointerB, ReferenceB>& b)
+	{
+		return a.mpNode != b.mpNode;
+	}
+
+
+	// We provide a version of operator!= for the case where the iterators are of the
+	// same type. This helps prevent ambiguity errors in the presence of rel_ops.
+	template <typename T, typename Pointer, typename Reference>
+	inline bool operator!=(const rbtree_iterator<T, Pointer, Reference>& a,
+						   const rbtree_iterator<T, Pointer, Reference>& b)
+	{
+		return a.mpNode != b.mpNode;
+	}
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rbtree functions
+ ///////////////////////////////////////////////////////////////////////
+
+	/// Default constructor: empty tree using the default-named allocator.
+	/// reset() puts mAnchor into its empty-tree self-referential state.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline rbtree<K, V, C, A, E, bM, bU>::rbtree()
+		: mAnchor(),
+		  mnSize(0),
+		  mAllocator(EASTL_RBTREE_DEFAULT_NAME)
+	{
+		reset();
+	}
+
+
+	/// Constructs an empty tree that uses the given allocator.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const allocator_type& allocator)
+		: mAnchor(),
+		  mnSize(0),
+		  mAllocator(allocator)
+	{
+		reset();
+	}
+
+
+	/// Constructs an empty tree with the given comparison functor (stored in the
+	/// rb_base base class) and allocator.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const C& compare, const allocator_type& allocator)
+		: base_type(compare),
+		  mAnchor(),
+		  mnSize(0),
+		  mAllocator(allocator)
+	{
+		reset();
+	}
+
+
+	/// Copy constructor: deep-copies x's node structure via DoCopySubtree and
+	/// then repairs the anchor's left/right pointers to the new min/max nodes.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline rbtree<K, V, C, A, E, bM, bU>::rbtree(const this_type& x)
+		: base_type(x.mCompare),
+		  mAnchor(),
+		  mnSize(0),
+		  mAllocator(x.mAllocator)
+	{
+		reset();
+
+		if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+		{
+			mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+			mAnchor.mpNodeRight  = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+			mAnchor.mpNodeLeft   = RBTreeGetMinChild(mAnchor.mpNodeParent);
+			mnSize               = x.mnSize;
+		}
+	}
+
+
+	/// Range constructor: inserts [first, last) one element at a time. If an
+	/// insertion throws (and exceptions are enabled), already-inserted nodes are
+	/// cleared before the exception is re-thrown, so no memory is leaked.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	template <typename InputIterator>
+	inline rbtree<K, V, C, A, E, bM, bU>::rbtree(InputIterator first, InputIterator last, const C& compare, const allocator_type& allocator)
+		: base_type(compare),
+		  mAnchor(),
+		  mnSize(0),
+		  mAllocator(allocator)
+	{
+		reset();
+
+		#if EASTL_EXCEPTIONS_ENABLED
+			try
+			{
+		#endif
+				for(; first != last; ++first)
+					insert(*first);
+		#if EASTL_EXCEPTIONS_ENABLED
+			}
+			catch(...)
+			{
+				clear();
+				throw;
+			}
+		#endif
+	}
+
+
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline rbtree<K, V, C, A, E, bM, bU>::~rbtree()
+	{
+		// Erase the entire tree. DoNukeSubtree is not a
+		// conventional erase function, as it does no rebalancing.
+		DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+	}
+
+
+	/// Returns a modifiable reference to the tree's allocator.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::allocator_type&
+	rbtree<K, V, C, A, E, bM, bU>::get_allocator()
+	{
+		return mAllocator;
+	}
+
+
+	/// Replaces the tree's allocator. NOTE(review): existing nodes were allocated
+	/// with the previous allocator; the visible code does no reallocation, so this
+	/// is presumably intended for use before insertions occur — confirm with callers.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline void rbtree<K, V, C, A, E, bM, bU>::set_allocator(const allocator_type& allocator)
+	{
+		mAllocator = allocator;
+	}
+
+
+	/// Returns the number of nodes in the tree (maintained in mnSize; O(1)).
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::size_type
+	rbtree<K, V, C, A, E, bM, bU>::size() const
+		{ return mnSize; }
+
+
+	/// Returns true if the tree holds no nodes.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline bool rbtree<K, V, C, A, E, bM, bU>::empty() const
+		{ return (mnSize == 0); }
+
+
+	/// begin(): the anchor's left pointer is maintained as the minimum node.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::begin()
+		{ return iterator(static_cast<node_type*>(mAnchor.mpNodeLeft)); }
+
+
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+	rbtree<K, V, C, A, E, bM, bU>::begin() const
+		{ return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(mAnchor.mpNodeLeft))); }
+
+
+	/// end(): the anchor node itself acts as the one-past-the-end position.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::end()
+		{ return iterator(static_cast<node_type*>(&mAnchor)); }
+
+
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+	rbtree<K, V, C, A, E, bM, bU>::end() const
+		{ return const_iterator(static_cast<node_type*>(const_cast<rbtree_node_base*>(&mAnchor))); }
+
+
+	// The reverse iterator accessors are the standard adaptors over end()/begin().
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+	rbtree<K, V, C, A, E, bM, bU>::rbegin()
+		{ return reverse_iterator(end()); }
+
+
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+	rbtree<K, V, C, A, E, bM, bU>::rbegin() const
+		{ return const_reverse_iterator(end()); }
+
+
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+	rbtree<K, V, C, A, E, bM, bU>::rend()
+		{ return reverse_iterator(begin()); }
+
+
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::const_reverse_iterator
+	rbtree<K, V, C, A, E, bM, bU>::rend() const
+		{ return const_reverse_iterator(begin()); }
+
+
+	/// Copy assignment: clears the current contents, optionally copies the
+	/// allocator (compile-time configurable), copies the comparison functor,
+	/// then deep-copies x's nodes via DoCopySubtree — mirroring the copy ctor.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::this_type&
+	rbtree<K, V, C, A, E, bM, bU>::operator=(const this_type& x)
+	{
+		if(this != &x) // Self-assignment guard.
+		{
+			clear();
+
+			#if EASTL_ALLOCATOR_COPY_ENABLED
+				mAllocator = x.mAllocator;
+			#endif
+
+			base_type::mCompare = x.mCompare;
+
+			if(x.mAnchor.mpNodeParent) // mAnchor.mpNodeParent is the rb_tree root node.
+			{
+				mAnchor.mpNodeParent = DoCopySubtree((const node_type*)x.mAnchor.mpNodeParent, (node_type*)&mAnchor);
+				mAnchor.mpNodeRight  = RBTreeGetMaxChild(mAnchor.mpNodeParent);
+				mAnchor.mpNodeLeft   = RBTreeGetMinChild(mAnchor.mpNodeParent);
+				mnSize               = x.mnSize;
+			}
+		}
+		return *this;
+	}
+
+
+	/// Swaps contents with x. O(1) pointer exchange when allocators compare
+	/// equal; otherwise falls back to an O(n) copy-based three-way exchange,
+	/// because nodes cannot safely migrate between different allocators.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	void rbtree<K, V, C, A, E, bM, bU>::swap(this_type& x)
+	{
+		if(mAllocator == x.mAllocator) // If allocators are equivalent...
+		{
+			// Most of our members can be exchaged by a basic swap:
+			// We leave mAllocator as-is.
+			eastl::swap(mnSize,              x.mnSize);
+			eastl::swap(base_type::mCompare, x.mCompare);
+
+			// However, because our anchor node is a part of our class instance and not
+			// dynamically allocated, we can't do a swap of it but must do a more elaborate
+			// procedure. This is the downside to having the mAnchor be like this, but
+			// otherwise we consider it a good idea to avoid allocating memory for a
+			// nominal container instance.
+
+			// We optimize for the expected most common case: both pointers being non-null.
+			if(mAnchor.mpNodeParent && x.mAnchor.mpNodeParent) // If both pointers are non-null...
+			{
+				eastl::swap(mAnchor.mpNodeRight,  x.mAnchor.mpNodeRight);
+				eastl::swap(mAnchor.mpNodeLeft,   x.mAnchor.mpNodeLeft);
+				eastl::swap(mAnchor.mpNodeParent, x.mAnchor.mpNodeParent);
+
+				// We need to fix up the anchors to point to themselves (we can't just swap them).
+				mAnchor.mpNodeParent->mpNodeParent   = &mAnchor;
+				x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+			}
+			else if(mAnchor.mpNodeParent) // Only we have nodes; hand them to x.
+			{
+				x.mAnchor.mpNodeRight  = mAnchor.mpNodeRight;
+				x.mAnchor.mpNodeLeft   = mAnchor.mpNodeLeft;
+				x.mAnchor.mpNodeParent = mAnchor.mpNodeParent;
+				x.mAnchor.mpNodeParent->mpNodeParent = &x.mAnchor;
+
+				// We need to fix up our anchor to point it itself (we can't have it swap with x).
+				mAnchor.mpNodeRight  = &mAnchor;
+				mAnchor.mpNodeLeft   = &mAnchor;
+				mAnchor.mpNodeParent = NULL;
+			}
+			else if(x.mAnchor.mpNodeParent) // Only x has nodes; take them from x.
+			{
+				mAnchor.mpNodeRight  = x.mAnchor.mpNodeRight;
+				mAnchor.mpNodeLeft   = x.mAnchor.mpNodeLeft;
+				mAnchor.mpNodeParent = x.mAnchor.mpNodeParent;
+				mAnchor.mpNodeParent->mpNodeParent = &mAnchor;
+
+				// We need to fix up x's anchor to point it itself (we can't have it swap with us).
+				x.mAnchor.mpNodeRight  = &x.mAnchor;
+				x.mAnchor.mpNodeLeft   = &x.mAnchor;
+				x.mAnchor.mpNodeParent = NULL;
+			} // Else both are NULL and there is nothing to do.
+		}
+		else
+		{
+			const this_type temp(*this); // Can't call eastl::swap because that would
+			*this = x;                   // itself call this member swap function.
+			x     = temp;
+		}
+	}
+
+
+	/// Inserts value, dispatching at compile time on has_unique_keys_type:
+	/// unique-key trees (map/set) use the pair-returning overload, while
+	/// non-unique trees (multimap/multiset) use the iterator-returning one.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	inline typename rbtree<K, V, C, A, E, bM, bU>::insert_return_type // map/set::insert return a pair, multimap/multiset::iterator return an iterator.
+	rbtree<K, V, C, A, E, bM, bU>::insert(const value_type& value)
+		{ return DoInsertValue(value, has_unique_keys_type()); }
+
+
+	/// Hinted insert; position is used as a forced insertion position when the
+	/// hint is consistent with the ordering (see the declaration's LWG #233 note).
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::insert(iterator position, const value_type& value)
+		{ return DoInsertValue(position, value, has_unique_keys_type()); }
+
+
+	/// Unique-key insertion: walks down from the root to find the insertion
+	/// point, then inserts only if no equivalent key exists. Returns
+	/// (iterator, true) on insertion, or (iterator-to-existing, false) if an
+	/// element with an equivalent key is already present.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+	rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(const value_type& value, true_type) // true_type means keys are unique.
+	{
+		// This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+		// Note that we return a pair and not an iterator. This is because the C++ standard for map
+		// and set is to return a pair and not just an iterator.
+		extract_key extractKey;
+
+		node_type* pCurrent    = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+		node_type* pLowerBound = (node_type*)&mAnchor;             // Set it to the container end for now.
+		node_type* pParent;                                        // This will be where we insert the new node.
+
+		bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front.
+
+		// Find insertion position of the value. This will either be a position which
+		// already contains the value, a position which is greater than the value or
+		// end(), which we treat like a position which is greater than the value.
+		while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+		{
+			bValueLessThanNode = mCompare(extractKey(value), extractKey(pCurrent->mValue));
+			pLowerBound        = pCurrent;
+
+			if(bValueLessThanNode)
+			{
+				EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), extractKey(value))); // Validate that the compare function is sane.
+				pCurrent = (node_type*)pCurrent->mpNodeLeft;
+			}
+			else
+				pCurrent = (node_type*)pCurrent->mpNodeRight;
+		}
+
+		pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below.
+
+		if(bValueLessThanNode) // If we ended up on the left side of the last parent node...
+		{
+			if(EASTL_LIKELY(pLowerBound != (node_type*)mAnchor.mpNodeLeft)) // If the tree was empty or if we otherwise need to insert at the very front of the tree...
+			{
+				// At this point, pLowerBound points to a node which is > than value.
+				// Move it back by one, so that it points to a node which is <= value.
+				pLowerBound = (node_type*)RBTreeDecrement(pLowerBound);
+			}
+			else
+			{
+				// Inserting at the very front (or into an empty tree); no duplicate is possible.
+				const iterator itResult(DoInsertValueImpl(pLowerBound, value, false));
+				return pair<iterator, bool>(itResult, true);
+			}
+		}
+
+		// Since here we require values to be unique, we will do nothing if the value already exists.
+		if(mCompare(extractKey(pLowerBound->mValue), extractKey(value))) // If the node is < the value (i.e. if value is >= the node)...
+		{
+			EASTL_VALIDATE_COMPARE(!mCompare(extractKey(value), extractKey(pLowerBound->mValue))); // Validate that the compare function is sane.
+			const iterator itResult(DoInsertValueImpl(pParent, value, false));
+			return pair<iterator, bool>(itResult, true);
+		}
+
+		// The item already exists (as found by the compare directly above), so return false.
+		return pair<iterator, bool>(iterator(pLowerBound), false);
+	}
+
+
+	/// Non-unique-key insertion: walks down from the root and always inserts,
+	/// returning an iterator to the newly inserted element. Equivalent keys go
+	/// to the right, preserving insertion order among equivalents.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(const value_type& value, false_type) // false_type means keys are not unique.
+	{
+		// This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+		node_type* pCurrent  = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+		node_type* pRangeEnd = (node_type*)&mAnchor;             // Set it to the container end for now.
+		extract_key extractKey;
+
+		while(pCurrent)
+		{
+			pRangeEnd = pCurrent;
+
+			if(mCompare(extractKey(value), extractKey(pCurrent->mValue)))
+			{
+				EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), extractKey(value))); // Validate that the compare function is sane.
+				pCurrent = (node_type*)pCurrent->mpNodeLeft;
+			}
+			else
+				pCurrent = (node_type*)pCurrent->mpNodeRight;
+		}
+
+		return DoInsertValueImpl(pRangeEnd, value, false);
+	}
+
+
+	/// Key-only variant of the unique-key insertion above; used when the caller
+	/// has just a key and the value can be constructed from it (see
+	/// DoCreateNodeFromKey). Same algorithm as DoInsertValue(value, true_type).
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	eastl::pair<typename rbtree<K, V, C, A, E, bM, bU>::iterator, bool>
+	rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(const key_type& key, true_type) // true_type means keys are unique.
+	{
+		// This code is essentially a slightly modified copy of the the rbtree::insert
+		// function whereby this version takes a key and not a full value_type.
+		extract_key extractKey;
+
+		node_type* pCurrent    = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+		node_type* pLowerBound = (node_type*)&mAnchor;             // Set it to the container end for now.
+		node_type* pParent;                                        // This will be where we insert the new node.
+
+		bool bValueLessThanNode = true; // If the tree is empty, this will result in an insertion at the front.
+
+		// Find insertion position of the value. This will either be a position which
+		// already contains the value, a position which is greater than the value or
+		// end(), which we treat like a position which is greater than the value.
+		while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+		{
+			bValueLessThanNode = mCompare(key, extractKey(pCurrent->mValue));
+			pLowerBound        = pCurrent;
+
+			if(bValueLessThanNode)
+			{
+				EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+				pCurrent = (node_type*)pCurrent->mpNodeLeft;
+			}
+			else
+				pCurrent = (node_type*)pCurrent->mpNodeRight;
+		}
+
+		pParent = pLowerBound; // pLowerBound is actually upper bound right now (i.e. it is > value instead of <=), but we will make it the lower bound below.
+
+		if(bValueLessThanNode) // If we ended up on the left side of the last parent node...
+		{
+			if(EASTL_LIKELY(pLowerBound != (node_type*)mAnchor.mpNodeLeft)) // If the tree was empty or if we otherwise need to insert at the very front of the tree...
+			{
+				// At this point, pLowerBound points to a node which is > than value.
+				// Move it back by one, so that it points to a node which is <= value.
+				pLowerBound = (node_type*)RBTreeDecrement(pLowerBound);
+			}
+			else
+			{
+				// Inserting at the very front (or into an empty tree); no duplicate is possible.
+				const iterator itResult(DoInsertKeyImpl(pLowerBound, key, false));
+				return pair<iterator, bool>(itResult, true);
+			}
+		}
+
+		// Since here we require values to be unique, we will do nothing if the value already exists.
+		if(mCompare(extractKey(pLowerBound->mValue), key)) // If the node is < the value (i.e. if value is >= the node)...
+		{
+			EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(pLowerBound->mValue))); // Validate that the compare function is sane.
+			const iterator itResult(DoInsertKeyImpl(pParent, key, false));
+			return pair<iterator, bool>(itResult, true);
+		}
+
+		// The item already exists (as found by the compare directly above), so return false.
+		return pair<iterator, bool>(iterator(pLowerBound), false);
+	}
+
+
+	/// Key-only variant of non-unique insertion: always inserts, with
+	/// equivalent keys going to the right of existing equivalents.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(const key_type& key, false_type) // false_type means keys are not unique.
+	{
+		// This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+		node_type* pCurrent  = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+		node_type* pRangeEnd = (node_type*)&mAnchor;             // Set it to the container end for now.
+		extract_key extractKey;
+
+		while(pCurrent)
+		{
+			pRangeEnd = pCurrent;
+
+			if(mCompare(key, extractKey(pCurrent->mValue)))
+			{
+				EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+				pCurrent = (node_type*)pCurrent->mpNodeLeft;
+			}
+			else
+				pCurrent = (node_type*)pCurrent->mpNodeRight;
+		}
+
+		return DoInsertKeyImpl(pRangeEnd, key, false);
+	}
+
+
+	/// Hinted unique-key insertion. If the hint is usable (value sorts strictly
+	/// between *position and *itNext), inserts there directly; otherwise falls
+	/// back to the un-hinted DoInsertValue. A hint equal to the last node or
+	/// end() is handled by the append fast path at the bottom.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(iterator position, const value_type& value, true_type) // true_type means keys are unique.
+	{
+		// This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+		//
+		// We follow the same approach as SGI STL/STLPort and use the position as
+		// a forced insertion position for the value when possible.
+		extract_key extractKey;
+
+		if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+		{
+			iterator itNext(position);
+			++itNext; // Safe: position is neither the last node nor end(), per the guard above.
+
+			// To consider: Change this so that 'position' specifies the position after
+			// where the insertion goes and not the position before where the insertion goes.
+			// Doing so would make this more in line with user expectations and with LWG #233.
+			const bool bPositionLessThanValue = mCompare(extractKey(position.mpNode->mValue), extractKey(value));
+
+			if(bPositionLessThanValue) // If (value > *position)...
+			{
+				EASTL_VALIDATE_COMPARE(!mCompare(extractKey(value), extractKey(position.mpNode->mValue))); // Validate that the compare function is sane.
+
+				const bool bValueLessThanNext = mCompare(extractKey(value), extractKey(itNext.mpNode->mValue));
+
+				if(bValueLessThanNext) // if (value < *itNext)...
+				{
+					EASTL_VALIDATE_COMPARE(!mCompare(extractKey(itNext.mpNode->mValue), extractKey(value))); // Validate that the compare function is sane.
+
+					if(position.mpNode->mpNodeRight)
+						return DoInsertValueImpl(itNext.mpNode, value, true);
+					return DoInsertValueImpl(position.mpNode, value, false);
+				}
+			}
+
+			return DoInsertValue(value, has_unique_keys_type()).first; // Hint was not usable; do a regular insertion.
+		}
+
+		// Hint is the last node or end(): fast path for appending a value greater than the current maximum.
+		if(mnSize && mCompare(extractKey(((node_type*)mAnchor.mpNodeRight)->mValue), extractKey(value)))
+		{
+			EASTL_VALIDATE_COMPARE(!mCompare(extractKey(value), extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane.
+			return DoInsertValueImpl((node_type*)mAnchor.mpNodeRight, value, false);
+		}
+
+		return DoInsertValue(value, has_unique_keys_type()).first;
+	}
+
+
+	/// Hinted non-unique insertion. Uses the hint when value sorts (inclusively)
+	/// between *position and *itNext; otherwise falls back to the un-hinted
+	/// DoInsertValue. Always inserts, since keys need not be unique.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::DoInsertValue(iterator position, const value_type& value, false_type) // false_type means keys are not unique.
+	{
+		// This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+		//
+		// We follow the same approach as SGI STL/STLPort and use the position as
+		// a forced insertion position for the value when possible.
+		extract_key extractKey;
+
+		if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+		{
+			iterator itNext(position);
+			++itNext; // Safe: position is neither the last node nor end(), per the guard above.
+
+			// To consider: Change this so that 'position' specifies the position after
+			// where the insertion goes and not the position before where the insertion goes.
+			// Doing so would make this more in line with user expectations and with LWG #233.
+
+			if(!mCompare(extractKey(value), extractKey(position.mpNode->mValue)) && // If value >= *position &&
+			   !mCompare(extractKey(itNext.mpNode->mValue), extractKey(value)))     // if value <= *itNext...
+			{
+				if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()]
+					return DoInsertValueImpl(itNext.mpNode, value, true); // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+				return DoInsertValueImpl(position.mpNode, value, false);
+			}
+
+			return DoInsertValue(value, has_unique_keys_type()); // If the above specified hint was not useful, then we do a regular insertion.
+		}
+
+		// This pathway shouldn't be commonly executed, as the user shouldn't be calling
+		// this hinted version of insert if the user isn't providing a useful hint.
+
+		if(mnSize && !mCompare(extractKey(value), extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node...
+			return DoInsertValueImpl((node_type*)mAnchor.mpNodeRight, value, false); // Insert after the last node (doesn't matter if we force left or not).
+
+		return DoInsertValue(value, has_unique_keys_type()); // We are empty or we are inserting at the end.
+	}
+
+
+	/// Key-only variant of the hinted unique-key insertion; the same algorithm
+	/// as DoInsertValue(position, value, true_type) but compares against the
+	/// bare key and defers to DoInsertKeyImpl/DoInsertKey for the actual insert.
+	template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+	typename rbtree<K, V, C, A, E, bM, bU>::iterator
+	rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(iterator position, const key_type& key, true_type) // true_type means keys are unique.
+	{
+		// This is the pathway for insertion of unique keys (map and set, but not multimap and multiset).
+		//
+		// We follow the same approach as SGI STL/STLPort and use the position as
+		// a forced insertion position for the value when possible.
+		extract_key extractKey;
+
+		if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+		{
+			iterator itNext(position);
+			++itNext; // Safe: position is neither the last node nor end(), per the guard above.
+
+			// To consider: Change this so that 'position' specifies the position after
+			// where the insertion goes and not the position before where the insertion goes.
+			// Doing so would make this more in line with user expectations and with LWG #233.
+			const bool bPositionLessThanValue = mCompare(extractKey(position.mpNode->mValue), key);
+
+			if(bPositionLessThanValue) // If (value > *position)...
+			{
+				EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(position.mpNode->mValue))); // Validate that the compare function is sane.
+
+				const bool bValueLessThanNext = mCompare(key, extractKey(itNext.mpNode->mValue));
+
+				if(bValueLessThanNext) // If value < *itNext...
+				{
+					EASTL_VALIDATE_COMPARE(!mCompare(extractKey(itNext.mpNode->mValue), key)); // Validate that the compare function is sane.
+
+					if(position.mpNode->mpNodeRight)
+						return DoInsertKeyImpl(itNext.mpNode, key, true);
+					return DoInsertKeyImpl(position.mpNode, key, false);
+				}
+			}
+
+			return DoInsertKey(key, has_unique_keys_type()).first; // Hint was not usable; do a regular insertion.
+		}
+
+		// Hint is the last node or end(): fast path for appending a key greater than the current maximum.
+		if(mnSize && mCompare(extractKey(((node_type*)mAnchor.mpNodeRight)->mValue), key))
+		{
+			EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))); // Validate that the compare function is sane.
+			return DoInsertKeyImpl((node_type*)mAnchor.mpNodeRight, key, false);
+		}
+
+		return DoInsertKey(key, has_unique_keys_type()).first;
+	}
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertKey(iterator position, const key_type& key, false_type) // false_type means keys are not unique.
+    {
+        // This is the pathway for insertion of non-unique keys (multimap and multiset, but not map and set).
+        //
+        // We follow the same approach as SGI STL/STLPort and use the position as
+        // a forced insertion position for the value when possible.
+        //
+        // Unlike the unique-key overload, equality with the neighbors is acceptable
+        // here, so the comparisons below are non-strict (>= / <=).
+        extract_key extractKey;
+
+        if((position.mpNode != mAnchor.mpNodeRight) && (position.mpNode != &mAnchor)) // If the user specified a specific insertion position...
+        {
+            iterator itNext(position);
+            ++itNext;
+
+            // To consider: Change this so that 'position' specifies the position after
+            // where the insertion goes and not the position before where the insertion goes.
+            // Doing so would make this more in line with user expectations and with LWG #233.
+            if(!mCompare(key, extractKey(position.mpNode->mValue)) && // If value >= *position &&
+               !mCompare(extractKey(itNext.mpNode->mValue), key))    // if value <= *itNext...
+            {
+                if(position.mpNode->mpNodeRight) // If there are any nodes to the right... [this expression will always be true as long as we aren't at the end()]
+                    return DoInsertKeyImpl(itNext.mpNode, key, true); // Specifically insert in front of (to the left of) itNext (and thus after 'position').
+                return DoInsertKeyImpl(position.mpNode, key, false);
+            }
+
+            return DoInsertKey(key, has_unique_keys_type()); // If the above specified hint was not useful, then we do a regular insertion.
+        }
+
+        // This pathway shouldn't be commonly executed, as the user shouldn't be calling
+        // this hinted version of insert if the user isn't providing a useful hint.
+        if(mnSize && !mCompare(key, extractKey(((node_type*)mAnchor.mpNodeRight)->mValue))) // If we are non-empty and the value is >= the last node...
+            return DoInsertKeyImpl((node_type*)mAnchor.mpNodeRight, key, false); // Insert after the last node (doesn't matter if we force left or not).
+
+        return DoInsertKey(key, has_unique_keys_type()); // We are empty or we are inserting at the end.
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertValueImpl(node_type* pNodeParent, const value_type& value, bool bForceToLeft)
+    {
+        // Unconditionally creates a node holding 'value' and attaches it as a child of
+        // pNodeParent (side chosen by comparison, or forced left), then rebalances via
+        // RBTreeInsert and bumps mnSize. The caller has already validated the parent.
+        RBTreeSide  side;
+        extract_key extractKey;
+
+        // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+        // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+        // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+        if(bForceToLeft || (pNodeParent == &mAnchor) || mCompare(extractKey(value), extractKey(pNodeParent->mValue)))
+            side = kRBTreeSideLeft;
+        else
+            side = kRBTreeSideRight;
+
+        node_type* const pNodeNew = DoCreateNode(value); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+        RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+        mnSize++;
+
+        return iterator(pNodeNew);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::DoInsertKeyImpl(node_type* pNodeParent, const key_type& key, bool bForceToLeft)
+    {
+        // Key-only twin of DoInsertValueImpl: constructs the node's value from just the
+        // key (DoCreateNodeFromKey) rather than from a full value_type.
+        RBTreeSide  side;
+        extract_key extractKey;
+
+        // The reason we may want to have bForceToLeft == true is that pNodeParent->mValue and value may be equal.
+        // In that case it doesn't matter what side we insert on, except that the C++ LWG #233 improvement report
+        // suggests that we should use the insert hint position to force an ordering. So that's what we do.
+        if(bForceToLeft || (pNodeParent == &mAnchor) || mCompare(key, extractKey(pNodeParent->mValue)))
+            side = kRBTreeSideLeft;
+        else
+            side = kRBTreeSideRight;
+
+        node_type* const pNodeNew = DoCreateNodeFromKey(key); // Note that pNodeNew->mpLeft, mpRight, mpParent, will be uninitialized.
+        RBTreeInsert(pNodeNew, pNodeParent, &mAnchor, side);
+        mnSize++;
+
+        return iterator(pNodeNew);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    template <typename InputIterator>
+    void rbtree<K, V, C, A, E, bM, bU>::insert(InputIterator first, InputIterator last)
+    {
+        // Inserts each element of [first, last) individually, honoring the tree's
+        // unique/non-unique key policy via has_unique_keys_type dispatch.
+        for( ; first != last; ++first)
+            DoInsertValue(*first, has_unique_keys_type()); // Or maybe we should call 'insert(end(), *first)' instead. If the first-last range was sorted then this might make some sense.
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::clear()
+    {
+        // Destroys and frees every node, then restores the anchor to the empty state.
+        // Erase the entire tree. DoNukeSubtree is not a
+        // conventional erase function, as it does no rebalancing.
+        DoNukeSubtree((node_type*)mAnchor.mpNodeParent);
+        reset();
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::reset()
+    {
+        // The reset function is a special extension function which unilaterally
+        // resets the container to an empty state without freeing the memory of
+        // the contained objects. This is useful for very quickly tearing down a
+        // container built into scratch memory.
+        mAnchor.mpNodeRight  = &mAnchor; // An empty tree's anchor points left and right at itself...
+        mAnchor.mpNodeLeft   = &mAnchor;
+        mAnchor.mpNodeParent = NULL;     // ...and has no root.
+        mAnchor.mColor       = kRBTreeColorRed;
+        mnSize               = 0;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(iterator position)
+    {
+        // Erases the node at 'position' and returns an iterator to the node that followed it.
+        const iterator iErase(position);
+        --mnSize; // Interleave this between the two uses of 'position'. We expect no exceptions to occur during the code below.
+        ++position;
+        RBTreeErase(iErase.mpNode, &mAnchor);
+        DoFreeNode(iErase.mpNode);
+        return position;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(iterator first, iterator last)
+    {
+        // Erases [first, last). When the range is the whole container we take the
+        // much cheaper clear() path (no per-node rebalancing).
+        // We expect that if the user means to clear the container, they will call clear.
+        if(EASTL_LIKELY((first.mpNode != mAnchor.mpNodeLeft) || (last.mpNode != &mAnchor))) // If (first != begin or last != end) ...
+        {
+            // Basic implementation:
+            while(first != last)
+                first = erase(first);
+            return first;
+
+            // Inlined implementation:
+            //size_type n = 0;
+            //while(first != last)
+            //{
+            //    const iterator itErase(first);
+            //    ++n;
+            //    ++first;
+            //    RBTreeErase(itErase.mpNode, &mAnchor);
+            //    DoFreeNode(itErase.mpNode);
+            //}
+            //mnSize -= n;
+            //return first;
+        }
+
+        clear();
+        return iterator((node_type*)&mAnchor); // Same as: return end();
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(reverse_iterator position)
+    {
+        // (++position).base() yields the forward iterator addressing the same element
+        // the reverse iterator refers to; erase it and wrap the result back up.
+        return reverse_iterator(erase((++position).base()));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::reverse_iterator
+    rbtree<K, V, C, A, E, bM, bU>::erase(reverse_iterator first, reverse_iterator last)
+    {
+        // Version which erases in order from first to last.
+        // difference_type i(first.base() - last.base());
+        // while(i--)
+        //     first = erase(first);
+        // return first;
+
+        // Version which erases in order from last to first, but is slightly more efficient:
+        // Map the reverse range onto the equivalent forward range and reuse the forward erase.
+        return reverse_iterator(erase((++last).base(), (++first).base()));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::erase(const key_type* first, const key_type* last)
+    {
+        // Erases, by key, every key in the array range [first, last).
+        // We have no choice but to run a loop like this, as the first/last range could
+        // have values that are discontiguously located in the tree. And some may not
+        // even be in the tree.
+        while(first != last)
+            erase(*first++);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key)
+    {
+        // Returns an iterator to a node whose key is equivalent to 'key', or end() if
+        // there is none. Effectively a lower_bound walk followed by an equality check.
+        //
+        // To consider: Implement this instead via calling lower_bound and
+        // inspecting the result. The following is an implementation of this:
+        //     const iterator it(lower_bound(key));
+        //     return ((it.mpNode == &mAnchor) || mCompare(key, extractKey(it.mpNode->mValue))) ? iterator(&mAnchor) : it;
+        // We don't currently implement the above because in practice people tend to call
+        // find a lot with trees, but very uncommonly call lower_bound.
+        extract_key extractKey;
+
+        node_type* pCurrent  = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+        node_type* pRangeEnd = (node_type*)&mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(!mCompare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+            {
+                pRangeEnd = pCurrent; // pRangeEnd tracks the lowest node seen that is >= key.
+                pCurrent  = (node_type*)pCurrent->mpNodeLeft;
+            }
+            else
+            {
+                EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+                pCurrent = (node_type*)pCurrent->mpNodeRight;
+            }
+        }
+
+        // Equivalence check: pRangeEnd >= key by construction, so !(key < pRangeEnd) means equal.
+        if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !mCompare(key, extractKey(pRangeEnd->mValue))))
+            return iterator(pRangeEnd);
+        return iterator((node_type*)&mAnchor);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::find(const key_type& key) const
+    {
+        // Const overload: forwards to the non-const find (which does not mutate the tree).
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->find(key));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    template <typename U, typename Compare2>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2)
+    {
+        // Same algorithm as find(), but searches with an alternate key type U and a
+        // user-supplied comparison. compare2 must order U against key_type consistently
+        // with the tree's own ordering, or the walk below will miss the element.
+        extract_key extractKey;
+
+        node_type* pCurrent  = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+        node_type* pRangeEnd = (node_type*)&mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(!compare2(extractKey(pCurrent->mValue), u))) // If pCurrent is >= u...
+            {
+                pRangeEnd = pCurrent;
+                pCurrent  = (node_type*)pCurrent->mpNodeLeft;
+            }
+            else
+            {
+                EASTL_VALIDATE_COMPARE(!compare2(u, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+                pCurrent = (node_type*)pCurrent->mpNodeRight;
+            }
+        }
+
+        if(EASTL_LIKELY((pRangeEnd != &mAnchor) && !compare2(u, extractKey(pRangeEnd->mValue))))
+            return iterator(pRangeEnd);
+        return iterator((node_type*)&mAnchor);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    template <typename U, typename Compare2>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::find_as(const U& u, Compare2 compare2) const
+    {
+        // Const overload: forwards to the non-const find_as (which does not mutate the tree).
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->find_as(u, compare2));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key)
+    {
+        // Returns an iterator to the first node whose key is not less than 'key',
+        // or end() if every key is less than 'key'.
+        extract_key extractKey;
+
+        node_type* pCurrent  = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+        node_type* pRangeEnd = (node_type*)&mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(!mCompare(extractKey(pCurrent->mValue), key))) // If pCurrent is >= key...
+            {
+                pRangeEnd = pCurrent; // Remember the lowest >= node found so far and keep looking left.
+                pCurrent  = (node_type*)pCurrent->mpNodeLeft;
+            }
+            else
+            {
+                EASTL_VALIDATE_COMPARE(!mCompare(key, extractKey(pCurrent->mValue))); // Validate that the compare function is sane.
+                pCurrent = (node_type*)pCurrent->mpNodeRight;
+            }
+        }
+
+        return iterator(pRangeEnd);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::lower_bound(const key_type& key) const
+    {
+        // Const overload: forwards to the non-const lower_bound (which does not mutate the tree).
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->lower_bound(key));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::iterator
+    rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key)
+    {
+        // Returns an iterator to the first node whose key is greater than 'key',
+        // or end() if no such node exists.
+        extract_key extractKey;
+
+        node_type* pCurrent  = (node_type*)mAnchor.mpNodeParent; // Start with the root node.
+        node_type* pRangeEnd = (node_type*)&mAnchor;             // Set it to the container end for now.
+
+        while(EASTL_LIKELY(pCurrent)) // Do a walk down the tree.
+        {
+            if(EASTL_LIKELY(mCompare(key, extractKey(pCurrent->mValue)))) // If key is < pCurrent...
+            {
+                EASTL_VALIDATE_COMPARE(!mCompare(extractKey(pCurrent->mValue), key)); // Validate that the compare function is sane.
+                pRangeEnd = pCurrent; // Remember the lowest > node found so far and keep looking left.
+                pCurrent  = (node_type*)pCurrent->mpNodeLeft;
+            }
+            else
+                pCurrent = (node_type*)pCurrent->mpNodeRight;
+        }
+
+        return iterator(pRangeEnd);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::const_iterator
+    rbtree<K, V, C, A, E, bM, bU>::upper_bound(const key_type& key) const
+    {
+        // Const overload: forwards to the non-const upper_bound (which does not mutate the tree).
+        typedef rbtree<K, V, C, A, E, bM, bU> rbtree_type;
+        return const_iterator(const_cast<rbtree_type*>(this)->upper_bound(key));
+    }
+
+
+    // To do: Move this validate function entirely to a template-less implementation.
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    bool rbtree<K, V, C, A, E, bM, bU>::validate() const
+    {
+        // Red-black trees have the following canonical properties which we validate here:
+        //   1 Every node is either red or black.
+        //   2 Every leaf (NULL) is black by definition. Any number of black nodes may appear in a sequence.
+        //   3 If a node is red, then both its children are black. Thus, on any path from
+        //     the root to a leaf, red nodes must not be adjacent.
+        //   4 Every simple path from a node to a descendant leaf contains the same number of black nodes.
+        //   5 The mnSize member of the tree must equal the number of nodes in the tree.
+        //   6 The tree is sorted as per a conventional binary tree.
+        //   7 The comparison function is sane; it obeys strict weak ordering. If mCompare(a,b) is true, then mCompare(b,a) must be false. Both cannot be true.
+        //
+        // Returns true if all of the above hold; false on the first violation found.
+
+        extract_key extractKey;
+
+        if(mnSize)
+        {
+            // Verify basic integrity.
+            //if(!mAnchor.mpNodeParent || (mAnchor.mpNodeLeft == mAnchor.mpNodeRight))
+            //    return false;             // Fix this for case of empty tree.
+
+            if(mAnchor.mpNodeLeft != RBTreeGetMinChild(mAnchor.mpNodeParent))
+                return false;
+
+            if(mAnchor.mpNodeRight != RBTreeGetMaxChild(mAnchor.mpNodeParent))
+                return false;
+
+            // Black-node count along the leftmost path; every other leaf path must match it (item #4).
+            const size_t nBlackCount   = RBTreeGetBlackCount(mAnchor.mpNodeParent, mAnchor.mpNodeLeft);
+            size_type    nIteratedSize = 0;
+
+            for(const_iterator it = begin(); it != end(); ++it, ++nIteratedSize)
+            {
+                const node_type* const pNode      = (const node_type*)it.mpNode;
+                const node_type* const pNodeRight = (const node_type*)pNode->mpNodeRight;
+                const node_type* const pNodeLeft  = (const node_type*)pNode->mpNodeLeft;
+
+                // Verify #7 above.
+                if(pNodeRight && mCompare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)) && mCompare(extractKey(pNode->mValue), extractKey(pNodeRight->mValue))) // Validate that the compare function is sane.
+                    return false;
+
+                // Verify #7 above.
+                if(pNodeLeft && mCompare(extractKey(pNodeLeft->mValue), extractKey(pNode->mValue)) && mCompare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue))) // Validate that the compare function is sane.
+                    return false;
+
+                // Verify item #1 above.
+                if((pNode->mColor != kRBTreeColorRed) && (pNode->mColor != kRBTreeColorBlack))
+                    return false;
+
+                // Verify item #3 above.
+                if(pNode->mColor == kRBTreeColorRed)
+                {
+                    if((pNodeRight && (pNodeRight->mColor == kRBTreeColorRed)) ||
+                       (pNodeLeft && (pNodeLeft->mColor == kRBTreeColorRed)))
+                        return false;
+                }
+
+                // Verify item #6 above.
+                if(pNodeRight && mCompare(extractKey(pNodeRight->mValue), extractKey(pNode->mValue)))
+                    return false;
+
+                if(pNodeLeft && mCompare(extractKey(pNode->mValue), extractKey(pNodeLeft->mValue)))
+                    return false;
+
+                if(!pNodeRight && !pNodeLeft) // If we are at a bottom node of the tree...
+                {
+                    // Verify item #4 above.
+                    if(RBTreeGetBlackCount(mAnchor.mpNodeParent, pNode) != nBlackCount)
+                        return false;
+                }
+            }
+
+            // Verify item #5 above.
+            if(nIteratedSize != mnSize)
+                return false;
+
+            return true;
+        }
+        else
+        {
+            // Empty tree: the anchor must point left and right at itself.
+            if((mAnchor.mpNodeLeft != &mAnchor) || (mAnchor.mpNodeRight != &mAnchor))
+                return false;
+        }
+
+        return true;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline int rbtree<K, V, C, A, E, bM, bU>::validate_iterator(const_iterator i) const
+    {
+        // Classifies iterator 'i' relative to this container, returning a bitmask of
+        // isf_* flags. Note that this is an O(n) scan of the whole container.
+        // To do: Come up with a more efficient mechanism of doing this.
+
+        for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+        {
+            if(temp == i)
+                return (isf_valid | isf_current | isf_can_dereference);
+        }
+
+        if(i == end())
+            return (isf_valid | isf_current); // end() is valid but not dereferenceable.
+
+        return isf_none; // 'i' does not belong to this container.
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoAllocateNode()
+    {
+        // Allocates raw (uninitialized) storage for one node; no constructor is run here.
+        return (node_type*)allocate_memory(mAllocator, sizeof(node_type), kValueAlignment, kValueAlignmentOffset);
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    inline void rbtree<K, V, C, A, E, bM, bU>::DoFreeNode(node_type* pNode)
+    {
+        // Destroys the node in place and returns its storage to the allocator.
+        pNode->~node_type();
+        EASTLFree(mAllocator, pNode, sizeof(node_type));
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNodeFromKey(const key_type& key)
+    {
+        // Allocates a node and placement-constructs its value from just the key.
+        // If the value constructor throws, the allocation is released before rethrowing.
+        //
+        // Note that this function intentionally leaves the node pointers uninitialized.
+        // The caller would otherwise just turn right around and modify them, so there's
+        // no point in us initializing them to anything (except in a debug build).
+        node_type* const pNode = DoAllocateNode();
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                ::new(&pNode->mValue) value_type(key);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoFreeNode(pNode);
+                throw;
+            }
+        #endif
+
+        #if EASTL_DEBUG
+            // Debug builds null the links and blacken the node so stale pointers are obvious.
+            pNode->mpNodeRight  = NULL;
+            pNode->mpNodeLeft   = NULL;
+            pNode->mpNodeParent = NULL;
+            pNode->mColor       = kRBTreeColorBlack;
+        #endif
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const value_type& value)
+    {
+        // Allocates a node and placement-copy-constructs its value.
+        // If the copy constructor throws, the allocation is released before rethrowing.
+        //
+        // Note that this function intentionally leaves the node pointers uninitialized.
+        // The caller would otherwise just turn right around and modify them, so there's
+        // no point in us initializing them to anything (except in a debug build).
+        node_type* const pNode = DoAllocateNode();
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                ::new(&pNode->mValue) value_type(value);
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoFreeNode(pNode);
+                throw;
+            }
+        #endif
+
+        #if EASTL_DEBUG
+            // Debug builds null the links and blacken the node so stale pointers are obvious.
+            pNode->mpNodeRight  = NULL;
+            pNode->mpNodeLeft   = NULL;
+            pNode->mpNodeParent = NULL;
+            pNode->mColor       = kRBTreeColorBlack;
+        #endif
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCreateNode(const node_type* pNodeSource, node_type* pNodeParent)
+    {
+        // Clones pNodeSource's value and color into a new node parented at pNodeParent,
+        // with both child links cleared. Used by DoCopySubtree during tree copies.
+        node_type* const pNode = DoCreateNode(pNodeSource->mValue);
+
+        pNode->mpNodeRight  = NULL;
+        pNode->mpNodeLeft   = NULL;
+        pNode->mpNodeParent = pNodeParent;
+        pNode->mColor       = pNodeSource->mColor;
+
+        return pNode;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    typename rbtree<K, V, C, A, E, bM, bU>::node_type*
+    rbtree<K, V, C, A, E, bM, bU>::DoCopySubtree(const node_type* pNodeSource, node_type* pNodeDest)
+    {
+        // Deep-copies the subtree rooted at pNodeSource, parenting the copy under
+        // pNodeDest. Right subtrees are copied recursively; the left spine is walked
+        // iteratively. On exception the partially built copy is destroyed and rethrown.
+        node_type* const pNewNodeRoot = DoCreateNode(pNodeSource, pNodeDest);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+        #endif
+                // Copy the right side of the tree recursively.
+                if(pNodeSource->mpNodeRight)
+                    pNewNodeRoot->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeRoot);
+
+                node_type* pNewNodeLeft;
+
+                // Walk down the left spine, cloning each node and its right subtree.
+                for(pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeRoot;
+                    pNodeSource;
+                    pNodeSource = (node_type*)pNodeSource->mpNodeLeft, pNodeDest = pNewNodeLeft)
+                {
+                    pNewNodeLeft = DoCreateNode(pNodeSource, pNodeDest);
+
+                    pNodeDest->mpNodeLeft = pNewNodeLeft;
+
+                    // Copy the right side of the tree recursively.
+                    if(pNodeSource->mpNodeRight)
+                        pNewNodeLeft->mpNodeRight = DoCopySubtree((const node_type*)pNodeSource->mpNodeRight, pNewNodeLeft);
+                }
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                DoNukeSubtree(pNewNodeRoot);
+                throw;
+            }
+        #endif
+
+        return pNewNodeRoot;
+    }
+
+
+    template <typename K, typename V, typename C, typename A, typename E, bool bM, bool bU>
+    void rbtree<K, V, C, A, E, bM, bU>::DoNukeSubtree(node_type* pNode)
+    {
+        // Destroys every node in the subtree rooted at pNode without any rebalancing:
+        // right children are destroyed by recursion, the left spine by iteration.
+        while(pNode) // Traverse the tree and destroy items as we go.
+        {
+            DoNukeSubtree((node_type*)pNode->mpNodeRight);
+
+            node_type* const pNodeLeft = (node_type*)pNode->mpNodeLeft;
+            DoFreeNode(pNode);
+            pNode = pNodeLeft;
+        }
+    }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+    // Two trees are equal when they hold the same number of elements and the
+    // elements compare equal in order (element-wise operator==, not mCompare).
+    template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+    inline bool operator==(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
+    }
+
+
+    // Note that in operator< we do comparisons based on the tree value_type with operator<() of the
+    // value_type instead of the tree's Compare function. For set/multiset, the value_type is T, while
+    // for map/multimap the value_type is a pair<Key, T>. operator< for pair can be seen by looking at
+    // utility.h, but it basically uses operator< for pair.first and pair.second. The C++ standard
+    // appears to require this behaviour, whether intentionally or not. If anything, a good reason to do
+    // this is for consistency. A map and a vector that contain the same items should compare the same.
+    template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+    inline bool operator<(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+    }
+
+
+    // The remaining comparison operators are all derived from operator== and operator<.
+    template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+    inline bool operator!=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        return !(a == b);
+    }
+
+
+    template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+    inline bool operator>(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        return b < a;
+    }
+
+
+    template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+    inline bool operator<=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        return !(b < a);
+    }
+
+
+    template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+    inline bool operator>=(const rbtree<K, V, C, A, E, bM, bU>& a, const rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        return !(a < b);
+    }
+
+
+    // ADL swap: forwards to the member swap.
+    template <typename K, typename V, typename A, typename C, typename E, bool bM, bool bU>
+    inline void swap(rbtree<K, V, C, A, E, bM, bU>& a, rbtree<K, V, C, A, E, bM, bU>& b)
+    {
+        a.swap(b);
+    }
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/type_compound.h b/UnknownVersion/include/EASTL/internal/type_compound.h
new file mode 100644
index 0000000..8bbe9ae
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/type_compound.h
@@ -0,0 +1,485 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_compound.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_COMPOUND_H
+#define EASTL_INTERNAL_TYPE_COMPOUND_H
+
+
+namespace eastl
+{
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+ // is_array
+ // is_pointer
+ // is_reference
+ // is_member_object_pointer
+ // is_member_function_pointer
+ // is_member_pointer
+ // is_enum
+ // is_union
+ // is_class
+ // is_polymorphic
+ // is_function
+ // is_object
+ // is_scalar
+ // is_compound
+ // is_same
+ // is_convertible
+
	///////////////////////////////////////////////////////////////////////
	// is_array
	//
	// is_array<T>::value == true if and only if T is an array type.
	// As of this writing, the SNC compiler (EDG-based) doesn't compile
	// the code below and says that returning an array is illegal.
	//
	// Detection technique: is_array_tester1 is declared to return a pointer
	// to a function whose return type is T. Functions cannot return array
	// types, so when T is an array that declaration is not viable and
	// overload resolution falls back to the 'char' overload.
	// is_array_tester2 then distinguishes the two outcomes: its no_type
	// overload accepts the function pointer (non-array case), while the
	// '...' overload (yes_type) catches the char (array case).
	// NOTE(review): the final '== 1' assumes sizeof(yes_type) == 1 — that
	// matches the conventional definition of yes_type, but confirm against
	// its actual declaration (defined elsewhere in EASTL).
	///////////////////////////////////////////////////////////////////////
	template <typename T>
	T (*is_array_tester1(empty<T>))(empty<T>);
	char is_array_tester1(...); // May need to use __cdecl under VC++.

	template <typename T>
	no_type is_array_tester2(T(*)(empty<T>));
	yes_type is_array_tester2(...); // May need to use __cdecl under VC++.

	// Provides an empty<T> lvalue to feed the testers inside sizeof; it is
	// declared but never defined, which is fine in an unevaluated context.
	template <typename T>
	struct is_array_helper {
		static empty<T> emptyInstance;
	};

	template <typename T>
	struct is_array : public integral_constant<bool,
		sizeof(is_array_tester2(is_array_tester1(is_array_helper<T>::emptyInstance))) == 1
	>{};
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_reference
	//
	// is_reference<T>::value == true if and only if T is a reference type.
	// This category includes reference to function types.
	//
	// The primary template yields false; the T& partial specialization
	// catches every lvalue reference. (No T&& specialization exists: this
	// header predates rvalue references.)
	///////////////////////////////////////////////////////////////////////
	template <typename T> struct is_reference : public false_type{};
	template <typename T> struct is_reference<T&> : public true_type{};
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_member_function_pointer
	//
	// is_member_function_pointer<T>::value == true if and only if T is a
	// pointer to member function type.
	//
	///////////////////////////////////////////////////////////////////////
	// We detect member functions with 0 to N arguments. We can extend this
	// for additional arguments if necessary.
	// To do: Make volatile and const volatile versions of these in addition to non-const and const.
	//
	// Each pair of specializations below covers one arity (0 through 7
	// parameters), in its non-const and const-qualified forms. Member
	// functions with more than 7 parameters, or with volatile/const
	// volatile qualification, are NOT detected by this list.
	///////////////////////////////////////////////////////////////////////
	template <typename T> struct is_mem_fun_pointer_value : public false_type{};
	template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)()> : public true_type{};
	template <typename R, typename T> struct is_mem_fun_pointer_value<R (T::*)() const> : public true_type{};
	template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0)> : public true_type{};
	template <typename R, typename T, typename Arg0> struct is_mem_fun_pointer_value<R (T::*)(Arg0) const> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1)> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1) const> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2)> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2) const> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3)> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3) const> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4)> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4) const> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5)> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5) const> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6)> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6) const> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7)> : public true_type{};
	template <typename R, typename T, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_mem_fun_pointer_value<R (T::*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7) const> : public true_type{};

	template <typename T>
	struct is_member_function_pointer : public integral_constant<bool, is_mem_fun_pointer_value<T>::value>{};
+
+
	///////////////////////////////////////////////////////////////////////
	// is_member_pointer
	//
	// is_member_pointer<T>::value == true if and only if:
	//    is_member_object_pointer<T>::value == true, or
	//    is_member_function_pointer<T>::value == true
	//
	// The U T::* partial specialization catches member pointers (member
	// object pointers, and member function pointers via U deducing to a
	// function type). The primary template additionally folds in
	// is_member_function_pointer — presumably a workaround for compilers
	// where a member function pointer fails to match U T::*; TODO confirm
	// which compilers require this.
	///////////////////////////////////////////////////////////////////////
	template <typename T>
	struct is_member_pointer : public integral_constant<bool, is_member_function_pointer<T>::value>{};

	template <typename T, typename U> struct is_member_pointer<U T::*> : public true_type{};
+
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_pointer
	//
	// is_pointer<T>::value == true if and only if T is a pointer type.
	// This category includes function pointer types, but not pointer to
	// member types.
	//
	///////////////////////////////////////////////////////////////////////
	// Each cv-qualified form of "pointer to T" needs its own partial
	// specialization; T* alone does not match T* const, etc.
	template <typename T> struct is_pointer_helper : public false_type{};

	template <typename T> struct is_pointer_helper<T*> : public true_type{};
	template <typename T> struct is_pointer_helper<T* const> : public true_type{};
	template <typename T> struct is_pointer_helper<T* volatile> : public true_type{};
	template <typename T> struct is_pointer_helper<T* const volatile> : public true_type{};

	// type_and/type_not are eastl boolean metafunctions (defined elsewhere).
	// Member pointers are explicitly masked out, per the contract above —
	// NOTE(review): T* should never deduce a pointer-to-member anyway, so
	// the mask appears defensive; confirm before removing it.
	template <typename T>
	struct is_pointer_value : public type_and<is_pointer_helper<T>::value, type_not<is_member_pointer<T>::value>::value> {};

	template <typename T>
	struct is_pointer : public integral_constant<bool, is_pointer_value<T>::value>{};
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_same
	//
	// Given two (possibly identical) types T and U, is_same<T, U>::value == true
	// if and only if T and U are the same type.
	//
	// Note that cv-qualifiers are significant: only an exact type match
	// satisfies the <T, T> partial specialization, so e.g. T and const T
	// are NOT the same.
	///////////////////////////////////////////////////////////////////////
	template<typename T, typename U>
	struct is_same : public false_type { };

	template<typename T>
	struct is_same<T, T> : public true_type { };
+
+
	///////////////////////////////////////////////////////////////////////
	// is_convertible
	//
	// Given two (possibly identical) types From and To, is_convertible<From, To>::value == true
	// if and only if an lvalue of type From can be implicitly converted to type To,
	// or is_void<To>::value == true
	//
	// is_convertible may only be applied to complete types.
	// Type To may not be an abstract type.
	// If the conversion is ambiguous, the program is ill-formed.
	// If either or both of From and To are class types, and the conversion would invoke
	// non-public member functions of either From or To (such as a private constructor of To,
	// or a private conversion operator of From), the program is ill-formed.
	//
	// Note that without compiler help, both is_convertible and is_base
	// can produce compiler errors if the conversion is ambiguous.
	// Example:
	//    struct A {};
	//    struct B : A {};
	//    struct C : A {};
	//    struct D : B, C {};
	//    is_convertible<D*, A*>::value; // Generates compiler error.
	///////////////////////////////////////////////////////////////////////
	#if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x doesn't like the code below.
		// Overload trick: Test(To) is selected (yes_type) when 'from'
		// implicitly converts to To; otherwise the '...' overload (no_type)
		// is selected. 'from' is declared but never defined — it appears
		// only inside sizeof, an unevaluated context, so no definition is
		// required.
		template <typename From, typename To, bool is_from_void = false, bool is_to_void = false>
		struct is_convertible_helper {
			static yes_type Test(To); // May need to use __cdecl under VC++.
			static no_type Test(...); // May need to use __cdecl under VC++.
			static From from;
			typedef integral_constant<bool, sizeof(Test(from)) == sizeof(yes_type)> result;
		};

		// void is not convertible to non-void
		template <typename From, typename To>
		struct is_convertible_helper<From, To, true, false> { typedef false_type result; };

		// Anything is convertible to void
		template <typename From, typename To, bool is_from_void>
		struct is_convertible_helper<From, To, is_from_void, true> { typedef true_type result; };

		// The void cases are dispatched up front so that the sizeof-based
		// helper never has to form a function taking or returning void.
		template <typename From, typename To>
		struct is_convertible : public is_convertible_helper<From, To, is_void<From>::value, is_void<To>::value>::result {};

	#else
		// GCC 2.x fallback: always reports false rather than failing to compile.
		template <typename From, typename To>
		struct is_convertible : public false_type{};
	#endif
+
+
	///////////////////////////////////////////////////////////////////////
	// is_union
	//
	// is_union<T>::value == true if and only if T is a union type.
	//
	// There is no way to tell if a type is a union without compiler help.
	// As of this writing, only Metrowerks v8+ supports such functionality
	// via 'msl::is_union<T>::value'. The user can force something to be
	// evaluated as a union via EASTL_DECLARE_UNION.
	///////////////////////////////////////////////////////////////////////
	template <typename T> struct is_union : public false_type{};

	// Declares T (and const T) as a union. The macro opens namespace eastl
	// itself, so it must be invoked at global scope.
	#define EASTL_DECLARE_UNION(T) namespace eastl{ template <> struct is_union<T> : public true_type{}; template <> struct is_union<const T> : public true_type{}; }
+
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_class
	//
	// is_class<T>::value == true if and only if T is a class or struct
	// type (and not a union type).
	//
	// Without specific compiler help, it is not possible to
	// distinguish between unions and classes. As a result, is_class
	// will erroneously evaluate to true for union types.
	///////////////////////////////////////////////////////////////////////
	#if defined(__MWERKS__)
		// To do: Switch this to use msl_utility type traits.
		template <typename T>
		struct is_class : public false_type{};
	#elif !defined(__GNUC__) || (((__GNUC__ * 100) + __GNUC_MINOR__) >= 304) // Not GCC or GCC 3.4+
		// Detection trick: the member-pointer type 'void (U::*)()' can only
		// be formed when U is a class (or union) type, so the first overload
		// is viable exactly for class-like types; is_union then masks out
		// unions where the user has declared them.
		template <typename U> static yes_type is_class_helper(void (U::*)());
		template <typename U> static no_type is_class_helper(...);

		template <typename T>
		struct is_class : public integral_constant<bool,
			sizeof(is_class_helper<T>(0)) == sizeof(yes_type) && !is_union<T>::value
		>{};
	#else
		// GCC 2.x version, due to GCC being broken.
		template <typename T>
		struct is_class : public false_type{};
	#endif
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_enum
	//
	// is_enum<T>::value == true if and only if T is an enumeration type.
	//
	// Strategy: an enum implicitly converts to int (and hence to
	// int_convertible below), while class types generally do not. So the
	// selector first rules out the other categories that also convert to
	// int (arithmetic types, references) plus class types, and only for
	// the remaining candidates runs the conversion test.
	///////////////////////////////////////////////////////////////////////
	// Helper whose sole purpose is being implicitly constructible from int.
	struct int_convertible{ int_convertible(int); };

	// Default case: candidate category — test convertibility to int.
	template <bool is_arithmetic_or_reference>
	struct is_enum_helper { template <typename T> struct nest : public is_convertible<T, int_convertible>{}; };

	// Already classified as arithmetic/reference/class: cannot be an enum.
	template <>
	struct is_enum_helper<true> { template <typename T> struct nest : public false_type {}; };

	template <typename T>
	struct is_enum_helper2
	{
		typedef type_or<is_arithmetic<T>::value, is_reference<T>::value, is_class<T>::value> selector;
		typedef is_enum_helper<selector::value> helper_t;
		// The convertibility test is run on T& (add_reference) so that the
		// helper never needs to copy-construct a T.
		typedef typename add_reference<T>::type ref_t;
		typedef typename helper_t::template nest<ref_t> result;
	};

	template <typename T>
	struct is_enum : public integral_constant<bool, is_enum_helper2<T>::result::value>{};

	// void would otherwise fall through to the conversion test; carve out
	// all four cv-qualified forms explicitly.
	template <> struct is_enum<void> : public false_type {};
	template <> struct is_enum<void const> : public false_type {};
	template <> struct is_enum<void volatile> : public false_type {};
	template <> struct is_enum<void const volatile> : public false_type {};

	// Declares T (and const T) as an enum. The macro opens namespace eastl
	// itself, so it must be invoked at global scope.
	#define EASTL_DECLARE_ENUM(T) namespace eastl{ template <> struct is_enum<T> : public true_type{}; template <> struct is_enum<const T> : public true_type{}; }
+
+
	///////////////////////////////////////////////////////////////////////
	// is_polymorphic
	//
	// is_polymorphic<T>::value == true if and only if T is a class or struct
	// that declares or inherits a virtual function. is_polymorphic may only
	// be applied to complete types.
	//
	// Technique: derive two helpers from T, identical except that helper_2
	// introduces a virtual destructor. If T is already polymorphic, the
	// compiler reuses T's existing vptr and both helpers have the same
	// size; if T is not polymorphic, helper_2 grows by a vtable pointer.
	///////////////////////////////////////////////////////////////////////
	template <typename T>
	struct is_polymorphic_imp1
	{
		typedef typename remove_cv<T>::type t;

		struct helper_1 : public t
		{
			helper_1();
			~helper_1() throw();
			// Padding presumably guards against alignment/layout quirks
			// masking the vptr-size difference — TODO confirm.
			char pad[64];
		};

		struct helper_2 : public t
		{
			helper_2();
			virtual ~helper_2() throw();
			#ifndef _MSC_VER
				virtual void foo();
			#endif
			char pad[64];
		};

		static const bool value = (sizeof(helper_1) == sizeof(helper_2));
	};

	// Non-class types are never polymorphic.
	template <typename T>
	struct is_polymorphic_imp2{ static const bool value = false; };

	// The selector routes class types to the sizeof test above (it requires
	// deriving from T, which is only legal for class types) and everything
	// else to the constant-false implementation.
	template <bool is_class>
	struct is_polymorphic_selector{ template <typename T> struct rebind{ typedef is_polymorphic_imp2<T> type; }; };

	template <>
	struct is_polymorphic_selector<true>{ template <typename T> struct rebind{ typedef is_polymorphic_imp1<T> type; }; };

	template <typename T>
	struct is_polymorphic_value{
		typedef is_polymorphic_selector<is_class<T>::value> selector;
		typedef typename selector::template rebind<T> binder;
		typedef typename binder::type imp_type;
		static const bool value = imp_type::value;
	};

	template <typename T>
	struct is_polymorphic : public integral_constant<bool, is_polymorphic_value<T>::value>{};
+
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_function
	//
	// is_function<T>::value == true if and only if T is a function type.
	//
	// Technique: T is a function type exactly when T* matches one of the
	// function-pointer patterns below. References must be filtered out
	// first, since forming T* is invalid when T is a reference type.
	// Only functions of 0 to 8 parameters are detected by this list.
	///////////////////////////////////////////////////////////////////////
	template <typename R> struct is_function_ptr_helper : public false_type{};
	template <typename R> struct is_function_ptr_helper<R (*)()> : public true_type{};
	template <typename R, typename Arg0> struct is_function_ptr_helper<R (*)(Arg0)> : public true_type{};
	template <typename R, typename Arg0, typename Arg1> struct is_function_ptr_helper<R (*)(Arg0, Arg1)> : public true_type{};
	template <typename R, typename Arg0, typename Arg1, typename Arg2> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2)> : public true_type{};
	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3)> : public true_type{};
	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4)> : public true_type{};
	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5)> : public true_type{};
	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6)> : public true_type{};
	template <typename R, typename Arg0, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6, typename Arg7> struct is_function_ptr_helper<R (*)(Arg0, Arg1, Arg2, Arg3, Arg4, Arg5, Arg6, Arg7)> : public true_type{};

	// References: always false (the default chooser).
	template <bool is_ref = true>
	struct is_function_chooser{ template <typename T> struct result_ : public false_type{}; };

	// Non-references: test whether T* is a function pointer.
	template <>
	struct is_function_chooser<false>{ template <typename T> struct result_ : public is_function_ptr_helper<T*>{}; };

	template <typename T>
	struct is_function_value : public is_function_chooser<is_reference<T>::value>::template result_<T>{};

	template <typename T>
	struct is_function : public integral_constant<bool, is_function_value<T>::value>{};
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_object
+ //
+ // is_object<T>::value == true if and only if:
+ // is_reference<T>::value == false, and
+ // is_function<T>::value == false, and
+ // is_void<T>::value == false
+ //
+ // The C++ standard, section 3.9p9, states: "An object type is a
+ // (possibly cv-qualified) type that is not a function type, not a
+ // reference type, and not incomplete (except for an incompletely
+ // defined object type).
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T>
+ struct is_object : public integral_constant<bool,
+ !is_reference<T>::value && !is_void<T>::value && !is_function<T>::value
+ >{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_scalar
+ //
+ // is_scalar<T>::value == true if and only if:
+ // is_arithmetic<T>::value == true, or
+ // is_enum<T>::value == true, or
+ // is_pointer<T>::value == true, or
+ // is_member_pointer<T>::value
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct is_scalar : public integral_constant<bool, is_arithmetic<T>::value || is_enum<T>::value>{};
+
+ template <typename T> struct is_scalar<T*> : public true_type {};
+ template <typename T> struct is_scalar<T* const> : public true_type {};
+ template <typename T> struct is_scalar<T* volatile> : public true_type {};
+ template <typename T> struct is_scalar<T* const volatile> : public true_type {};
+
+
+
	///////////////////////////////////////////////////////////////////////
	// is_compound
	//
	// Compound means anything but fundamental. See C++ standard, section 3.9.2.
	//
	// is_compound<T>::value == true if and only if:
	//    is_fundamental<T>::value == false
	//
	// Thus, is_compound<T>::value == true if and only if:
	//    is_floating_point<T>::value == false, and
	//    is_integral<T>::value == false, and
	//    is_void<T>::value == false
	//
	///////////////////////////////////////////////////////////////////////
	// Defined simply as the complement of is_fundamental.
	template <typename T>
	struct is_compound : public integral_constant<bool, !is_fundamental<T>::value>{};
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/type_fundamental.h b/UnknownVersion/include/EASTL/internal/type_fundamental.h
new file mode 100644
index 0000000..cc39865
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/type_fundamental.h
@@ -0,0 +1,187 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_fundamental.h
+//
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_FUNDAMENTAL_H
+#define EASTL_INTERNAL_TYPE_FUNDAMENTAL_H
+
+
+namespace eastl
+{
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+
+
	///////////////////////////////////////////////////////////////////////
	// is_void
	//
	// is_void<T>::value == true if and only if T is one of the following types:
	//    [const][volatile] void
	//
	// All four cv-qualified forms of void are covered explicitly, since a
	// specialization on plain void does not match the cv-qualified forms.
	///////////////////////////////////////////////////////////////////////
	template <typename T> struct is_void : public false_type{};

	template <> struct is_void<void> : public true_type{};
	template <> struct is_void<void const> : public true_type{};
	template <> struct is_void<void volatile> : public true_type{};
	template <> struct is_void<void const volatile> : public true_type{};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_integral
+ //
+ // is_integral<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] bool
+ // [const] [volatile] char
+ // [const] [volatile] signed char
+ // [const] [volatile] unsigned char
+ // [const] [volatile] wchar_t
+ // [const] [volatile] short
+ // [const] [volatile] int
+ // [const] [volatile] long
+ // [const] [volatile] long long
+ // [const] [volatile] unsigned short
+ // [const] [volatile] unsigned int
+ // [const] [volatile] unsigned long
+ // [const] [volatile] unsigned long long
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_integral : public false_type{};
+
+ // To do: Need to define volatile and const volatile versions of these.
+ template <> struct is_integral<unsigned char> : public true_type{};
+ template <> struct is_integral<const unsigned char> : public true_type{};
+ template <> struct is_integral<unsigned short> : public true_type{};
+ template <> struct is_integral<const unsigned short> : public true_type{};
+ template <> struct is_integral<unsigned int> : public true_type{};
+ template <> struct is_integral<const unsigned int> : public true_type{};
+ template <> struct is_integral<unsigned long> : public true_type{};
+ template <> struct is_integral<const unsigned long> : public true_type{};
+ template <> struct is_integral<unsigned long long> : public true_type{};
+ template <> struct is_integral<const unsigned long long> : public true_type{};
+
+ template <> struct is_integral<signed char> : public true_type{};
+ template <> struct is_integral<const signed char> : public true_type{};
+ template <> struct is_integral<signed short> : public true_type{};
+ template <> struct is_integral<const signed short> : public true_type{};
+ template <> struct is_integral<signed int> : public true_type{};
+ template <> struct is_integral<const signed int> : public true_type{};
+ template <> struct is_integral<signed long> : public true_type{};
+ template <> struct is_integral<const signed long> : public true_type{};
+ template <> struct is_integral<signed long long> : public true_type{};
+ template <> struct is_integral<const signed long long> : public true_type{};
+
+ template <> struct is_integral<bool> : public true_type{};
+ template <> struct is_integral<const bool> : public true_type{};
+ template <> struct is_integral<char> : public true_type{};
+ template <> struct is_integral<const char> : public true_type{};
+ #ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
+ template <> struct is_integral<wchar_t> : public true_type{};
+ template <> struct is_integral<const wchar_t> : public true_type{};
+ #endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_floating_point
+ //
+ // is_floating_point<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] float
+ // [const] [volatile] double
+ // [const] [volatile] long double
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T> struct is_floating_point : public false_type{};
+
+ // To do: Need to define volatile and const volatile versions of these.
+ template <> struct is_floating_point<float> : public true_type{};
+ template <> struct is_floating_point<const float> : public true_type{};
+ template <> struct is_floating_point<double> : public true_type{};
+ template <> struct is_floating_point<const double> : public true_type{};
+ template <> struct is_floating_point<long double> : public true_type{};
+ template <> struct is_floating_point<const long double> : public true_type{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_arithmetic
+ //
+ // is_arithmetic<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct is_arithmetic : public integral_constant<bool,
+ is_integral<T>::value || is_floating_point<T>::value
+ >{};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_fundamental
+ //
+ // is_fundamental<T>::value == true if and only if:
+ // is_floating_point<T>::value == true, or
+ // is_integral<T>::value == true, or
+ // is_void<T>::value == true
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct is_fundamental : public integral_constant<bool,
+ is_void<T>::value || is_integral<T>::value || is_floating_point<T>::value
+ >{};
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/type_pod.h b/UnknownVersion/include/EASTL/internal/type_pod.h
new file mode 100644
index 0000000..afcf68e
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/type_pod.h
@@ -0,0 +1,306 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_pod.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_POD_H
+#define EASTL_INTERNAL_TYPE_POD_H
+
+
+#include <limits.h>
+
+
+namespace eastl
+{
+
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+ // is_empty
+ // is_pod
+ // has_trivial_constructor
+ // has_trivial_copy
+ // has_trivial_assign
+ // has_trivial_destructor
+ // has_trivial_relocate -- EA extension to the C++ standard proposal.
+ // has_nothrow_constructor
+ // has_nothrow_copy
+ // has_nothrow_assign
+ // has_virtual_destructor
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_empty
+ //
+ // is_empty<T>::value == true if and only if T is an empty class or struct.
+ // is_empty may only be applied to complete types.
+ //
+ // is_empty cannot be used with union types until is_union can be made to work.
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct is_empty_helper_t1 : public T { char m[64]; };
+ struct is_empty_helper_t2 { char m[64]; };
+
+ // The inheritance in empty_helper_t1 will not work with non-class types
+ template <typename T, bool is_a_class = false>
+ struct is_empty_helper : public false_type{};
+
+ template <typename T>
+ struct is_empty_helper<T, true> : public integral_constant<bool,
+ sizeof(is_empty_helper_t1<T>) == sizeof(is_empty_helper_t2)
+ >{};
+
+ template <typename T>
+ struct is_empty_helper2
+ {
+ typedef typename remove_cv<T>::type _T;
+ typedef is_empty_helper<_T, is_class<_T>::value> type;
+ };
+
+ template <typename T>
+ struct is_empty : public is_empty_helper2<T>::type {};
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_pod
+ //
+ // is_pod<T>::value == true if and only if, for a given type T:
+ // - is_scalar<T>::value == true, or
+	//    - T is a class or struct that has no user-defined copy
+	//      assignment operator or destructor, and T has no non-static
+	//      data members M for which is_pod<M>::value == false, and no
+	//      members of reference type, or
+ // - T is the type of an array of objects E for which is_pod<E>::value == true
+ //
+ // is_pod may only be applied to complete types.
+ //
+ // Without some help from the compiler or user, is_pod will not report
+ // that a struct or class is a POD, but will correctly report that
+ // built-in types such as int are PODs. The user can help the compiler
+ // by using the EASTL_DECLARE_POD macro on a class.
+ ///////////////////////////////////////////////////////////////////////
// Without compiler support, only void, scalars, and arrays thereof can be
// detected as PODs here; user classes must opt in via EASTL_DECLARE_POD.
template <typename T> // There's not much we can do here without some compiler extension.
struct is_pod : public integral_constant<bool, is_void<T>::value || is_scalar<T>::value>{};

// An array of PODs is itself a POD.
template <typename T, size_t N>
struct is_pod<T[N]> : public is_pod<T>{};

// Backward-compatible alias for is_pod.
template <typename T>
struct is_POD : public is_pod<T>{};

// Declares T (and const T) to be a POD. Must be used at global scope,
// since it opens namespace eastl itself.
#define EASTL_DECLARE_POD(T) namespace eastl{ template <> struct is_pod<T> : public true_type{}; template <> struct is_pod<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_constructor
+ //
+ // has_trivial_constructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial constructor. A constructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_constructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_constructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_constructor may only be applied to complete types.
+ //
+	// Without help from the compiler or user, has_trivial_constructor will not
+ // report that a class or struct has a trivial constructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_CONSTRUCTOR to help the compiler.
+ //
+ // A default constructor for a class X is a constructor of class X that
+ // can be called without an argument.
+ ///////////////////////////////////////////////////////////////////////
+
// With current compilers, this is all we can do: approximate "has a
// trivial constructor" by is_pod. User classes must opt in via the macro.
template <typename T>
struct has_trivial_constructor : public is_pod<T> {};

// Declares T (and const T) trivially constructible. Use at global scope.
#define EASTL_DECLARE_TRIVIAL_CONSTRUCTOR(T) namespace eastl{ template <> struct has_trivial_constructor<T> : public true_type{}; template <> struct has_trivial_constructor<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_copy
+ //
+ // has_trivial_copy<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy constructor. A copy constructor is
+ // trivial if
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_copy<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_copy<M>::value == true, where M is the
+ // type of the data member
+ //
+ // has_trivial_copy may only be applied to complete types.
+ //
+ // Another way of looking at this is:
+ // A copy constructor for class X is trivial if it is implicitly
+ // declared and if all the following are true:
+ // - Class X has no virtual functions (10.3) and no virtual base classes (10.1).
+ // - Each direct base class of X has a trivial copy constructor.
+ // - For all the nonstatic data members of X that are of class type
+ // (or array thereof), each such class type has a trivial copy constructor;
+ // otherwise the copy constructor is nontrivial.
+ //
+	// Without help from the compiler or user, has_trivial_copy will not report
+ // that a class or struct has a trivial copy constructor. The user can
+ // use EASTL_DECLARE_TRIVIAL_COPY to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
// Approximation: POD and not volatile. (Volatile types are excluded —
// presumably because a volatile object should not be copied bytewise;
// mirrors has_trivial_relocate below. Confirm intent before relying on it.)
template <typename T>
struct has_trivial_copy : public integral_constant<bool, is_pod<T>::value && !is_volatile<T>::value>{};

// Declares T (and const T) trivially copyable. Use at global scope.
#define EASTL_DECLARE_TRIVIAL_COPY(T) namespace eastl{ template <> struct has_trivial_copy<T> : public true_type{}; template <> struct has_trivial_copy<const T> : public true_type{}; }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_assign
+ //
+ // has_trivial_assign<T>::value == true if and only if T is a class or
+ // struct that has a trivial copy assignment operator. A copy assignment
+ // operator is trivial if:
+ // - it is implicitly defined by the compiler, and
+ // - is_polymorphic<T>::value == false, and
+ // - T has no virtual base classes, and
+ // - for every direct base class of T, has_trivial_assign<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or array
+ // of class type, has_trivial_assign<M>::value == true, where M is
+ // the type of the data member.
+ //
+ // has_trivial_assign may only be applied to complete types.
+ //
+	// Without help from the compiler or user, has_trivial_assign will not
+ // report that a class or struct has trivial assignment. The user
+ // can use EASTL_DECLARE_TRIVIAL_ASSIGN to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
// Approximation: POD, excluding const-qualified types (not assignable
// at all) and volatile-qualified types (see has_trivial_copy note).
template <typename T>
struct has_trivial_assign : public integral_constant<bool,
    is_pod<T>::value && !is_const<T>::value && !is_volatile<T>::value
>{};

// Declares T (and const T) trivially assignable. Use at global scope.
#define EASTL_DECLARE_TRIVIAL_ASSIGN(T) namespace eastl{ template <> struct has_trivial_assign<T> : public true_type{}; template <> struct has_trivial_assign<const T> : public true_type{}; }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_destructor
+ //
+ // has_trivial_destructor<T>::value == true if and only if T is a class
+ // or struct that has a trivial destructor. A destructor is trivial if
+ // - it is implicitly defined by the compiler, and
+ // - for every direct base class of T, has_trivial_destructor<B>::value == true,
+ // where B is the type of the base class, and
+ // - for every nonstatic data member of T that has class type or
+ // array of class type, has_trivial_destructor<M>::value == true,
+ // where M is the type of the data member
+ //
+ // has_trivial_destructor may only be applied to complete types.
+ //
+	// Without help from the compiler or user, has_trivial_destructor will not
+ // report that a class or struct has a trivial destructor.
+ // The user can use EASTL_DECLARE_TRIVIAL_DESTRUCTOR to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
// With current compilers, this is all we can do: approximate "has a
// trivial destructor" by is_pod. User classes must opt in via the macro.
template <typename T>
struct has_trivial_destructor : public is_pod<T>{};

// Declares T (and const T) trivially destructible. Use at global scope.
#define EASTL_DECLARE_TRIVIAL_DESTRUCTOR(T) namespace eastl{ template <> struct has_trivial_destructor<T> : public true_type{}; template <> struct has_trivial_destructor<const T> : public true_type{}; }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // has_trivial_relocate
+ //
+ // This is an EA extension to the type traits standard.
+ //
+ // A trivially relocatable object is one that can be safely memmove'd
+ // to uninitialized memory. construction, assignment, and destruction
+ // properties are not addressed by this trait. A type that has the
+ // is_fundamental trait would always have the has_trivial_relocate trait.
+ // A type that has the has_trivial_constructor, has_trivial_copy or
+	// has_trivial_assign traits would usually have the has_trivial_relocate
+ // trait, but this is not strictly guaranteed.
+ //
+ // The user can use EASTL_DECLARE_TRIVIAL_RELOCATE to help the compiler.
+ ///////////////////////////////////////////////////////////////////////
+
// With current compilers, this is all we can do: POD and not volatile
// (a volatile object should not be memmove'd; see has_trivial_copy).
template <typename T>
struct has_trivial_relocate : public integral_constant<bool, is_pod<T>::value && !is_volatile<T>::value>{};

// Declares T (and const T) safely memmove-able. Use at global scope.
#define EASTL_DECLARE_TRIVIAL_RELOCATE(T) namespace eastl{ template <> struct has_trivial_relocate<T> : public true_type{}; template <> struct has_trivial_relocate<const T> : public true_type{}; }
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/type_properties.h b/UnknownVersion/include/EASTL/internal/type_properties.h
new file mode 100644
index 0000000..c07a7c0
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/type_properties.h
@@ -0,0 +1,283 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_properties.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_PROPERTIES_H
+#define EASTL_INTERNAL_TYPE_PROPERTIES_H
+
+
+#include <limits.h>
+
+
+namespace eastl
+{
+
+ // The following properties or relations are defined here. If the given
+ // item is missing then it simply hasn't been implemented, at least not yet.
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_const
+ //
+ // is_const<T>::value == true if and only if T has const-qualification.
+ //
+ ///////////////////////////////////////////////////////////////////////
// Implementation detail: forming T* moves any top-level const on T into
// the pointee position, where partial specialization can detect it.
template <typename T> struct is_const_value : public false_type{};
template <typename T> struct is_const_value<const T*> : public true_type{};
template <typename T> struct is_const_value<const volatile T*> : public true_type{};

// is_const<T>::value == true if and only if T has top-level const-qualification.
template <typename T> struct is_const : public is_const_value<T*>{};
template <typename T> struct is_const<T&> : public false_type{}; // Note here that T is const, not the reference to T. So is_const is false. See section 8.3.2p1 of the C++ standard.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_volatile
+ //
+ // is_volatile<T>::value == true if and only if T has volatile-qualification.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
// Implementation detail: same pointer trick as is_const_value above.
template <typename T> struct is_volatile_value : public false_type{};
template <typename T> struct is_volatile_value<volatile T*> : public true_type{};
template <typename T> struct is_volatile_value<const volatile T*> : public true_type{};

// is_volatile<T>::value == true if and only if T has top-level volatile-qualification.
template <typename T> struct is_volatile : public is_volatile_value<T*>{};
template <typename T> struct is_volatile<T&> : public false_type{}; // Note here that T is volatile, not the reference to T. So is_volatile is false. See section 8.3.2p1 of the C++ standard.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_abstract
+ //
+ // is_abstract<T>::value == true if and only if T is a class or struct
+ // that has at least one pure virtual function. is_abstract may only
+ // be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_signed
+ //
+ // is_signed<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] char (maybe)
+ // [const] [volatile] signed char
+ // [const] [volatile] short
+ // [const] [volatile] int
+ // [const] [volatile] long
+ // [const] [volatile] long long
+ //
+	// Used to determine if an integral type is signed or unsigned.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_SIGNED macro to allow you to
+ // set a given class to be identified as a signed type.
+ ///////////////////////////////////////////////////////////////////////
// Primary template: a type is not signed unless specialized below
// (or declared so by the user via EASTL_DECLARE_SIGNED).
template <typename T> struct is_signed : public false_type{};

template <> struct is_signed<signed char> : public true_type{};
template <> struct is_signed<const signed char> : public true_type{};
template <> struct is_signed<signed short> : public true_type{};
template <> struct is_signed<const signed short> : public true_type{};
template <> struct is_signed<signed int> : public true_type{};
template <> struct is_signed<const signed int> : public true_type{};
template <> struct is_signed<signed long> : public true_type{};
template <> struct is_signed<const signed long> : public true_type{};
template <> struct is_signed<signed long long> : public true_type{};
template <> struct is_signed<const signed long long> : public true_type{};

// Plain char is a distinct type from signed char; it only counts as
// signed on platforms where it is in fact signed.
#if (CHAR_MAX == SCHAR_MAX)
    template <> struct is_signed<char> : public true_type{};
    template <> struct is_signed<const char> : public true_type{};
#endif
#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
    #if defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 2147483647) || (__WCHAR_MAX__ == 32767)) // GCC defines __WCHAR_MAX__ for most platforms.
        template <> struct is_signed<wchar_t> : public true_type{};
        template <> struct is_signed<const wchar_t> : public true_type{};
    #endif
#endif

// Declares T (and const T) to be a signed type. Use at global scope.
#define EASTL_DECLARE_SIGNED(T) namespace eastl{ template <> struct is_signed<T> : public true_type{}; template <> struct is_signed<const T> : public true_type{}; }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_unsigned
+ //
+ // is_unsigned<T>::value == true if and only if T is one of the following types:
+ // [const] [volatile] char (maybe)
+ // [const] [volatile] unsigned char
+ // [const] [volatile] unsigned short
+ // [const] [volatile] unsigned int
+ // [const] [volatile] unsigned long
+ // [const] [volatile] unsigned long long
+ //
+	// Used to determine if an integral type is signed or unsigned.
+ // Given that there are some user-made classes which emulate integral
+ // types, we provide the EASTL_DECLARE_UNSIGNED macro to allow you to
+ // set a given class to be identified as an unsigned type.
+ ///////////////////////////////////////////////////////////////////////
// Primary template: a type is not unsigned unless specialized below
// (or declared so by the user via EASTL_DECLARE_UNSIGNED).
template <typename T> struct is_unsigned : public false_type{};

template <> struct is_unsigned<unsigned char> : public true_type{};
template <> struct is_unsigned<const unsigned char> : public true_type{};
template <> struct is_unsigned<unsigned short> : public true_type{};
template <> struct is_unsigned<const unsigned short> : public true_type{};
template <> struct is_unsigned<unsigned int> : public true_type{};
template <> struct is_unsigned<const unsigned int> : public true_type{};
template <> struct is_unsigned<unsigned long> : public true_type{};
template <> struct is_unsigned<const unsigned long> : public true_type{};
template <> struct is_unsigned<unsigned long long> : public true_type{};
template <> struct is_unsigned<const unsigned long long> : public true_type{};

// Plain char is a distinct type from unsigned char; it only counts as
// unsigned on platforms where it is in fact unsigned.
#if (CHAR_MAX == UCHAR_MAX)
    template <> struct is_unsigned<char> : public true_type{};
    template <> struct is_unsigned<const char> : public true_type{};
#endif
#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
    // MSVC is special-cased: it does not define __WCHAR_MAX__, and its wchar_t is unsigned.
    #if defined(_MSC_VER) || (defined(__WCHAR_MAX__) && ((__WCHAR_MAX__ == 4294967295U) || (__WCHAR_MAX__ == 65535))) // GCC defines __WCHAR_MAX__ for most platforms.
        template <> struct is_unsigned<wchar_t> : public true_type{};
        template <> struct is_unsigned<const wchar_t> : public true_type{};
    #endif
#endif

// Declares T (and const T) to be an unsigned type. Use at global scope.
#define EASTL_DECLARE_UNSIGNED(T) namespace eastl{ template <> struct is_unsigned<T> : public true_type{}; template <> struct is_unsigned<const T> : public true_type{}; }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // alignment_of
+ //
+ // alignment_of<T>::value is an integral value representing, in bytes,
+ // the memory alignment of objects of type T.
+ //
+ // alignment_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct alignment_of_value{ static const size_t value = EASTL_ALIGN_OF(T); };
+
+ template <typename T>
+ struct alignment_of : public integral_constant<size_t, alignment_of_value<T>::value>{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_aligned
+ //
+ // Defined as true if the type has alignment requirements greater
+ // than default alignment, which is taken to be 8. This allows for
+ // doing specialized object allocation and placement for such types.
+ ///////////////////////////////////////////////////////////////////////
+ template <typename T>
+ struct is_aligned_value{ static const bool value = (EASTL_ALIGN_OF(T) > 8); };
+
+ template <typename T>
+ struct is_aligned : public integral_constant<bool, is_aligned_value<T>::value>{};
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // rank
+ //
+ // rank<T>::value is an integral value representing the number of
+ // dimensions possessed by an array type. For example, given a
+ // multi-dimensional array type T[M][N], std::tr1::rank<T[M][N]>::value == 2.
+ // For a given non-array type T, std::tr1::rank<T>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // extent
+ //
+ // extent<T, I>::value is an integral type representing the number of
+ // elements in the Ith dimension of array type T.
+ //
+ // For a given array type T[N], std::tr1::extent<T[N]>::value == N.
+ // For a given multi-dimensional array type T[M][N], std::tr1::extent<T[M][N], 0>::value == N.
+ // For a given multi-dimensional array type T[M][N], std::tr1::extent<T[M][N], 1>::value == M.
+ // For a given array type T and a given dimension I where I >= rank<T>::value, std::tr1::extent<T, I>::value == 0.
+ // For a given array type of unknown extent T[], std::tr1::extent<T[], 0>::value == 0.
+ // For a given non-array type T and an arbitrary dimension I, std::tr1::extent<T, I>::value == 0.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // is_base_of
+ //
+ // Given two (possibly identical) types Base and Derived, is_base_of<Base, Derived>::value == true
+ // if and only if Base is a direct or indirect base class of Derived,
+ // or Base and Derived are the same type.
+ //
+ // is_base_of may only be applied to complete types.
+ //
+ ///////////////////////////////////////////////////////////////////////
+
+ // Not implemented yet.
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/internal/type_transformations.h b/UnknownVersion/include/EASTL/internal/type_transformations.h
new file mode 100644
index 0000000..f086c0b
--- /dev/null
+++ b/UnknownVersion/include/EASTL/internal/type_transformations.h
@@ -0,0 +1,244 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/internal/type_transformations.h
+// Written and maintained by Paul Pedriana - 2005
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_INTERNAL_TYPE_TRANFORMATIONS_H
+#define EASTL_INTERNAL_TYPE_TRANFORMATIONS_H
+
+
+namespace eastl
+{
+
+
+ // The following transformations are defined here. If the given item
+ // is missing then it simply hasn't been implemented, at least not yet.
+ // add_unsigned
+ // add_signed
+ // remove_const
+ // remove_volatile
+ // remove_cv
+ // add_const
+ // add_volatile
+ // add_cv
+ // remove_reference
+ // add_reference
+ // remove_extent
+ // remove_all_extents
+ // remove_pointer
+ // add_pointer
+ // aligned_storage
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_signed
+ //
+ // Adds signed-ness to the given type.
+ // Modifies only integral values; has no effect on others.
+ // add_signed<int>::type is int
+ // add_signed<unsigned int>::type is int
+ //
+ ///////////////////////////////////////////////////////////////////////
+
// Primary template: non-integral (and already-signed) types pass through
// unchanged. Note: cv-qualified unsigned types are not converted — TODO
// confirm whether callers ever pass const/volatile types here.
template<class T>
struct add_signed
{ typedef T type; };

template<>
struct add_signed<unsigned char>
{ typedef signed char type; };

#if (defined(CHAR_MAX) && defined(UCHAR_MAX) && (CHAR_MAX == UCHAR_MAX)) // If char is unsigned (which is usually not the case)...
    template<>
    struct add_signed<char>
    { typedef signed char type; };
#endif

template<>
struct add_signed<unsigned short>
{ typedef short type; };

template<>
struct add_signed<unsigned int>
{ typedef int type; };

template<>
struct add_signed<unsigned long>
{ typedef long type; };

template<>
struct add_signed<unsigned long long>
{ typedef long long type; };

// wchar_t is mapped to the sized signed integer of matching width,
// since there is no "signed wchar_t" type to name.
#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
    #if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 4294967295U)) // If wchar_t is a 32 bit unsigned value...
        template<>
        struct add_signed<wchar_t>
        { typedef int32_t type; };
    #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 65535)) // If wchar_t is a 16 bit unsigned value...
        template<>
        struct add_signed<wchar_t>
        { typedef int16_t type; };
    #endif
#endif
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_unsigned
+ //
+ // Adds unsigned-ness to the given type.
+ // Modifies only integral values; has no effect on others.
+ // add_unsigned<int>::type is unsigned int
+ // add_unsigned<unsigned int>::type is unsigned int
+ //
+ ///////////////////////////////////////////////////////////////////////
+
// Primary template: non-integral (and already-unsigned) types pass through
// unchanged. Note: cv-qualified signed types are not converted — TODO
// confirm whether callers ever pass const/volatile types here.
template<class T>
struct add_unsigned
{ typedef T type; };

template<>
struct add_unsigned<signed char>
{ typedef unsigned char type; };

#if (defined(CHAR_MAX) && defined(SCHAR_MAX) && (CHAR_MAX == SCHAR_MAX)) // If char is signed (which is usually so)...
    template<>
    struct add_unsigned<char>
    { typedef unsigned char type; };
#endif

template<>
struct add_unsigned<short>
{ typedef unsigned short type; };

template<>
struct add_unsigned<int>
{ typedef unsigned int type; };

template<>
struct add_unsigned<long>
{ typedef unsigned long type; };

template<>
struct add_unsigned<long long>
{ typedef unsigned long long type; };

// wchar_t is mapped to the sized unsigned integer of matching width,
// since there is no "unsigned wchar_t" type to name.
#ifndef EA_WCHAR_T_NON_NATIVE // If wchar_t is a native type instead of simply a define to an existing type...
    #if (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 2147483647)) // If wchar_t is a 32 bit signed value...
        template<>
        struct add_unsigned<wchar_t>
        { typedef uint32_t type; };
    #elif (defined(__WCHAR_MAX__) && (__WCHAR_MAX__ == 32767)) // If wchar_t is a 16 bit signed value...
        template<>
        struct add_unsigned<wchar_t>
        { typedef uint16_t type; };
    #endif
#endif
+
+ ///////////////////////////////////////////////////////////////////////
+ // remove_cv
+ //
+ // Remove const and volatile from a type.
+ //
+ // The remove_cv transformation trait removes top-level const and/or volatile
+ // qualification (if any) from the type to which it is applied. For a given type T,
+ // remove_cv<T const volatile>::type is equivalent to T. For example,
+ // remove_cv<char* volatile>::type is equivalent to char*, while remove_cv<const char*>::type
+ // is equivalent to const char*. In the latter case, the const qualifier modifies
+ // char, not *, and is therefore not at the top level.
+ //
+ ///////////////////////////////////////////////////////////////////////
// Implementation detail: matching through a pointer lets partial
// specialization strip top-level const/volatile from any object type.
// The unspecialized case is intentionally empty (never instantiated for T*).
template <typename T> struct remove_cv_imp{};
template <typename T> struct remove_cv_imp<T*> { typedef T unqualified_type; };
template <typename T> struct remove_cv_imp<const T*> { typedef T unqualified_type; };
template <typename T> struct remove_cv_imp<volatile T*> { typedef T unqualified_type; };
template <typename T> struct remove_cv_imp<const volatile T*> { typedef T unqualified_type; };

// remove_cv<T>::type is T with top-level const and volatile removed.
template <typename T> struct remove_cv{ typedef typename remove_cv_imp<T*>::unqualified_type type; };
template <typename T> struct remove_cv<T&>{ typedef T& type; }; // References are automatically not const nor volatile. See section 8.3.2p1 of the C++ standard.

// Arrays of cv-qualified elements: the qualifier applies to the elements,
// so strip it there while preserving the array bound.
template <typename T, size_t N> struct remove_cv<T const[N]> { typedef T type[N]; };
template <typename T, size_t N> struct remove_cv<T volatile[N]> { typedef T type[N]; };
template <typename T, size_t N> struct remove_cv<T const volatile[N]>{ typedef T type[N]; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // add_reference
+ //
+ // Add reference to a type.
+ //
+ // The add_reference transformation trait adds a level of indirection
+ // by reference to the type to which it is applied. For a given type T,
+ // add_reference<T>::type is equivalent to T& if is_reference<T>::value == false,
+ // and T otherwise.
+ //
+ ///////////////////////////////////////////////////////////////////////
// General case: add one level of reference.
template <typename T>
struct add_reference_impl{ typedef T& type; };

// Already a reference: leave it alone (references to references are invalid
// in C++03, so T& stays T&).
template <typename T>
struct add_reference_impl<T&>{ typedef T& type; };

// void cannot be referenced, so it passes through unchanged. The original
// handled only unqualified void; the cv-qualified variants below would
// otherwise produce the ill-formed types 'const void&' etc.
template <>
struct add_reference_impl<void>{ typedef void type; };
template <>
struct add_reference_impl<const void>{ typedef const void type; };
template <>
struct add_reference_impl<volatile void>{ typedef volatile void type; };
template <>
struct add_reference_impl<const volatile void>{ typedef const volatile void type; };

// add_reference<T>::type is T& if T is referenceable, and T otherwise.
template <typename T>
struct add_reference { typedef typename add_reference_impl<T>::type type; };
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/iterator.h b/UnknownVersion/include/EASTL/iterator.h
new file mode 100644
index 0000000..09ba9e6
--- /dev/null
+++ b/UnknownVersion/include/EASTL/iterator.h
@@ -0,0 +1,621 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/iterator.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_ITERATOR_H
+#define EASTL_ITERATOR_H
+
+
+#include <EASTL/internal/config.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+
+#include <stddef.h>
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+// If the user has specified that we use std iterator
+// categories instead of EASTL iterator categories,
+// then #include <iterator>.
+#if EASTL_STD_ITERATOR_CATEGORY_ENABLED
+ #ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #endif
+ #include <iterator>
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+#endif
+
+
+#ifdef _MSC_VER
+ #pragma warning(push) // VC++ generates a bogus warning that you cannot code away.
+ #pragma warning(disable: 4619) // There is no warning number 'number'.
+ #pragma warning(disable: 4217) // Member template functions cannot be used for copy-assignment or copy-construction.
+#elif defined(__SNC__)
+ #pragma control %push diag
+ #pragma diag_suppress=187 // Pointless comparison of unsigned integer with zero
+#endif
+
+
+namespace eastl
+{
+    /// iterator_status_flag
+    ///
+    /// Defines the validity status of an iterator. This is primarily used for
+    /// iterator validation in debug builds. These are implemented as OR-able
+    /// flags (as opposed to mutually exclusive values) in order to deal with
+    /// the nature of iterator status. In particular, an iterator may be valid
+    /// but not dereferencable, as in the case with an iterator to container end().
+    /// An iterator may be valid but also dereferencable, as in the case with an
+    /// iterator to container begin().
+    ///
+    /// A validation routine would typically return a combination of these
+    /// flags; callers test individual bits rather than compare for equality.
+    enum iterator_status_flag
+    {
+        isf_none            = 0x00, /// This is called none and not called invalid because it is not strictly the opposite of invalid.
+        isf_valid           = 0x01, /// The iterator is valid, which means it is in the range of [begin, end].
+        isf_current         = 0x02, /// The iterator is valid and points to the same element it did when created. For example, if an iterator points to vector::begin() but an element is inserted at the front, the iterator is valid but not current. Modification of elements in place do not make iterators non-current.
+        isf_can_dereference = 0x04  /// The iterator is dereferencable, which means it is in the range of [begin, end). It may or may not be current.
+    };
+
+
+
+ // The following declarations are taken directly from the C++ standard document.
+ // input_iterator_tag, etc.
+ // iterator
+ // iterator_traits
+ // reverse_iterator
+
+    // Iterator categories
+    // Every iterator is defined as belonging to one of the iterator categories that
+    // we define here. These categories come directly from the C++ standard.
+    // The tags are empty types used purely for compile-time dispatch (see
+    // distance_impl / advance_impl below). The inheritance chain lets a
+    // stronger iterator be accepted wherever a weaker category is required.
+    #if !EASTL_STD_ITERATOR_CATEGORY_ENABLED // If we are to use our own iterator category definitions...
+        struct input_iterator_tag { };
+        struct output_iterator_tag { };
+        struct forward_iterator_tag : public input_iterator_tag { };
+        struct bidirectional_iterator_tag : public forward_iterator_tag { };
+        struct random_access_iterator_tag : public bidirectional_iterator_tag { };
+        struct contiguous_iterator_tag : public random_access_iterator_tag { };  // Extension to the C++ standard. Contiguous ranges are more than random access, they are physically contiguous.
+    #endif
+
+
+    // struct iterator
+    // Convenience base class: deriving from it supplies the five nested
+    // typedefs that iterator_traits (below) expects of an iterator type.
+    template <typename Category, typename T, typename Distance = ptrdiff_t,
+              typename Pointer = T*, typename Reference = T&>
+    struct iterator
+    {
+        typedef Category  iterator_category;
+        typedef T         value_type;
+        typedef Distance  difference_type;
+        typedef Pointer   pointer;
+        typedef Reference reference;
+    };
+
+
+    // struct iterator_traits
+    // Uniform access to an iterator's associated types. The primary template
+    // forwards to nested typedefs; the pointer specializations below make raw
+    // pointers usable as random access iterators.
+    template <typename Iterator>
+    struct iterator_traits
+    {
+        typedef typename Iterator::iterator_category iterator_category;
+        typedef typename Iterator::value_type        value_type;
+        typedef typename Iterator::difference_type   difference_type;
+        typedef typename Iterator::pointer           pointer;
+        typedef typename Iterator::reference         reference;
+    };
+
+    // Specialization for raw pointers.
+    template <typename T>
+    struct iterator_traits<T*>
+    {
+        typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;     // To consider: Change this to contiguous_iterator_tag for the case that
+        typedef T                                        value_type;            // EASTL_ITC_NS is "eastl" instead of "std".
+        typedef ptrdiff_t                                difference_type;
+        typedef T*                                       pointer;
+        typedef T&                                       reference;
+    };
+
+    // Specialization for pointers-to-const; note that value_type drops the const.
+    template <typename T>
+    struct iterator_traits<const T*>
+    {
+        typedef EASTL_ITC_NS::random_access_iterator_tag iterator_category;
+        typedef T                                        value_type;
+        typedef ptrdiff_t                                difference_type;
+        typedef const T*                                 pointer;
+        typedef const T&                                 reference;
+    };
+
+
+
+
+
+    /// reverse_iterator
+    ///
+    /// Iterator adaptor that traverses a bidirectional or random access
+    /// range in the opposite direction. The adaptor and its underlying
+    /// iterator i are related by the identity:
+    ///     &*(reverse_iterator(i)) == &*(i - 1).
+    /// The off-by-one exists because a pointer one-past-the-end of an
+    /// array is always representable, while a pointer before the
+    /// beginning of an array might not be.
+    ///
+    template <typename Iterator>
+    class reverse_iterator : public iterator<typename eastl::iterator_traits<Iterator>::iterator_category,
+                                             typename eastl::iterator_traits<Iterator>::value_type,
+                                             typename eastl::iterator_traits<Iterator>::difference_type,
+                                             typename eastl::iterator_traits<Iterator>::pointer,
+                                             typename eastl::iterator_traits<Iterator>::reference>
+    {
+    public:
+        typedef Iterator                                                   iterator_type;
+        typedef typename eastl::iterator_traits<Iterator>::pointer         pointer;
+        typedef typename eastl::iterator_traits<Iterator>::reference       reference;
+        typedef typename eastl::iterator_traits<Iterator>::difference_type difference_type;
+
+    protected:
+        Iterator mIterator;
+
+    public:
+        // Value-initializing mIterator matters: when Iterator is a raw
+        // pointer this zeroes it instead of leaving it indeterminate.
+        reverse_iterator()
+            : mIterator() { }
+
+        explicit reverse_iterator(iterator_type i)
+            : mIterator(i) { }
+
+        reverse_iterator(const reverse_iterator& ri)
+            : mIterator(ri.mIterator) { }
+
+        // Converting constructor, e.g. const_reverse_iterator from reverse_iterator.
+        template <typename U>
+        reverse_iterator(const reverse_iterator<U>& ri)
+            : mIterator(ri.base()) { }
+
+        // Cross-type assignment. This operator= isn't in the standard, but the
+        // C++ library working group has tentatively approved it, as it allows
+        // const and non-const reverse_iterators to interoperate.
+        template <typename U>
+        reverse_iterator<Iterator>& operator=(const reverse_iterator<U>& ri)
+        { mIterator = ri.base(); return *this; }
+
+        iterator_type base() const
+        { return mIterator; }
+
+        reference operator*() const
+        {
+            iterator_type itPrev(mIterator);
+            --itPrev;                       // Dereference the element before mIterator (see class comment).
+            return *itPrev;
+        }
+
+        pointer operator->() const
+        { return &(**this); }
+
+        reverse_iterator& operator++()
+        { --mIterator; return *this; }
+
+        reverse_iterator operator++(int)
+        {
+            const reverse_iterator temp(*this);
+            --mIterator;
+            return temp;
+        }
+
+        reverse_iterator& operator--()
+        { ++mIterator; return *this; }
+
+        reverse_iterator operator--(int)
+        {
+            const reverse_iterator temp(*this);
+            ++mIterator;
+            return temp;
+        }
+
+        reverse_iterator operator+(difference_type n) const
+        { return reverse_iterator(mIterator - n); }
+
+        reverse_iterator& operator+=(difference_type n)
+        { mIterator -= n; return *this; }
+
+        reverse_iterator operator-(difference_type n) const
+        { return reverse_iterator(mIterator + n); }
+
+        reverse_iterator& operator-=(difference_type n)
+        { mIterator += n; return *this; }
+
+        reference operator[](difference_type n) const
+        { return *(*this + n); } // Same element as mIterator[-n - 1].
+    };
+
+
+    // The C++ library working group has tentatively approved the usage of two
+    // template parameters (Iterator1 and Iterator2) in order to allow reverse_iterators
+    // and const_reverse iterators to be comparable. This is a similar issue to the
+    // C++ defect report #179 regarding comparison of container iterators and const_iterators.
+    //
+    // Note that the ordering comparisons below intentionally invert the
+    // direction of the underlying comparison (e.g. operator< uses base() >),
+    // because reverse iterators traverse the sequence backwards.
+    template <typename Iterator1, typename Iterator2>
+    inline bool
+    operator==(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+    { return a.base() == b.base(); }
+
+
+    template <typename Iterator1, typename Iterator2>
+    inline bool
+    operator<(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+    { return a.base() > b.base(); }
+
+
+    template <typename Iterator1, typename Iterator2>
+    inline bool
+    operator!=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+    { return a.base() != b.base(); }
+
+
+    template <typename Iterator1, typename Iterator2>
+    inline bool
+    operator>(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+    { return a.base() < b.base(); }
+
+
+    template <typename Iterator1, typename Iterator2>
+    inline bool
+    operator<=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+    { return a.base() >= b.base(); }
+
+
+    template <typename Iterator1, typename Iterator2>
+    inline bool
+    operator>=(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+    { return a.base() <= b.base(); }
+
+
+    // Difference is likewise reversed: (a - b) maps to (b.base() - a.base()).
+    template <typename Iterator1, typename Iterator2>
+    inline typename reverse_iterator<Iterator1>::difference_type
+    operator-(const reverse_iterator<Iterator1>& a, const reverse_iterator<Iterator2>& b)
+    { return b.base() - a.base(); }
+
+
+    // n + ri, mirroring reverse_iterator::operator+(n).
+    template <typename Iterator>
+    inline reverse_iterator<Iterator>
+    operator+(typename reverse_iterator<Iterator>::difference_type n, const reverse_iterator<Iterator>& a)
+    { return reverse_iterator<Iterator>(a.base() - n); }
+
+
+
+
+
+
+
+    /// back_insert_iterator
+    ///
+    /// Output-iterator adaptor over a container: assigning a value through
+    /// this iterator appends the value to the container via push_back.
+    /// Dereference and increment are no-ops that return *this, so the
+    /// adaptor can be used wherever an output iterator is expected.
+    ///
+    template <typename Container>
+    class back_insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+    {
+    public:
+        typedef Container                           container_type;
+        typedef typename Container::const_reference const_reference;
+
+    protected:
+        Container& container;
+
+    public:
+        explicit back_insert_iterator(Container& x)
+            : container(x) { }
+
+        // Assignment is the insertion operation.
+        back_insert_iterator& operator=(const_reference value)
+        {
+            container.push_back(value);
+            return *this;
+        }
+
+        // Dereferencing yields the iterator itself, so that *it = v works.
+        back_insert_iterator& operator*()
+        { return *this; }
+
+        // Incrementing is deliberately a no-op.
+        back_insert_iterator& operator++()
+        { return *this; }
+
+        back_insert_iterator operator++(int)
+        { return *this; } // Deliberately a no-op as well.
+    };
+
+
+    /// back_inserter
+    ///
+    /// Convenience factory: returns a back_insert_iterator bound to x.
+    ///
+    template <typename Container>
+    inline back_insert_iterator<Container>
+    back_inserter(Container& x)
+    {
+        typedef back_insert_iterator<Container> result_type;
+        return result_type(x);
+    }
+
+
+
+
+    /// front_insert_iterator
+    ///
+    /// A front_insert_iterator is simply a class that acts like an iterator but when you
+    /// assign a value to it, it calls push_front on the container with the value.
+    /// Mirrors back_insert_iterator above, but prepends instead of appends.
+    ///
+    template <typename Container>
+    class front_insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+    {
+    public:
+        typedef Container                           container_type;
+        typedef typename Container::const_reference const_reference;
+
+    protected:
+        Container& container;  // Reference to the adapted container; the container must outlive this iterator.
+
+    public:
+        explicit front_insert_iterator(Container& x)
+            : container(x) { }
+
+        // Assignment performs the insertion.
+        front_insert_iterator& operator=(const_reference value)
+        { container.push_front(value); return *this; }
+
+        // Dereference yields the iterator itself so that *it = v works.
+        front_insert_iterator& operator*()
+        { return *this; }
+
+        front_insert_iterator& operator++()
+        { return *this; } // This is by design.
+
+        front_insert_iterator operator++(int)
+        { return *this; } // This is by design.
+    };
+
+
+    /// front_inserter
+    ///
+    /// Convenience factory: returns a front_insert_iterator bound to x.
+    ///
+    template <typename Container>
+    inline front_insert_iterator<Container>
+    front_inserter(Container& x)
+    {
+        typedef front_insert_iterator<Container> result_type;
+        return result_type(x);
+    }
+
+
+
+
+    /// insert_iterator
+    ///
+    /// An insert_iterator is like an iterator except that when you assign a value to it,
+    /// the insert_iterator inserts the value into the container and increments the iterator.
+    ///
+    /// insert_iterator is an iterator adaptor that functions as an OutputIterator:
+    /// assignment through an insert_iterator inserts an object into a container.
+    /// Specifically, if ii is an insert_iterator, then ii keeps track of a container c and
+    /// an insertion point p; the expression *ii = x performs the insertion c.insert(p, x).
+    ///
+    /// If you assign through an insert_iterator several times, then you will be inserting
+    /// several elements into the underlying container. In the case of a sequence, they will
+    /// appear at a particular location in the underlying sequence, in the order in which
+    /// they were inserted: one of the arguments to insert_iterator's constructor is an
+    /// iterator p, and the new range will be inserted immediately before p.
+    ///
+    template <typename Container>
+    class insert_iterator : public iterator<EASTL_ITC_NS::output_iterator_tag, void, void, void, void>
+    {
+    public:
+        typedef Container                           container_type;
+        typedef typename Container::iterator        iterator_type;
+        typedef typename Container::const_reference const_reference;
+
+    protected:
+        Container&    container;  // Reference to the adapted container; must outlive this iterator.
+        iterator_type it;         // Current insertion point within the container.
+
+    public:
+        // This assignment operator is defined more to stop compiler warnings (e.g. VC++ C4512)
+        // than to be useful. However, it does allow an insert_iterator to be assigned to another
+        // insert iterator provided that they point to the same container.
+        insert_iterator& operator=(const insert_iterator& x)
+        {
+            EASTL_ASSERT(&x.container == &container);
+            it = x.it;
+            return *this;
+        }
+
+        insert_iterator(Container& x, iterator_type itNew)
+            : container(x), it(itNew) {}
+
+        // Assignment performs the insertion, then advances past the new
+        // element so a subsequent assignment inserts after it.
+        insert_iterator& operator=(const_reference value)
+        {
+            it = container.insert(it, value);
+            ++it;
+            return *this;
+        }
+
+        // Dereference yields the iterator itself so that *it = v works.
+        insert_iterator& operator*()
+        { return *this; }
+
+        insert_iterator& operator++()
+        { return *this; } // This is by design.
+
+        // NOTE(review): unlike back/front_insert_iterator, this post-increment
+        // returns a reference rather than a copy — presumably intentional for
+        // this no-op, but confirm against the other inserter adaptors.
+        insert_iterator& operator++(int)
+        { return *this; } // This is by design.
+
+    }; // insert_iterator
+
+
+    /// inserter
+    ///
+    /// Convenience factory: returns an insert_iterator that inserts into x
+    /// immediately before position i. The argument i is converted to the
+    /// container's own iterator type.
+    ///
+    template <typename Container, typename Iterator>
+    inline eastl::insert_iterator<Container>
+    inserter(Container& x, Iterator i)
+    {
+        return eastl::insert_iterator<Container>(x, typename Container::iterator(i));
+    }
+
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ /// distance
+ ///
+ /// Implements the distance() function. There are two versions, one for
+ /// random access iterators (e.g. with vector) and one for regular input
+ /// iterators (e.g. with list). The former is more efficient.
+ ///
+    // Input-iterator version: the distance must be counted one hop at a
+    // time, since input iterators support only pre/post increment.
+    template <typename InputIterator>
+    inline typename eastl::iterator_traits<InputIterator>::difference_type
+    distance_impl(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+    {
+        typename eastl::iterator_traits<InputIterator>::difference_type result(0);
+
+        for(; first != last; ++first)
+            ++result;
+
+        return result;
+    }
+
+    // Random-access version: distance is a constant-time subtraction.
+    template <typename RandomAccessIterator>
+    inline typename eastl::iterator_traits<RandomAccessIterator>::difference_type
+    distance_impl(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
+    {
+        return last - first;
+    }
+
+ // Special version defined so that std C++ iterators can be recognized by
+ // this function. Unfortunately, this function treats all foreign iterators
+ // as InputIterators and thus can seriously hamper performance in the case
+ // of large ranges of bidirectional_iterator_tag iterators.
+ //template <typename InputIterator>
+ //inline typename eastl::iterator_traits<InputIterator>::difference_type
+ //distance_impl(InputIterator first, InputIterator last, ...)
+ //{
+ // typename eastl::iterator_traits<InputIterator>::difference_type n = 0;
+ //
+ // while(first != last)
+ // {
+ // ++first;
+ // ++n;
+ // }
+ // return n;
+ //}
+
+    // Public entry point: tag-dispatches to the appropriate distance_impl
+    // overload based on the iterator's category.
+    template <typename InputIterator>
+    inline typename eastl::iterator_traits<InputIterator>::difference_type
+    distance(InputIterator first, InputIterator last)
+    {
+        return eastl::distance_impl(first, last,
+                                    typename eastl::iterator_traits<InputIterator>::iterator_category());
+    }
+
+
+
+
+ //////////////////////////////////////////////////////////////////////////////////
+ /// advance
+ ///
+ /// Implements the advance() function. There are three versions, one for
+ /// random access iterators (e.g. with vector), one for bidirectional
+ /// iterators (list) and one for regular input iterators (e.g. with slist).
+ ///
+    // Input-iterator version: advance one step at a time. n is expected to
+    // be non-negative, as input iterators cannot move backwards.
+    template <typename InputIterator, typename Distance>
+    inline void
+    advance_impl(InputIterator& i, Distance n, EASTL_ITC_NS::input_iterator_tag)
+    {
+        for(; n != 0; --n)
+            ++i;
+    }
+
+    // Bidirectional version: steps forward for positive n and backward for
+    // negative n (n == 0 falls into the second branch and does nothing).
+    template <typename BidirectionalIterator, typename Distance>
+    inline void
+    advance_impl(BidirectionalIterator& i, Distance n, EASTL_ITC_NS::bidirectional_iterator_tag)
+    {
+        if(n > 0)
+        {
+            for(; n != 0; --n)
+                ++i;
+        }
+        else
+        {
+            for(; n != 0; ++n)
+                --i;
+        }
+    }
+
+    // Random-access version: a single constant-time += handles both
+    // positive and negative n.
+    template <typename RandomAccessIterator, typename Distance>
+    inline void
+    advance_impl(RandomAccessIterator& i, Distance n, EASTL_ITC_NS::random_access_iterator_tag)
+    {
+        i += n;
+    }
+
+ // Special version defined so that std C++ iterators can be recognized by
+ // this function. Unfortunately, this function treats all foreign iterators
+ // as InputIterators and thus can seriously hamper performance in the case
+ // of large ranges of bidirectional_iterator_tag iterators.
+ //template <typename InputIterator, typename Distance>
+ //inline void
+ //advance_impl(InputIterator& i, Distance n, ...)
+ //{
+ // while(n--)
+ // ++i;
+ //}
+
+    // Public entry point: tag-dispatches to the appropriate advance_impl
+    // overload based on the iterator's category.
+    template <typename InputIterator, typename Distance>
+    inline void
+    advance(InputIterator& i, Distance n)
+    {
+        eastl::advance_impl(i, n,
+                            typename eastl::iterator_traits<InputIterator>::iterator_category());
+    }
+
+
+} // namespace eastl
+
+
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#elif defined(__SNC__)
+ #pragma control %pop diag
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/list.h b/UnknownVersion/include/EASTL/list.h
new file mode 100644
index 0000000..440d686
--- /dev/null
+++ b/UnknownVersion/include/EASTL/list.h
@@ -0,0 +1,1863 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/list.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a doubly-linked list, much like the C++ std::list class.
+// The primary distinctions between this list and std::list are:
+// - list doesn't implement some of the less-frequently used functions
+// of std::list. Any required functions can be added at a later time.
+// - list has a couple extension functions that increase performance.
+// - list can contain objects with alignment requirements. std::list cannot
+// do so without a bit of tedious non-portable effort.
+// - list has optimizations that don't exist in the STL implementations
+// supplied by library vendors for our targeted platforms.
+// - list supports debug memory naming natively.
+// - list::size() by default is not a constant time function, like the list::size
+// in some std implementations such as STLPort and SGI STL but unlike the
+// list in Dinkumware and Metrowerks. The EASTL_LIST_SIZE_CACHE option can change this.
+// - list provides a guaranteed portable node definition that allows users
+// to write custom fixed size node allocators that are portable.
+// - list is easier to read, debug, and visualize.
+// - list is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - list has less deeply nested function calls and allows the user to
+// enable forced inlining in debug builds in order to reduce bloat.
+// - list doesn't keep a member size variable. This means that list is
+// smaller than std::list (depends on std::list) and that for most operations
+// it is faster than std::list. However, the list::size function is slower.
+// - list::size_type is defined as eastl_size_t instead of size_t in order to
+// save memory and run faster on 64 bit systems.
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_LIST_H
+#define EASTL_LIST_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #include <new>
+ #include <stddef.h>
+ #pragma warning(pop)
+#else
+ #include <new>
+ #include <stddef.h>
+#endif
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+ #pragma warning(disable: 4345) // Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_LIST_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_LIST_DEFAULT_NAME
+ #define EASTL_LIST_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " list" // Unless the user overrides something, this is "EASTL list".
+ #endif
+
+
+ /// EASTL_LIST_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_LIST_DEFAULT_ALLOCATOR
+ #define EASTL_LIST_DEFAULT_ALLOCATOR allocator_type(EASTL_LIST_DEFAULT_NAME)
+ #endif
+
+
+
+    /// ListNodeBase
+    ///
+    /// We define a ListNodeBase separately from ListNode (below), because it allows
+    /// us to have non-templated operations such as insert, remove (below), and it
+    /// makes it so that the list anchor node doesn't carry a T with it, which would
+    /// waste space and possibly lead to surprising the user due to extra Ts existing
+    /// that the user didn't explicitly create. The downside to all of this is that
+    /// it makes debug viewing of a list harder, given that the node pointers are of
+    /// type ListNodeBase and not ListNode. However, see ListNodeBaseProxy below.
+    ///
+    /// The member functions are only declared here; their definitions are not
+    /// visible in this header chunk.
+    struct ListNodeBase
+    {
+        ListNodeBase* mpNext;
+        ListNodeBase* mpPrev;
+
+        void insert(ListNodeBase* pNext);                                // Presumably links this node into the list just before pNext — confirm in the definition.
+        void remove();                                                   // Presumably unlinks this node from its list — confirm in the definition.
+        void splice(ListNodeBase* pFirst, ListNodeBase* pLast);          // Presumably moves the range [pFirst, pLast) to before this node — confirm in the definition.
+        void reverse();                                                  // Presumably reverses the linked order of the list containing this node — confirm in the definition.
+        static void swap(ListNodeBase& a, ListNodeBase& b);              // Presumably exchanges the lists anchored at a and b — confirm in the definition.
+    } EASTL_LIST_PROXY_MAY_ALIAS;
+
+
+    #if EASTL_LIST_PROXY_ENABLED
+
+        /// ListNodeBaseProxy
+        ///
+        /// In debug builds, we define ListNodeBaseProxy to be the same thing as
+        /// ListNodeBase, except it is templated on the parent ListNode class.
+        /// We do this because we want users in debug builds to be able to easily
+        /// view the list's contents in a debugger GUI. We do this only in a debug
+        /// build for the reasons described above: that ListNodeBase needs to be
+        /// as efficient as possible and not cause code bloat or extra function
+        /// calls (inlined or not).
+        ///
+        /// ListNodeBaseProxy *must* be separate from its parent class ListNode
+        /// because the list class must have a member node which contains no T value.
+        /// It is thus incorrect for us to have one single ListNode class which
+        /// has mpNext, mpPrev, and mValue. So we do a recursive template trick in
+        /// the definition and use of SListNodeBaseProxy.
+        ///
+        template <typename LN>
+        struct ListNodeBaseProxy
+        {
+            LN* mpNext;
+            LN* mpPrev;
+        };
+
+        // Debug-build node: the base class's pointers are typed as ListNode<T>*
+        // (via the recursive template), so a debugger can show node values.
+        template <typename T>
+        struct ListNode : public ListNodeBaseProxy< ListNode<T> >
+        {
+            T mValue;
+        };
+
+    #else
+
+        // Release-build node: derives directly from the non-templated
+        // ListNodeBase so that list operations can be non-templated.
+        template <typename T>
+        struct ListNode : public ListNodeBase
+        {
+            T mValue;
+        };
+
+    #endif
+
+
+
+
+    /// ListIterator
+    ///
+    /// Bidirectional iterator over a list's nodes. Only declarations appear
+    /// here; the member function definitions are not visible in this chunk.
+    /// Pointer and Reference are parameterized so the same template serves
+    /// as both iterator (T*, T&) and const_iterator (const T*, const T&).
+    template <typename T, typename Pointer, typename Reference>
+    struct ListIterator
+    {
+        typedef ListIterator<T, Pointer, Reference>         this_type;
+        typedef ListIterator<T, T*, T&>                     iterator;
+        typedef ListIterator<T, const T*, const T&>         const_iterator;
+        typedef eastl_size_t                                size_type;     // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
+        typedef ptrdiff_t                                   difference_type;
+        typedef T                                           value_type;
+        typedef ListNode<T>                                 node_type;
+        typedef Pointer                                     pointer;
+        typedef Reference                                   reference;
+        typedef EASTL_ITC_NS::bidirectional_iterator_tag    iterator_category;
+
+    public:
+        node_type* mpNode;  // The node this iterator currently refers to.
+
+    public:
+        ListIterator();
+        ListIterator(const ListNodeBase* pNode);  // NOTE(review): takes a const ListNodeBase* yet mpNode is non-const node_type* — the definition presumably casts; confirm there.
+        ListIterator(const iterator& x);          // Allows const_iterator construction from iterator.
+
+        reference operator*() const;
+        pointer   operator->() const;
+
+        this_type& operator++();
+        this_type  operator++(int);
+
+        this_type& operator--();
+        this_type  operator--(int);
+
+    }; // ListIterator
+
+
+
+
+    /// ListBase
+    ///
+    /// See VectorBase (class vector) for an explanation of why we
+    /// create this separate base class.
+    ///
+    /// Holds the anchor node, the allocator, and the node allocate/free
+    /// primitives. Member function definitions are not visible in this chunk.
+    template <typename T, typename Allocator>
+    class ListBase
+    {
+    public:
+        typedef T                             value_type;
+        typedef Allocator                     allocator_type;
+        typedef ListNode<T>                   node_type;
+        typedef eastl_size_t                  size_type;     // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
+        typedef ptrdiff_t                     difference_type;
+        #if EASTL_LIST_PROXY_ENABLED
+            typedef ListNodeBaseProxy< ListNode<T> >  base_node_type;
+        #else
+            typedef ListNodeBase                      base_node_type; // We use ListNodeBase instead of ListNode<T> because we don't want to create a T.
+        #endif
+
+        enum
+        {
+            kAlignment       = EASTL_ALIGN_OF(T),
+            kAlignmentOffset = 0                    // offsetof(node_type, mValue);
+        };
+
+    protected:
+        base_node_type mNode;       // Anchor node: its next/prev delimit the circular list; carries no T value.
+        #if EASTL_LIST_SIZE_CACHE
+            size_type  mSize;       // Cached element count, maintained only when size caching is enabled.
+        #endif
+        allocator_type mAllocator;  // To do: Use base class optimization to make this go away.
+
+    public:
+        allocator_type& get_allocator();
+        void            set_allocator(const allocator_type& allocator);
+
+    protected:
+        ListBase();
+        ListBase(const allocator_type& a);
+        ~ListBase();
+
+        node_type* DoAllocateNode();            // Allocates storage for one node via mAllocator; definition not visible here.
+        void       DoFreeNode(node_type* pNode);
+
+        void DoInit();                          // Presumably initializes the anchor node; confirm in the definition.
+        void DoClear();                         // Presumably frees all nodes; confirm in the definition.
+
+    }; // ListBase
+
+
+
+
+ /// list
+ ///
+ /// -- size() is O(n) --
+ /// Note that as of this writing, list::size() is an O(n) operation. That is, getting the size
+ /// of the list is not a fast operation, as it requires traversing the list and counting the nodes.
+ /// We could make list::size() be fast by having a member mSize variable. There are reasons for
+ /// having such functionality and reasons for not having such functionality. We currently choose
+ /// to not have a member mSize variable as it would add four bytes to the class, add a tiny amount
+ /// of processing to functions such as insert and erase, and would only serve to improve the size
+ /// function, but no others. The alternative argument is that the C++ standard states that std::list
+ /// should be an O(1) operation (i.e. have a member size variable), most C++ standard library list
+ /// implementations do so, the size is but an integer which is quick to update, and many users
+ /// expect to have a fast size function. The EASTL_LIST_SIZE_CACHE option changes this.
+ /// To consider: Make size caching an optional template parameter.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a list container, your pool
+ /// needs to contain items of type list::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+    ///     typedef list<Widget, MemoryPool> WidgetList;           // Declare your WidgetList type.
+ /// MemoryPool myPool(sizeof(WidgetList::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetList myList(&myPool); // Create a list that uses the pool.
+ ///
template <typename T, typename Allocator = EASTLAllocatorType>
class list : public ListBase<T, Allocator>
{
    // Internal convenience typedefs.
    typedef ListBase<T, Allocator> base_type;
    typedef list<T, Allocator>     this_type;

public:
    // Standard container typedefs.
    typedef T                                        value_type;
    typedef T*                                       pointer;
    typedef const T*                                 const_pointer;
    typedef T&                                       reference;
    typedef const T&                                 const_reference;
    typedef ListIterator<T, T*, T&>                  iterator;
    typedef ListIterator<T, const T*, const T&>      const_iterator;
    typedef eastl::reverse_iterator<iterator>        reverse_iterator;
    typedef eastl::reverse_iterator<const_iterator>  const_reverse_iterator;
    typedef typename base_type::size_type            size_type;
    typedef typename base_type::difference_type      difference_type;
    typedef typename base_type::allocator_type       allocator_type;
    typedef typename base_type::node_type            node_type;
    typedef typename base_type::base_node_type       base_node_type;

    // Bring base-class members into scope (required because the base is a dependent type).
    using base_type::mNode;
    using base_type::mAllocator;
    using base_type::DoAllocateNode;
    using base_type::DoFreeNode;
    using base_type::DoClear;
    using base_type::DoInit;
    using base_type::get_allocator;
    #if EASTL_LIST_SIZE_CACHE
        using base_type::mSize;
    #endif

public:
    // Construction / assignment.
    list();
    list(const allocator_type& allocator);
    explicit list(size_type n, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
    list(size_type n, const value_type& value, const allocator_type& allocator = EASTL_LIST_DEFAULT_ALLOCATOR);
    list(const this_type& x);

    template <typename InputIterator>
    list(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.

    this_type& operator=(const this_type& x);
    void swap(this_type& x);

    void assign(size_type n, const value_type& value);

    template <typename InputIterator>                       // It turns out that the C++ std::list specifies a two argument
    void assign(InputIterator first, InputIterator last);   // version of assign that takes (int size, int value). These are not
                                                            // iterators, so we need to do a template compiler trick to do the right thing.
    // Iteration.
    iterator       begin();
    const_iterator begin() const;
    iterator       end();
    const_iterator end() const;

    reverse_iterator       rbegin();
    const_reverse_iterator rbegin() const;
    reverse_iterator       rend();
    const_reverse_iterator rend() const;

    // Size queries. Note: size() is O(n) unless EASTL_LIST_SIZE_CACHE is enabled.
    bool      empty() const;
    size_type size() const;

    void resize(size_type n, const value_type& value);
    void resize(size_type n);

    // Element access.
    reference       front();
    const_reference front() const;

    reference       back();
    const_reference back() const;

    // Insertion/removal at the ends. The parameterless push_* overloads and the
    // push_*_uninitialized functions are EASTL extensions.
    void      push_front(const value_type& value);
    reference push_front();
    void*     push_front_uninitialized();

    void      push_back(const value_type& value);
    reference push_back();
    void*     push_back_uninitialized();

    void pop_front();
    void pop_back();

    // Arbitrary-position insertion/removal.
    iterator insert(iterator position);
    iterator insert(iterator position, const value_type& value);

    void insert(iterator position, size_type n, const value_type& value);

    template <typename InputIterator>
    void insert(iterator position, InputIterator first, InputIterator last);

    iterator erase(iterator position);
    iterator erase(iterator first, iterator last);

    reverse_iterator erase(reverse_iterator position);
    reverse_iterator erase(reverse_iterator first, reverse_iterator last);

    void clear();
    void reset(); // EASTL extension: forget contents without freeing nodes.

    void remove(const T& x);

    template <typename Predicate>
    void remove_if(Predicate);

    void reverse();

    // Splicing: O(1) transfer of nodes between lists (O(n) range splice when size caching is on).
    void splice(iterator position, this_type& x);
    void splice(iterator position, this_type& x, iterator i);
    void splice(iterator position, this_type& x, iterator first, iterator last);

public:
    // Sorting functionality
    // This is independent of the global sort algorithms, as lists are
    // linked nodes and can be sorted more efficiently by moving nodes
    // around in ways that global sort algorithms aren't privy to.

    void merge(this_type& x);

    template <typename Compare>
    void merge(this_type& x, Compare compare);

    void unique();

    template <typename BinaryPredicate>
    void unique(BinaryPredicate);

    void sort();

    template<typename Compare>
    void sort(Compare compare);

public:
    // Debug/validation extensions.
    bool validate() const;
    int  validate_iterator(const_iterator i) const;

protected:
    // Internal helpers. The Integer/InputIterator overload pairs disambiguate
    // the (n, value) forms from genuine iterator ranges via is_integral dispatch.
    node_type* DoCreateNode();
    node_type* DoCreateNode(const value_type& value);

    template <typename Integer>
    void DoAssign(Integer n, Integer value, true_type);

    template <typename InputIterator>
    void DoAssign(InputIterator first, InputIterator last, false_type);

    void DoAssignValues(size_type n, const value_type& value);

    template <typename Integer>
    void DoInsert(ListNodeBase* pNode, Integer n, Integer value, true_type);

    template <typename InputIterator>
    void DoInsert(ListNodeBase* pNode, InputIterator first, InputIterator last, false_type);

    void DoInsertValues(ListNodeBase* pNode, size_type n, const value_type& value);

    void DoInsertValue(ListNodeBase* pNode, const value_type& value);

    void DoErase(ListNodeBase* pNode);

}; // class list
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ListNodeBase
+ ///////////////////////////////////////////////////////////////////////
+
// Exchanges the positions of two anchor (sentinel) nodes, effectively swapping
// the contents of the two lists they anchor. Used by list::swap when the
// allocators compare equal. A plain shallow swap is insufficient because the
// neighboring nodes' back-pointers must be repaired afterward.
inline void ListNodeBase::swap(ListNodeBase& a, ListNodeBase& b)
{
    // Shallow-swap the two nodes' link values...
    const ListNodeBase temp(a);
    a = b;
    b = temp;

    // ...then fix up the neighbors. If a's next now points at b, the list b
    // anchored was empty, so a must become self-referential (the empty state).
    if(a.mpNext == &b)
        a.mpNext = a.mpPrev = &a;
    else
        a.mpNext->mpPrev = a.mpPrev->mpNext = &a;

    if(b.mpNext == &a)
        b.mpNext = b.mpPrev = &b;
    else
        b.mpNext->mpPrev = b.mpPrev->mpNext = &b;
}
+
+
// Relinks the node range [first, last) so it sits immediately before this node.
// Pure pointer surgery: no allocation, construction, or destruction occurs.
// Callers pass (x.mNode.mpNext, &x.mNode) to splice an entire list, so the
// range is half-open and excludes 'last' itself.
inline void ListNodeBase::splice(ListNodeBase* first, ListNodeBase* last)
{
    // We assume that [first, last] are not within our list.
    last->mpPrev->mpNext  = this;
    first->mpPrev->mpNext = last;
    this->mpPrev->mpNext  = first;

    // Rotate the three mpPrev pointers (this, last, first) to complete the relink.
    ListNodeBase* const pTemp = this->mpPrev;
    this->mpPrev  = last->mpPrev;
    last->mpPrev  = first->mpPrev;
    first->mpPrev = pTemp;
}
+
+
+ inline void ListNodeBase::reverse()
+ {
+ ListNodeBase* pNode = this;
+ do
+ {
+ ListNodeBase* const pTemp = pNode->mpNext;
+ pNode->mpNext = pNode->mpPrev;
+ pNode->mpPrev = pTemp;
+ pNode = pNode->mpPrev;
+ }
+ while(pNode != this);
+ }
+
+
+ inline void ListNodeBase::insert(ListNodeBase* pNext)
+ {
+ mpNext = pNext;
+ mpPrev = pNext->mpPrev;
+ pNext->mpPrev->mpNext = this;
+ pNext->mpPrev = this;
+ }
+
+
+ inline void ListNodeBase::remove()
+ {
+ mpPrev->mpNext = mpNext;
+ mpNext->mpPrev = mpPrev;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ListIterator
+ ///////////////////////////////////////////////////////////////////////
+
// Default constructor: produces a singular (but deterministically null) iterator.
template <typename T, typename Pointer, typename Reference>
inline ListIterator<T, Pointer, Reference>::ListIterator()
    : mpNode() // To consider: Do we really need to initialize mpNode?
{
    // Empty
}


// Constructs an iterator referring to the given node.
template <typename T, typename Pointer, typename Reference>
inline ListIterator<T, Pointer, Reference>::ListIterator(const ListNodeBase* pNode)
    : mpNode(static_cast<node_type*>((ListNode<T>*)const_cast<ListNodeBase*>(pNode))) // All this casting is in the name of making runtime debugging much easier on the user.
{
    // Empty
}


// Converting constructor: permits construction of a const_iterator from an iterator.
template <typename T, typename Pointer, typename Reference>
inline ListIterator<T, Pointer, Reference>::ListIterator(const iterator& x)
    : mpNode(const_cast<node_type*>(x.mpNode))
{
    // Empty
}


template <typename T, typename Pointer, typename Reference>
inline typename ListIterator<T, Pointer, Reference>::reference
ListIterator<T, Pointer, Reference>::operator*() const
{
    return mpNode->mValue;
}


template <typename T, typename Pointer, typename Reference>
inline typename ListIterator<T, Pointer, Reference>::pointer
ListIterator<T, Pointer, Reference>::operator->() const
{
    return &mpNode->mValue;
}


// Pre-increment: advance to the next node.
template <typename T, typename Pointer, typename Reference>
inline typename ListIterator<T, Pointer, Reference>::this_type&
ListIterator<T, Pointer, Reference>::operator++()
{
    mpNode = static_cast<node_type*>(mpNode->mpNext);
    return *this;
}


// Post-increment: advance to the next node, returning the prior position.
template <typename T, typename Pointer, typename Reference>
inline typename ListIterator<T, Pointer, Reference>::this_type
ListIterator<T, Pointer, Reference>::operator++(int)
{
    this_type temp(*this);
    mpNode = static_cast<node_type*>(mpNode->mpNext);
    return temp;
}


// Pre-decrement: retreat to the previous node.
template <typename T, typename Pointer, typename Reference>
inline typename ListIterator<T, Pointer, Reference>::this_type&
ListIterator<T, Pointer, Reference>::operator--()
{
    mpNode = static_cast<node_type*>(mpNode->mpPrev);
    return *this;
}


// Post-decrement: retreat to the previous node, returning the prior position.
template <typename T, typename Pointer, typename Reference>
inline typename ListIterator<T, Pointer, Reference>::this_type
ListIterator<T, Pointer, Reference>::operator--(int)
{
    this_type temp(*this);
    mpNode = static_cast<node_type*>(mpNode->mpPrev);
    return temp;
}
+
+
// The C++ defect report #179 requires that we support comparisons between const and non-const iterators.
// Thus we provide additional template parameters here to support this. The defect report does not
// require us to support comparisons between reverse_iterators and const_reverse_iterators.
template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
inline bool operator==(const ListIterator<T, PointerA, ReferenceA>& a,
                       const ListIterator<T, PointerB, ReferenceB>& b)
{
    // Two list iterators are equal iff they refer to the same node.
    return a.mpNode == b.mpNode;
}


template <typename T, typename PointerA, typename ReferenceA, typename PointerB, typename ReferenceB>
inline bool operator!=(const ListIterator<T, PointerA, ReferenceA>& a,
                       const ListIterator<T, PointerB, ReferenceB>& b)
{
    return a.mpNode != b.mpNode;
}


// We provide a version of operator!= for the case where the iterators are of the
// same type. This helps prevent ambiguity errors in the presence of rel_ops.
template <typename T, typename Pointer, typename Reference>
inline bool operator!=(const ListIterator<T, Pointer, Reference>& a,
                       const ListIterator<T, Pointer, Reference>& b)
{
    return a.mpNode != b.mpNode;
}
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // ListBase
+ ///////////////////////////////////////////////////////////////////////
+
// Default constructor: named allocator, empty (self-referential) anchor node.
template <typename T, typename Allocator>
inline ListBase<T, Allocator>::ListBase()
    : mNode(),
      #if EASTL_LIST_SIZE_CACHE
      mSize(0),
      #endif
      mAllocator(EASTL_LIST_DEFAULT_NAME)
{
    DoInit(); // Point the anchor node at itself, establishing the empty state.
}

template <typename T, typename Allocator>
inline ListBase<T, Allocator>::ListBase(const allocator_type& allocator)
    : mNode(),
      #if EASTL_LIST_SIZE_CACHE
      mSize(0),
      #endif
      mAllocator(allocator)
{
    DoInit(); // Point the anchor node at itself, establishing the empty state.
}


// Destructor: destroys and frees all nodes. (Intentionally non-virtual; ListBase
// is an implementation detail and is never deleted polymorphically here.)
template <typename T, typename Allocator>
inline ListBase<T, Allocator>::~ListBase()
{
    DoClear();
}


// Returns a mutable reference to the allocator (EASTL extension semantics).
template <typename T, typename Allocator>
typename ListBase<T, Allocator>::allocator_type&
ListBase<T, Allocator>::get_allocator()
{
    return mAllocator;
}


// Replaces the allocator. Safe only while the container holds no allocations
// made by the previous allocator — TODO confirm with container documentation.
template <typename T, typename Allocator>
inline void ListBase<T, Allocator>::set_allocator(const allocator_type& allocator)
{
    mAllocator = allocator;
}


// Allocates raw, uninitialized storage for one node; the caller constructs it.
template <typename T, typename Allocator>
inline typename ListBase<T, Allocator>::node_type*
ListBase<T, Allocator>::DoAllocateNode()
{
    return (node_type*)allocate_memory(mAllocator, sizeof(node_type), kAlignment, kAlignmentOffset);
}


// Frees node storage; assumes the node has already been destructed.
template <typename T, typename Allocator>
inline void ListBase<T, Allocator>::DoFreeNode(node_type* p)
{
    EASTLFree(mAllocator, p, sizeof(node_type));
}


// Resets the anchor node to the empty (self-referential) state. Does not free anything.
template <typename T, typename Allocator>
inline void ListBase<T, Allocator>::DoInit()
{
    mNode.mpNext = (ListNode<T>*)&mNode;
    mNode.mpPrev = (ListNode<T>*)&mNode;
}


// Destroys and deallocates every node. Leaves the anchor's links stale,
// so callers that keep using the container must follow up with DoInit().
template <typename T, typename Allocator>
inline void ListBase<T, Allocator>::DoClear()
{
    node_type* p = static_cast<node_type*>(mNode.mpNext);

    while(p != &mNode)
    {
        node_type* const pTemp = p;
        p = static_cast<node_type*>(p->mpNext); // Advance before destroying the node.
        pTemp->~node_type();
        EASTLFree(mAllocator, pTemp, sizeof(node_type));
    }
}
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // list
+ ///////////////////////////////////////////////////////////////////////
+
// Default constructor: empty list with the default-named allocator.
template <typename T, typename Allocator>
inline list<T, Allocator>::list()
    : base_type()
{
    // Empty
}


// Constructs an empty list using the given allocator.
template <typename T, typename Allocator>
inline list<T, Allocator>::list(const allocator_type& allocator)
    : base_type(allocator)
{
    // Empty
}


// Constructs a list of n default-constructed elements.
template <typename T, typename Allocator>
inline list<T, Allocator>::list(size_type n, const allocator_type& allocator)
    : base_type(allocator)
{
    //insert(iterator((ListNodeBase*)&mNode), n, value_type());
    DoInsertValues((ListNodeBase*)&mNode, n, value_type());
}


// Constructs a list of n copies of value.
template <typename T, typename Allocator>
inline list<T, Allocator>::list(size_type n, const value_type& value, const allocator_type& allocator)
    : base_type(allocator)
{
    // insert(iterator((ListNodeBase*)&mNode), n, value);
    DoInsertValues((ListNodeBase*)&mNode, n, value);
}


// Copy constructor: copies x's allocator, then deep-copies its elements.
template <typename T, typename Allocator>
inline list<T, Allocator>::list(const this_type& x)
    : base_type(x.mAllocator)
{
    //insert(iterator((ListNodeBase*)&mNode), const_iterator((ListNodeBase*)x.mNode.mpNext), const_iterator((ListNodeBase*)&x.mNode));
    DoInsert((ListNodeBase*)&mNode, const_iterator((const ListNodeBase*)x.mNode.mpNext), const_iterator((const ListNodeBase*)&x.mNode), false_type());
}


// Range constructor. is_integral dispatch distinguishes a genuine iterator
// range from the (n, value) form when both arguments are integers.
template <typename T, typename Allocator>
template <typename InputIterator>
list<T, Allocator>::list(InputIterator first, InputIterator last)
    : base_type(EASTL_LIST_DEFAULT_ALLOCATOR)
{
    //insert(iterator((ListNodeBase*)&mNode), first, last);
    DoInsert((ListNodeBase*)&mNode, first, last, is_integral<InputIterator>());
}
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::iterator
+ inline list<T, Allocator>::begin()
+ {
+ return iterator((ListNodeBase*)mNode.mpNext);
+ }
+
+
template <typename T, typename Allocator>
inline typename list<T, Allocator>::const_iterator
list<T, Allocator>::begin() const
{
    return const_iterator((ListNodeBase*)mNode.mpNext);
}


// end() refers to the anchor node itself, which acts as the one-past-the-end position.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::iterator
list<T, Allocator>::end()
{
    return iterator((ListNodeBase*)&mNode);
}


template <typename T, typename Allocator>
inline typename list<T, Allocator>::const_iterator
list<T, Allocator>::end() const
{
    return const_iterator((const ListNodeBase*)&mNode);
}


// reverse_iterator wraps the anchor; dereferencing steps back to the last element.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::reverse_iterator
list<T, Allocator>::rbegin()
{
    return reverse_iterator((ListNodeBase*)&mNode);
}


template <typename T, typename Allocator>
inline typename list<T, Allocator>::const_reverse_iterator
list<T, Allocator>::rbegin() const
{
    return const_reverse_iterator((const ListNodeBase*)&mNode);
}


template <typename T, typename Allocator>
inline typename list<T, Allocator>::reverse_iterator
list<T, Allocator>::rend()
{
    return reverse_iterator((ListNodeBase*)mNode.mpNext);
}
+
+
+ template <typename T, typename Allocator>
+ inline typename list<T, Allocator>::const_reverse_iterator
+ list<T, Allocator>::rend() const
+ {
+ return const_reverse_iterator((ListNodeBase*)mNode.mpNext);
+ }
+
+
// Returns a reference to the first element. Calling this on an empty container
// is invalid; depending on configuration it asserts or is (deliberately) permitted.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::reference
list<T, Allocator>::front()
{
    #if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
    #elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(static_cast<node_type*>(mNode.mpNext) == &mNode)) // Empty iff the anchor's next is the anchor itself.
            EASTL_FAIL_MSG("list::front -- empty container");
    #endif

    return static_cast<node_type*>(mNode.mpNext)->mValue;
}


template <typename T, typename Allocator>
inline typename list<T, Allocator>::const_reference
list<T, Allocator>::front() const
{
    #if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
    #elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(static_cast<node_type*>(mNode.mpNext) == &mNode))
            EASTL_FAIL_MSG("list::front -- empty container");
    #endif

    return static_cast<node_type*>(mNode.mpNext)->mValue;
}


// Returns a reference to the last element (the anchor's predecessor).
template <typename T, typename Allocator>
inline typename list<T, Allocator>::reference
list<T, Allocator>::back()
{
    #if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
    #elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(static_cast<node_type*>(mNode.mpNext) == &mNode))
            EASTL_FAIL_MSG("list::back -- empty container");
    #endif

    return static_cast<node_type*>(mNode.mpPrev)->mValue;
}


template <typename T, typename Allocator>
inline typename list<T, Allocator>::const_reference
list<T, Allocator>::back() const
{
    #if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
    #elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(static_cast<node_type*>(mNode.mpNext) == &mNode))
            EASTL_FAIL_MSG("list::back -- empty container");
    #endif

    return static_cast<node_type*>(mNode.mpPrev)->mValue;
}
+
+
// O(1): the list is empty iff the anchor node points to itself.
template <typename T, typename Allocator>
inline bool list<T, Allocator>::empty() const
{
    return static_cast<node_type*>(mNode.mpNext) == &mNode;
}


// Returns the element count. O(1) when EASTL_LIST_SIZE_CACHE is on; otherwise
// O(n), counting nodes by walking the ring from the anchor back to itself.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::size_type
list<T, Allocator>::size() const
{
    #if EASTL_LIST_SIZE_CACHE
        return mSize;
    #else
        #if EASTL_DEBUG
            // Explicit loop kept in debug builds for easy stepping/inspection.
            const ListNodeBase* p = (ListNodeBase*)mNode.mpNext;
            size_type n = 0;
            while(p != (const ListNodeBase*)&mNode)
            {
                ++n;
                p = (const ListNodeBase*)p->mpNext;
            }
            return n;
        #else
            // The following optimizes to slightly better code than the code above.
            return (size_type)eastl::distance(const_iterator((const ListNodeBase*)mNode.mpNext), const_iterator((const ListNodeBase*)&mNode));
        #endif
    #endif
}
+
+
// Copy assignment. Reuses existing nodes by assigning element-wise, then
// erases any surplus or inserts any shortfall — avoiding a full clear/rebuild.
template <typename T, typename Allocator>
typename list<T, Allocator>::this_type&
list<T, Allocator>::operator=(const this_type& x)
{
    if(this != &x) // Self-assignment guard.
    {
        #if EASTL_ALLOCATOR_COPY_ENABLED
            mAllocator = x.mAllocator;
        #endif

        iterator       current((ListNodeBase*)mNode.mpNext);
        const_iterator first((const ListNodeBase*)x.mNode.mpNext);
        const_iterator last((const ListNodeBase*)&x.mNode);

        // Overwrite the overlap of the two sequences in place.
        while((current.mpNode != &mNode) && (first != last))
        {
            *current = *first;
            ++first;
            ++current;
        }

        if(first == last)
            erase(current, (ListNodeBase*)&mNode);   // We had extra elements; drop them.
        else
            insert((ListNodeBase*)&mNode, first, last); // x had extra elements; append copies.
    }
    return *this;
}
+
+
// Replaces the contents with n copies of value.
template <typename T, typename Allocator>
inline void list<T, Allocator>::assign(size_type n, const value_type& value)
{
    DoAssignValues(n, value);
}


// It turns out that the C++ std::list specifies a two argument
// version of assign that takes (int size, int value). These are not
// iterators, so we need to do a template compiler trick to do the right thing.
template <typename T, typename Allocator>
template <typename InputIterator>
inline void list<T, Allocator>::assign(InputIterator first, InputIterator last)
{
    // is_integral dispatch selects the (n, value) overload for integer arguments.
    DoAssign(first, last, is_integral<InputIterator>());
}
+
+
// Destroys and frees all nodes, then restores the empty anchor state.
template <typename T, typename Allocator>
inline void list<T, Allocator>::clear()
{
    DoClear();
    DoInit(); // DoClear leaves the anchor's links stale; re-establish the empty ring.
    #if EASTL_LIST_SIZE_CACHE
        mSize = 0;
    #endif
}


template <typename T, typename Allocator>
inline void list<T, Allocator>::reset()
{
    // The reset function is a special extension function which unilaterally
    // resets the container to an empty state without freeing the memory of
    // the contained objects. This is useful for very quickly tearing down a
    // container built into scratch memory.
    DoInit();
    #if EASTL_LIST_SIZE_CACHE
        mSize = 0;
    #endif
}
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::resize(size_type n, const value_type& value)
+ {
+ iterator current((ListNodeBase*)mNode.mpNext);
+ size_type i = 0;
+
+ while((current.mpNode != &mNode) && (i < n))
+ {
+ ++current;
+ ++i;
+ }
+ if(i == n)
+ erase(current, (ListNodeBase*)&mNode);
+ else
+ insert((ListNodeBase*)&mNode, n - i, value);
+ }
+
+
// Resizes to n elements, default-constructing any new elements.
template <typename T, typename Allocator>
inline void list<T, Allocator>::resize(size_type n)
{
    resize(n, value_type());
}
+
+
// Inserts a copy of value at the front (before the anchor's successor).
template <typename T, typename Allocator>
inline void list<T, Allocator>::push_front(const value_type& value)
{
    DoInsertValue((ListNodeBase*)mNode.mpNext, value);
}


// EASTL extension: default-constructs a new front element and returns a
// reference to it, avoiding a copy for expensive types.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::reference
list<T, Allocator>::push_front()
{
    node_type* const pNode = DoCreateNode();
    ((ListNodeBase*)pNode)->insert((ListNodeBase*)mNode.mpNext);
    #if EASTL_LIST_SIZE_CACHE
        ++mSize;
    #endif
    return static_cast<node_type*>(mNode.mpNext)->mValue; // Same as return front();
}


// EASTL extension: links in a raw, unconstructed node and returns a pointer to
// its value storage. The caller is responsible for constructing the value.
template <typename T, typename Allocator>
inline void* list<T, Allocator>::push_front_uninitialized()
{
    node_type* const pNode = DoAllocateNode();
    ((ListNodeBase*)pNode)->insert((ListNodeBase*)mNode.mpNext);
    #if EASTL_LIST_SIZE_CACHE
        ++mSize;
    #endif
    return &pNode->mValue;
}


// Removes the first element. Asserts (when enabled) on an empty container.
template <typename T, typename Allocator>
inline void list<T, Allocator>::pop_front()
{
    #if EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(static_cast<node_type*>(mNode.mpNext) == &mNode))
            EASTL_FAIL_MSG("list::pop_front -- empty container");
    #endif

    DoErase((ListNodeBase*)mNode.mpNext);
}


// Inserts a copy of value at the back (just before the anchor).
template <typename T, typename Allocator>
inline void list<T, Allocator>::push_back(const value_type& value)
{
    DoInsertValue((ListNodeBase*)&mNode, value);
}


// EASTL extension: default-constructs a new back element and returns a reference to it.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::reference
list<T, Allocator>::push_back()
{
    node_type* const pNode = DoCreateNode();
    ((ListNodeBase*)pNode)->insert((ListNodeBase*)&mNode);
    #if EASTL_LIST_SIZE_CACHE
        ++mSize;
    #endif
    return static_cast<node_type*>(mNode.mpPrev)->mValue; // Same as return back();
}


// EASTL extension: links in a raw, unconstructed back node; the caller constructs the value.
template <typename T, typename Allocator>
inline void* list<T, Allocator>::push_back_uninitialized()
{
    node_type* const pNode = DoAllocateNode();
    ((ListNodeBase*)pNode)->insert((ListNodeBase*)&mNode);
    #if EASTL_LIST_SIZE_CACHE
        ++mSize;
    #endif
    return &pNode->mValue;
}


// Removes the last element. Asserts (when enabled) on an empty container.
template <typename T, typename Allocator>
inline void list<T, Allocator>::pop_back()
{
    #if EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(static_cast<node_type*>(mNode.mpNext) == &mNode))
            EASTL_FAIL_MSG("list::pop_back -- empty container");
    #endif

    DoErase((ListNodeBase*)mNode.mpPrev);
}
+
+
// Inserts a default-constructed element before position; returns an iterator to it.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::iterator
list<T, Allocator>::insert(iterator position)
{
    node_type* const pNode = DoCreateNode(value_type());
    ((ListNodeBase*)pNode)->insert((ListNodeBase*)position.mpNode);
    #if EASTL_LIST_SIZE_CACHE
        ++mSize;
    #endif
    return (ListNodeBase*)pNode; // Implicitly converts to iterator.
}


// Inserts a copy of value before position; returns an iterator to the new element.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::iterator
list<T, Allocator>::insert(iterator position, const value_type& value)
{
    node_type* const pNode = DoCreateNode(value);
    ((ListNodeBase*)pNode)->insert((ListNodeBase*)position.mpNode);
    #if EASTL_LIST_SIZE_CACHE
        ++mSize;
    #endif
    return (ListNodeBase*)pNode; // Implicitly converts to iterator.
}


// Inserts n copies of value before position.
template <typename T, typename Allocator>
inline void list<T, Allocator>::insert(iterator position, size_type n, const value_type& value)
{
    // To do: Get rid of DoInsertValues and put its implementation directly here.
    DoInsertValues((ListNodeBase*)position.mpNode, n, value);
}


// Inserts a range before position; is_integral dispatch disambiguates (n, value).
template <typename T, typename Allocator>
template <typename InputIterator>
inline void list<T, Allocator>::insert(iterator position, InputIterator first, InputIterator last)
{
    DoInsert((ListNodeBase*)position.mpNode, first, last, is_integral<InputIterator>());
}
+
+
// Erases the element at position; returns an iterator to the following element.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::iterator
list<T, Allocator>::erase(iterator position)
{
    ++position;                                    // Advance past the doomed node first...
    DoErase((ListNodeBase*)position.mpNode->mpPrev); // ...then erase its (now prior) node.
    return position;
}
+
+
+ template <typename T, typename Allocator>
+ typename list<T, Allocator>::iterator
+ list<T, Allocator>::erase(iterator first, iterator last)
+ {
+ while(first != last)
+ first = erase(first);
+ return last;
+ }
+
+
// Erases the element a reverse_iterator refers to. (++position).base() maps the
// reverse position onto the forward iterator addressing the same element.
template <typename T, typename Allocator>
inline typename list<T, Allocator>::reverse_iterator
list<T, Allocator>::erase(reverse_iterator position)
{
    return reverse_iterator(erase((++position).base()));
}


template <typename T, typename Allocator>
typename list<T, Allocator>::reverse_iterator
list<T, Allocator>::erase(reverse_iterator first, reverse_iterator last)
{
    // Version which erases in order from first to last.
    // difference_type i(first.base() - last.base());
    // while(i--)
    //     first = erase(first);
    // return first;

    // Version which erases in order from last to first, but is slightly more efficient:
    return reverse_iterator(erase((++last).base(), (++first).base()));
}
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::remove(const value_type& value)
+ {
+ iterator current((ListNodeBase*)mNode.mpNext);
+
+ while(current.mpNode != &mNode)
+ {
+ if(EASTL_LIKELY(!(*current == value)))
+ ++current; // We have duplicate '++current' statements here and below, but the logic here forces this.
+ else
+ {
+ ++current;
+ DoErase((ListNodeBase*)current.mpNode->mpPrev);
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename Predicate>
+ inline void list<T, Allocator>::remove_if(Predicate predicate)
+ {
+ for(iterator first((ListNodeBase*)mNode.mpNext), last((ListNodeBase*)&mNode); first != last; )
+ {
+ iterator temp(first);
+ ++temp;
+ if(predicate(first.mpNode->mValue))
+ DoErase((ListNodeBase*)first.mpNode);
+ first = temp;
+ }
+ }
+
+
// Reverses element order in place by delegating to the node-level ring reversal.
template <typename T, typename Allocator>
inline void list<T, Allocator>::reverse()
{
    ((ListNodeBase&)mNode).reverse();
}
+
+
// Moves all of x's elements to before position. O(1) pointer relinking.
template <typename T, typename Allocator>
inline void list<T, Allocator>::splice(iterator position, this_type& x)
{
    // Splicing operations cannot succeed if the two containers use unequal allocators.
    // This issue is not addressed in the C++ 1998 standard but is discussed in the
    // LWG defect reports, such as #431. There is no simple solution to this problem.
    // One option is to throw an exception. For now our answer is simply: don't do this.
    // EASTL_ASSERT(mAllocator == x.mAllocator); // Disabled because our member sort function uses splice but with allocators that may be unequal. There isn't a simple workaround aside from disabling this assert.

    // Disabled until the performance hit of this code is deemed worthwhile and until we are sure we want to disallow unequal allocators.
    //#if EASTL_EXCEPTIONS_ENABLED
    //    if(EASTL_UNLIKELY(!(mAllocator == x.mAllocator)))
    //        throw std::runtime_error("list::splice -- unequal allocators");
    //#endif

    #if EASTL_LIST_SIZE_CACHE
        if(x.mSize)
        {
            ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)x.mNode.mpNext, (ListNodeBase*)&x.mNode);
            mSize += x.mSize;  // Transfer the cached count wholesale.
            x.mSize = 0;
        }
    #else
        if(!x.empty())
            ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)x.mNode.mpNext, (ListNodeBase*)&x.mNode);
    #endif
}


// Moves the single element at i (from x) to before position. O(1).
template <typename T, typename Allocator>
inline void list<T, Allocator>::splice(iterator position, list& x, iterator i)
{
    (void)x; // Avoid potential unused variable warnings.

    // See notes in the other splice function regarding this assertion.
    // EASTL_ASSERT(mAllocator == x.mAllocator); // Disabled because our member sort function uses splice but with allocators that may be unequal. There isn't a simple workaround aside from disabling this assert.

    // Disabled until the performance hit of this code is deemed worthwhile and until we are sure we want to disallow unequal allocators.
    //#if EASTL_EXCEPTIONS_ENABLED
    //    if(EASTL_UNLIKELY(!(mAllocator == x.mAllocator)))
    //        throw std::runtime_error("list::splice -- unequal allocators");
    //#endif

    iterator i2(i);
    ++i2;
    if((position != i) && (position != i2)) // A no-op if the node is already in place.
    {
        ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)i.mpNode, (ListNodeBase*)i2.mpNode);

        #if EASTL_LIST_SIZE_CACHE
            ++mSize;
            --x.mSize; // Self-splice nets to zero since mSize and x.mSize alias.
        #endif
    }
}


// Moves [first, last) from x to before position. O(1), except O(n) in the
// range length when size caching requires counting the transferred nodes.
template <typename T, typename Allocator>
inline void list<T, Allocator>::splice(iterator position, this_type& x, iterator first, iterator last)
{
    (void)x; // Avoid potential unused variable warnings.

    // See notes in the other splice function regarding this assertion.
    // EASTL_ASSERT(mAllocator == x.mAllocator); // Disabled because our member sort function uses splice but with allocators that may be unequal. There isn't a simple workaround aside from disabling this assert.

    // Disabled until the performance hit of this code is deemed worthwhile and until we are sure we want to disallow unequal allocators.
    //#if EASTL_EXCEPTIONS_ENABLED
    //    if(EASTL_UNLIKELY(!(mAllocator == x.mAllocator)))
    //        throw std::runtime_error("list::splice -- unequal allocators");
    //#endif

    #if EASTL_LIST_SIZE_CACHE
        const size_type n = (size_type)eastl::distance(first, last);

        if(n)
        {
            ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)first.mpNode, (ListNodeBase*)last.mpNode);
            mSize += n;
            x.mSize -= n;
        }
    #else
        if(first != last)
            ((ListNodeBase*)position.mpNode)->splice((ListNodeBase*)first.mpNode, (ListNodeBase*)last.mpNode);
    #endif
}
+
+
// Swaps contents with x. O(1) anchor-node swap when allocators are equal;
// otherwise falls back to an O(n) copy-based exchange.
template <typename T, typename Allocator>
inline void list<T, Allocator>::swap(this_type& x)
{
    if(mAllocator == x.mAllocator) // If allocators are equivalent...
    {
        // We leave mAllocator as-is.
        ListNodeBase::swap((ListNodeBase&)mNode, (ListNodeBase&)x.mNode); // We need to implement a special swap because we can't do a shallow swap.

        #if EASTL_LIST_SIZE_CACHE
            eastl::swap(mSize, x.mSize);
        #endif
    }
    else // else swap the contents.
    {
        const this_type temp(*this); // Can't call eastl::swap because that would
        *this = x;                   // itself call this member swap function.
        x = temp;

        // Alternative implementation:
        //const iterator pos((ListNodeBase*)mNode.mpNext);
        //splice(pos, x);
        //x.splice(x.begin(), *this, pos, iterator((ListNodeBase*)&mNode));
    }
}
+
+
// Merges the (assumed sorted) list x into this (assumed sorted) list via
// node splicing; x is left empty. Stable: equal elements keep this list's first.
template <typename T, typename Allocator>
void list<T, Allocator>::merge(this_type& x)
{
    if(this != &x) // Merging with self would loop forever; guard it.
    {
        iterator       first(begin());
        iterator       firstX(x.begin());
        const iterator last(end());
        const iterator lastX(x.end());

        while((first != last) && (firstX != lastX))
        {
            if(*firstX < *first)
            {
                iterator next(firstX);

                splice(first, x, firstX, ++next); // Move the single node firstX before first.
                firstX = next;
            }
            else
                ++first;
        }

        if(firstX != lastX)
            splice(last, x, firstX, lastX); // Append x's remaining tail.
    }
}


// As above, but ordered by the user-supplied comparison.
template <typename T, typename Allocator>
template <typename Compare>
void list<T, Allocator>::merge(this_type& x, Compare compare)
{
    if(this != &x)
    {
        iterator       first(begin());
        iterator       firstX(x.begin());
        const iterator last(end());
        const iterator lastX(x.end());

        while((first != last) && (firstX != lastX))
        {
            if(compare(*firstX, *first))
            {
                iterator next(firstX);

                splice(first, x, firstX, ++next);
                firstX = next;
            }
            else
                ++first;
        }

        if(firstX != lastX)
            splice(last, x, firstX, lastX);
    }
}
+
+
+ template <typename T, typename Allocator>
+ void list<T, Allocator>::unique()
+ {
+ iterator first(begin());
+ const iterator last(end());
+
+ if(first != last)
+ {
+ iterator next(first);
+
+ while(++next != last)
+ {
+ if(*first == *next)
+ DoErase((ListNodeBase*)next.mpNode);
+ else
+ first = next;
+ next = first;
+ }
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename BinaryPredicate>
+ void list<T, Allocator>::unique(BinaryPredicate predicate)
+ {
+ iterator first(begin());
+ const iterator last(end());
+
+ if(first != last)
+ {
+ iterator next(first);
+
+ while(++next != last)
+ {
+ if(predicate(*first, *next))
+ DoErase((ListNodeBase*)next.mpNode);
+ else
+ first = next;
+ next = first;
+ }
+ }
+ }
+
+
// Sorts the list via recursive merge sort built on splice/merge, so nodes are
// relinked rather than elements copied. Stable, as with std::list::sort.
template <typename T, typename Allocator>
void list<T, Allocator>::sort()
{
    // We implement the algorithm employed by Chris Caulfield whereby we use recursive
    // function calls to sort the list. The sorting of a very large list may fail due to stack overflow
    // if the stack is exhausted. The limit depends on the platform and the available stack space.

    // Easier-to-understand version of the 'if' statement:
    // iterator i(begin());
    // if((i != end()) && (++i != end())) // If the size is >= 2 (without calling the more expensive size() function)...

    // Faster, more inlinable version of the 'if' statement:
    if((static_cast<node_type*>(mNode.mpNext) != &mNode) &&
       (static_cast<node_type*>(mNode.mpNext) != static_cast<node_type*>(mNode.mpPrev)))
    {
        // We may have a stack space problem here if sizeof(this_type) is large (usually due to
        // usage of a fixed_list). The only current resolution is to find an alternative way of
        // doing things. I (Paul Pedriana) believe that the best long-term solution to this problem
        // is to revise this sort function to not use this_type but instead use a ListNodeBase
        // which involves no allocators and sort at that level, entirely with node pointers.

        // Split the array into 2 roughly equal halves.
        this_type leftList(get_allocator());     // This should cause no memory allocation.
        this_type rightList(get_allocator());

        // We find an iterator which is in the middle of the list. The fastest way to do
        // this is to iterate from the base node both forwards and backwards with two
        // iterators and stop when they meet each other. Recall that our size() function
        // is not O(1) but is instead O(n), at least when EASTL_LIST_SIZE_CACHE is disabled.
        #if EASTL_LIST_SIZE_CACHE
            iterator mid(begin());
            eastl::advance(mid, size() / 2);
        #else
            iterator mid(begin()), tail(end());

            while((mid != tail) && (++mid != tail))
                --tail;
        #endif

        // Move the left half of this into leftList and the right half into rightList.
        leftList.splice(leftList.begin(), *this, begin(), mid);
        rightList.splice(rightList.begin(), *this);

        // Sort the sub-lists.
        leftList.sort();
        rightList.sort();

        // Merge the two halves into this list.
        splice(begin(), leftList);
        merge(rightList);
    }
}
+
+
+	template <typename T, typename Allocator>
+	template<typename Compare>
+	void list<T, Allocator>::sort(Compare compare)
+	{
+		// Same merge sort as sort() above, but ordering is defined by 'compare'.
+		// We implement the algorithm employed by Chris Caulfield whereby we use recursive
+		// function calls to sort the list. The sorting of a very large list may fail due to stack overflow
+		// if the stack is exhausted. The limit depends on the platform and the available stack space.
+
+		// Easier-to-understand version of the 'if' statement:
+		// iterator i(begin());
+		// if((i != end()) && (++i != end())) // If the size is >= 2 (without calling the more expensive size() function)...
+
+		// Faster, more inlinable version of the 'if' statement:
+		if((static_cast<node_type*>(mNode.mpNext) != &mNode) &&
+		   (static_cast<node_type*>(mNode.mpNext) != static_cast<node_type*>(mNode.mpPrev)))
+		{
+			// We may have a stack space problem here if sizeof(this_type) is large (usually due to
+			// usage of a fixed_list). The only current resolution is to find an alternative way of
+			// doing things. I (Paul Pedriana) believe that the best long-term solution to this problem
+			// is to revise this sort function to not use this_type but instead use a ListNodeBase
+			// which involves no allocators and sort at that level, entirely with node pointers.
+
+			// Split the array into 2 roughly equal halves.
+			this_type leftList(get_allocator());     // This should cause no memory allocation.
+			this_type rightList(get_allocator());
+
+			// We find an iterator which is in the middle of the list. The fastest way to do
+			// this is to iterate from the base node both forwards and backwards with two
+			// iterators and stop when they meet each other. Recall that our size() function
+			// is not O(1) but is instead O(n), at least when EASTL_LIST_SIZE_CACHE is disabled.
+			#if EASTL_LIST_SIZE_CACHE
+				iterator mid(begin());
+				eastl::advance(mid, size() / 2);
+			#else
+				iterator mid(begin()), tail(end());
+
+				while((mid != tail) && (++mid != tail)) // Walk mid forward two steps for every one step tail walks back.
+					--tail;
+			#endif
+
+			// Move the left half of this into leftList and the right half into rightList.
+			leftList.splice(leftList.begin(), *this, begin(), mid);
+			rightList.splice(rightList.begin(), *this);
+
+			// Sort the sub-lists.
+			leftList.sort(compare);
+			rightList.sort(compare);
+
+			// Merge the two halves into this list.
+			splice(begin(), leftList);
+			merge(rightList, compare);
+		}
+	}
+
+
+	template <typename T, typename Allocator>
+	inline typename list<T, Allocator>::node_type*
+	list<T, Allocator>::DoCreateNode(const value_type& value)
+	{
+		// Allocates a node and copy-constructs 'value' into it via placement new.
+		// If the copy constructor throws (and exceptions are enabled), the node
+		// memory is released before the exception is re-thrown.
+		node_type* const pNode = DoAllocateNode();
+
+		#if EASTL_EXCEPTIONS_ENABLED
+			try
+			{
+				::new(&pNode->mValue) value_type(value);
+			}
+			catch(...)
+			{
+				DoFreeNode(pNode);
+				throw;
+			}
+		#else
+			::new(&pNode->mValue) value_type(value);
+		#endif
+
+		return pNode;
+	}
+
+
+	template <typename T, typename Allocator>
+	inline typename list<T, Allocator>::node_type*
+	list<T, Allocator>::DoCreateNode()
+	{
+		// Allocates a node and constructs a default element in it, freeing the
+		// node if construction throws (when exceptions are enabled).
+		node_type* const pNode = DoAllocateNode();
+
+		#if EASTL_EXCEPTIONS_ENABLED
+			try
+			{
+				::new(&pNode->mValue) value_type();
+			}
+			catch(...)
+			{
+				DoFreeNode(pNode);
+				throw;
+			}
+		#else
+			// NOTE(review): this branch uses 'value_type' (default-initialization)
+			// while the exceptions branch uses 'value_type()' (value-initialization);
+			// for PODs these leave different contents — confirm this is intentional.
+			::new(&pNode->mValue) value_type;
+		#endif
+
+		return pNode;
+	}
+
+
+	template <typename T, typename Allocator>
+	template <typename Integer>
+	inline void list<T, Allocator>::DoAssign(Integer n, Integer value, true_type)
+	{
+		// Integer-dispatch overload: assign(n, value) was called with two integers,
+		// so treat them as a count and a value rather than as an iterator range.
+		DoAssignValues(static_cast<size_type>(n), static_cast<value_type>(value));
+	}
+
+
+	template <typename T, typename Allocator>
+	template <typename InputIterator>
+	void list<T, Allocator>::DoAssign(InputIterator first, InputIterator last, false_type)
+	{
+		// Assigns the range [first, last), reusing existing nodes where possible:
+		// overwrite in place while both sequences have elements, then either erase
+		// the surplus nodes or insert the remaining source elements.
+		node_type* pNode = static_cast<node_type*>(mNode.mpNext);
+
+		for(; (pNode != &mNode) && (first != last); ++first)
+		{
+			pNode->mValue = *first;
+			pNode = static_cast<node_type*>(pNode->mpNext);
+		}
+
+		if(first == last)
+			erase(iterator((ListNodeBase*)pNode), (ListNodeBase*)&mNode); // Source exhausted: trim leftover nodes.
+		else
+			DoInsert((ListNodeBase*)&mNode, first, last, false_type());   // List exhausted: append the rest.
+	}
+
+
+	template <typename T, typename Allocator>
+	void list<T, Allocator>::DoAssignValues(size_type n, const value_type& value)
+	{
+		// Assigns n copies of 'value', reusing existing nodes where possible,
+		// then inserting extras or erasing surplus nodes as needed.
+		node_type* pNode = static_cast<node_type*>(mNode.mpNext);
+
+		for(; (pNode != &mNode) && (n > 0); --n)
+		{
+			pNode->mValue = value;
+			pNode = static_cast<node_type*>(pNode->mpNext);
+		}
+
+		if(n)
+			DoInsertValues((ListNodeBase*)&mNode, n, value);              // Still need n more copies appended.
+		else
+			erase(iterator((ListNodeBase*)pNode), (ListNodeBase*)&mNode); // Trim the unused remainder.
+	}
+
+
+	template <typename T, typename Allocator>
+	template <typename Integer>
+	inline void list<T, Allocator>::DoInsert(ListNodeBase* pNode, Integer n, Integer value, true_type)
+	{
+		// Integer-dispatch overload: insert(pos, n, value) called with two integers
+		// is a count/value insertion, not an iterator-range insertion.
+		DoInsertValues(pNode, static_cast<size_type>(n), static_cast<value_type>(value));
+	}
+
+
+	template <typename T, typename Allocator>
+	template <typename InputIterator>
+	inline void list<T, Allocator>::DoInsert(ListNodeBase* pNode, InputIterator first, InputIterator last, false_type)
+	{
+		// Inserts copies of [first, last) in front of pNode, one node per element.
+		for(; first != last; ++first)
+			DoInsertValue(pNode, *first);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline void list<T, Allocator>::DoInsertValues(ListNodeBase* pNode, size_type n, const value_type& value)
+	{
+		// Inserts n copies of 'value' in front of pNode.
+		for(; n > 0; --n)
+			DoInsertValue(pNode, value);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline void list<T, Allocator>::DoInsertValue(ListNodeBase* pNode, const value_type& value)
+	{
+		// Creates a single node holding 'value' and links it in front of pNode.
+		node_type* const pNodeNew = DoCreateNode(value);
+		((ListNodeBase*)pNodeNew)->insert((ListNodeBase*)pNode);
+		#if EASTL_LIST_SIZE_CACHE
+			++mSize; // Keep the cached size in sync.
+		#endif
+	}
+
+
+	template <typename T, typename Allocator>
+	inline void list<T, Allocator>::DoErase(ListNodeBase* pNode)
+	{
+		// Unlinks pNode from the list, destroys the contained value, and returns
+		// the node's memory to the allocator.
+		pNode->remove();
+		((node_type*)pNode)->~node_type();
+		DoFreeNode(((node_type*)pNode));
+		#if EASTL_LIST_SIZE_CACHE
+			--mSize; // Keep the cached size in sync.
+		#endif
+	}
+
+
+	template <typename T, typename Allocator>
+	inline bool list<T, Allocator>::validate() const
+	{
+		// Sanity-checks the container. Currently only verifies (when the size
+		// cache is enabled) that the cached size matches an actual node count.
+		#if EASTL_LIST_SIZE_CACHE
+			size_type n = 0;
+
+			for(const_iterator i(begin()), iEnd(end()); i != iEnd; ++i)
+				++n;
+
+			if(n != mSize)
+				return false;
+		#endif
+
+		// To do: More validation.
+		return true;
+	}
+
+
+	template <typename T, typename Allocator>
+	inline int list<T, Allocator>::validate_iterator(const_iterator i) const
+	{
+		// Returns iterator-status flags for i: dereferenceable if it points at an
+		// element of this list, valid-but-not-dereferenceable if it equals end(),
+		// otherwise isf_none. O(n): scans the whole list.
+		// To do: Come up with a more efficient mechanism of doing this.
+
+		for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
+		{
+			if(temp == i)
+				return (isf_valid | isf_current | isf_can_dereference);
+		}
+
+		if(i == end())
+			return (isf_valid | isf_current);
+
+		return isf_none;
+	}
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+	template <typename T, typename Allocator>
+	bool operator==(const list<T, Allocator>& a, const list<T, Allocator>& b)
+	{
+		// Element-wise equality. When the size cache is enabled we can reject
+		// mismatched sizes in O(1) before comparing elements; otherwise we walk
+		// both lists in lock-step and require both to end at the same time.
+		typename list<T, Allocator>::const_iterator ia   = a.begin();
+		typename list<T, Allocator>::const_iterator ib   = b.begin();
+		typename list<T, Allocator>::const_iterator enda = a.end();
+
+		#if EASTL_LIST_SIZE_CACHE
+			if(a.size() == b.size())
+			{
+				while((ia != enda) && (*ia == *ib))
+				{
+					++ia;
+					++ib;
+				}
+				return (ia == enda);
+			}
+			return false;
+		#else
+			typename list<T, Allocator>::const_iterator endb = b.end();
+
+			while((ia != enda) && (ib != endb) && (*ia == *ib))
+			{
+				++ia;
+				++ib;
+			}
+			return (ia == enda) && (ib == endb);
+		#endif
+	}
+
+	template <typename T, typename Allocator>
+	bool operator<(const list<T, Allocator>& a, const list<T, Allocator>& b)
+	{
+		// Standard lexicographical ordering over the elements.
+		return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+	}
+
+	template <typename T, typename Allocator>
+	bool operator!=(const list<T, Allocator>& a, const list<T, Allocator>& b)
+	{
+		// Defined in terms of operator==.
+		return !(a == b);
+	}
+
+	template <typename T, typename Allocator>
+	bool operator>(const list<T, Allocator>& a, const list<T, Allocator>& b)
+	{
+		// Defined in terms of operator<.
+		return b < a;
+	}
+
+	template <typename T, typename Allocator>
+	bool operator<=(const list<T, Allocator>& a, const list<T, Allocator>& b)
+	{
+		// Defined in terms of operator<.
+		return !(b < a);
+	}
+
+	template <typename T, typename Allocator>
+	bool operator>=(const list<T, Allocator>& a, const list<T, Allocator>& b)
+	{
+		// Defined in terms of operator<.
+		return !(a < b);
+	}
+
+	template <typename T, typename Allocator>
+	void swap(list<T, Allocator>& a, list<T, Allocator>& b)
+	{
+		// Free-function swap forwarding to the member swap.
+		a.swap(b);
+	}
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/map.h b/UnknownVersion/include/EASTL/map.h
new file mode 100644
index 0000000..d94ca37
--- /dev/null
+++ b/UnknownVersion/include/EASTL/map.h
@@ -0,0 +1,520 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/map.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written by Paul Pedriana.
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_MAP_H
+#define EASTL_MAP_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/red_black_tree.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+
+
+namespace eastl
+{
+
+ /// EASTL_MAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MAP_DEFAULT_NAME
+ #define EASTL_MAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " map" // Unless the user overrides something, this is "EASTL map".
+ #endif
+
+
+ /// EASTL_MULTIMAP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_MULTIMAP_DEFAULT_NAME
+ #define EASTL_MULTIMAP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " multimap" // Unless the user overrides something, this is "EASTL multimap".
+ #endif
+
+
+ /// EASTL_MAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MAP_DEFAULT_ALLOCATOR
+ #define EASTL_MAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MAP_DEFAULT_NAME)
+ #endif
+
+ /// EASTL_MULTIMAP_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_MULTIMAP_DEFAULT_ALLOCATOR
+ #define EASTL_MULTIMAP_DEFAULT_ALLOCATOR allocator_type(EASTL_MULTIMAP_DEFAULT_NAME)
+ #endif
+
+
+
+ /// map
+ ///
+ /// Implements a canonical map.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a map container, your pool
+ /// needs to contain items of type map::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+	///     typedef map<Widget, int, less<Widget>, MemoryPool> WidgetMap;  // Declare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
+	template <typename Key, typename T, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+	class map
+		: public rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator, eastl::use_first<eastl::pair<const Key, T> >, true, true>
+	{
+	public:
+		// The rbtree base is parameterized with bUniqueKeys=true: a map holds
+		// at most one element per key.
+		typedef rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator,
+						eastl::use_first<eastl::pair<const Key, T> >, true, true>   base_type;
+		typedef map<Key, T, Compare, Allocator>                                     this_type;
+		typedef typename base_type::size_type                                       size_type;
+		typedef typename base_type::key_type                                        key_type;
+		typedef T                                                                   mapped_type;
+		typedef typename base_type::value_type                                      value_type;
+		typedef typename base_type::node_type                                       node_type;
+		typedef typename base_type::iterator                                        iterator;
+		typedef typename base_type::const_iterator                                  const_iterator;
+		typedef typename base_type::allocator_type                                  allocator_type;
+		typedef typename base_type::insert_return_type                              insert_return_type;
+		typedef typename base_type::extract_key                                     extract_key;
+		// Other types are inherited from the base class.
+
+		using base_type::begin;
+		using base_type::end;
+		using base_type::find;
+		using base_type::lower_bound;
+		using base_type::upper_bound;
+		using base_type::mCompare;
+
+		#if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x has a bug which we work around.
+			using base_type::insert;
+			using base_type::erase;
+		#endif
+
+	public:
+		map(const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+		map(const Compare& compare, const allocator_type& allocator = EASTL_MAP_DEFAULT_ALLOCATOR);
+		map(const this_type& x);
+
+		template <typename Iterator>
+		map(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To consider: Make a second version of this function without a default arg.
+
+	public:
+		/// This is an extension to the C++ standard. We insert a default-constructed
+		/// element with the given key. The reason for this is that we can avoid the
+		/// potentially expensive operation of creating and/or copying a mapped_type
+		/// object on the stack.
+		insert_return_type insert(const Key& key);
+
+		#if defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
+			template <typename InputIterator>
+			void               insert(InputIterator first, InputIterator last) { return base_type::insert(first, last); }
+			insert_return_type insert(const value_type& value)                 { return base_type::insert(value);       }
+			iterator           insert(iterator position, const value_type& value) { return base_type::insert(position, value); }
+			iterator           erase(iterator position)                        { return base_type::erase(position);     }
+			iterator           erase(iterator first, iterator last)            { return base_type::erase(first, last);  }
+		#endif
+
+		// Because keys are unique, erase(key) removes at most one element and
+		// count(key) returns 0 or 1.
+		size_type erase(const Key& key);
+		size_type count(const Key& key) const;
+
+		eastl::pair<iterator, iterator>             equal_range(const Key& key);
+		eastl::pair<const_iterator, const_iterator> equal_range(const Key& key) const;
+
+		T& operator[](const Key& key); // Of map, multimap, set, and multimap, only map has operator[].
+
+	}; // map
+
+
+
+
+
+
+ /// multimap
+ ///
+ /// Implements a canonical multimap.
+ ///
+ /// The large majority of the implementation of this class is found in the rbtree
+ /// base class. We control the behaviour of rbtree via template parameters.
+ ///
+ /// Pool allocation
+ /// If you want to make a custom memory pool for a multimap container, your pool
+ /// needs to contain items of type multimap::node_type. So if you have a memory
+ /// pool that has a constructor that takes the size of pool items and the
+ /// count of pool items, you would do this (assuming that MemoryPool implements
+ /// the Allocator interface):
+	///     typedef multimap<Widget, int, less<Widget>, MemoryPool> WidgetMap;  // Declare your WidgetMap type.
+ /// MemoryPool myPool(sizeof(WidgetMap::node_type), 100); // Make a pool of 100 Widget nodes.
+ /// WidgetMap myMap(&myPool); // Create a map that uses the pool.
+ ///
+	template <typename Key, typename T, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+	class multimap
+		: public rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator, eastl::use_first<eastl::pair<const Key, T> >, true, false>
+	{
+	public:
+		// The rbtree base is parameterized with bUniqueKeys=false: a multimap
+		// may hold multiple elements with equal keys.
+		typedef rbtree<Key, eastl::pair<const Key, T>, Compare, Allocator,
+						eastl::use_first<eastl::pair<const Key, T> >, true, false>  base_type;
+		typedef multimap<Key, T, Compare, Allocator>                                this_type;
+		typedef typename base_type::size_type                                       size_type;
+		typedef typename base_type::key_type                                        key_type;
+		typedef T                                                                   mapped_type;
+		typedef typename base_type::value_type                                      value_type;
+		typedef typename base_type::node_type                                       node_type;
+		typedef typename base_type::iterator                                        iterator;
+		typedef typename base_type::const_iterator                                  const_iterator;
+		typedef typename base_type::allocator_type                                  allocator_type;
+		typedef typename base_type::insert_return_type                              insert_return_type;
+		typedef typename base_type::extract_key                                     extract_key;
+		// Other types are inherited from the base class.
+
+		using base_type::begin;
+		using base_type::end;
+		using base_type::find;
+		using base_type::lower_bound;
+		using base_type::upper_bound;
+		using base_type::mCompare;
+
+		#if !defined(__GNUC__) || (__GNUC__ >= 3) // GCC 2.x has a bug which we work around.
+			using base_type::insert;
+			using base_type::erase;
+		#endif
+
+	public:
+		multimap(const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR);
+		multimap(const Compare& compare, const allocator_type& allocator = EASTL_MULTIMAP_DEFAULT_ALLOCATOR);
+		multimap(const this_type& x);
+
+		template <typename Iterator>
+		multimap(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To consider: Make a second version of this function without a default arg.
+
+	public:
+		/// This is an extension to the C++ standard. We insert a default-constructed
+		/// element with the given key. The reason for this is that we can avoid the
+		/// potentially expensive operation of creating and/or copying a mapped_type
+		/// object on the stack.
+		insert_return_type insert(const Key& key);
+
+		#if defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
+			template <typename InputIterator>
+			void               insert(InputIterator first, InputIterator last) { return base_type::insert(first, last); }
+			insert_return_type insert(const value_type& value)                 { return base_type::insert(value);       }
+			iterator           insert(iterator position, const value_type& value) { return base_type::insert(position, value); }
+			iterator           erase(iterator position)                        { return base_type::erase(position);     }
+			iterator           erase(iterator first, iterator last)            { return base_type::erase(first, last);  }
+		#endif
+
+		// erase(key) removes all elements with the given key and returns how
+		// many were removed; count(key) may return any value >= 0.
+		size_type erase(const Key& key);
+		size_type count(const Key& key) const;
+
+		eastl::pair<iterator, iterator>             equal_range(const Key& key);
+		eastl::pair<const_iterator, const_iterator> equal_range(const Key& key) const;
+
+		/// equal_range_small
+		/// This is a special version of equal_range which is optimized for the
+		/// case of there being few or no duplicated keys in the tree.
+		eastl::pair<iterator, iterator>             equal_range_small(const Key& key);
+		eastl::pair<const_iterator, const_iterator> equal_range_small(const Key& key) const;
+
+	}; // multimap
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // map
+ ///////////////////////////////////////////////////////////////////////
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline map<Key, T, Compare, Allocator>::map(const allocator_type& allocator)
+		: base_type(allocator) { } // Constructs an empty map using the given allocator.
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline map<Key, T, Compare, Allocator>::map(const Compare& compare, const allocator_type& allocator)
+		: base_type(compare, allocator) { } // Constructs an empty map with an explicit comparison object.
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline map<Key, T, Compare, Allocator>::map(const this_type& x)
+		: base_type(x) { } // Copy constructor; copying is done by the rbtree base.
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	template <typename Iterator>
+	inline map<Key, T, Compare, Allocator>::map(Iterator itBegin, Iterator itEnd)
+		: base_type(itBegin, itEnd, Compare(), EASTL_MAP_DEFAULT_ALLOCATOR) { } // Range constructor; uses the default compare and allocator.
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline typename map<Key, T, Compare, Allocator>::insert_return_type
+	map<Key, T, Compare, Allocator>::insert(const Key& key)
+	{
+		// Extension: insert a default-constructed mapped value for 'key'.
+		// true_type selects the unique-keys insertion path in the rbtree base.
+		return base_type::DoInsertKey(key, true_type());
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline typename map<Key, T, Compare, Allocator>::size_type
+	map<Key, T, Compare, Allocator>::erase(const Key& key)
+	{
+		// Keys are unique, so a single find suffices; returns the number of
+		// elements removed (0 or 1).
+		const iterator it(find(key));
+
+		if(it != end()) // If it exists...
+		{
+			base_type::erase(it);
+			return 1;
+		}
+		return 0;
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline typename map<Key, T, Compare, Allocator>::size_type
+	map<Key, T, Compare, Allocator>::count(const Key& key) const
+	{
+		// Keys are unique, so the count is 0 or 1; one tree search.
+		const const_iterator it(find(key));
+		return (it != end()) ? 1 : 0;
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline eastl::pair<typename map<Key, T, Compare, Allocator>::iterator,
+					   typename map<Key, T, Compare, Allocator>::iterator>
+	map<Key, T, Compare, Allocator>::equal_range(const Key& key)
+	{
+		// The resulting range will either be empty or have one element,
+		// so instead of doing two tree searches (one for lower_bound and
+		// one for upper_bound), we do just lower_bound and see if the
+		// result is a range of size zero or one.
+		const iterator itLower(lower_bound(key));
+
+		if((itLower == end()) || mCompare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)...
+			return eastl::pair<iterator, iterator>(itLower, itLower); // Empty range: key not present.
+
+		iterator itUpper(itLower);
+		return eastl::pair<iterator, iterator>(itLower, ++itUpper); // One-element range.
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline eastl::pair<typename map<Key, T, Compare, Allocator>::const_iterator,
+					   typename map<Key, T, Compare, Allocator>::const_iterator>
+	map<Key, T, Compare, Allocator>::equal_range(const Key& key) const
+	{
+		// See equal_range above for comments.
+		const const_iterator itLower(lower_bound(key));
+
+		if((itLower == end()) || mCompare(key, itLower.mpNode->mValue.first)) // If at the end or if (key is < itLower)...
+			return eastl::pair<const_iterator, const_iterator>(itLower, itLower); // Empty range: key not present.
+
+		const_iterator itUpper(itLower);
+		return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper); // One-element range.
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline T& map<Key, T, Compare, Allocator>::operator[](const Key& key)
+	{
+		// Returns a reference to the mapped value for 'key', inserting a
+		// default-constructed value first if the key is not present.
+		iterator itLower(lower_bound(key)); // itLower->first is >= key.
+
+		if((itLower == end()) || mCompare(key, (*itLower).first)) // Key absent: insert at the hinted position.
+		{
+			itLower = base_type::insert(itLower, value_type(key, T()));
+
+			// To do: Convert this to use the more efficient:
+			//    itLower = DoInsertKey(itLower, key, true_type());
+			// when we gain confidence in that function.
+		}
+
+		return (*itLower).second;
+
+		// Reference implementation of this function, which may not be as fast:
+		//iterator it(base_type::insert(eastl::pair<iterator, iterator>(key, T())).first);
+		//return it->second;
+	}
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // multimap
+ ///////////////////////////////////////////////////////////////////////
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline multimap<Key, T, Compare, Allocator>::multimap(const allocator_type& allocator)
+		: base_type(allocator)
+	{
+		// Empty; construction is done by the rbtree base.
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline multimap<Key, T, Compare, Allocator>::multimap(const Compare& compare, const allocator_type& allocator)
+		: base_type(compare, allocator)
+	{
+		// Empty; construction is done by the rbtree base.
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline multimap<Key, T, Compare, Allocator>::multimap(const this_type& x)
+		: base_type(x)
+	{
+		// Empty; copying is done by the rbtree base.
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	template <typename Iterator>
+	inline multimap<Key, T, Compare, Allocator>::multimap(Iterator itBegin, Iterator itEnd)
+		: base_type(itBegin, itEnd, Compare(), EASTL_MULTIMAP_DEFAULT_ALLOCATOR)
+	{
+		// Empty; range insertion is done by the rbtree base.
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline typename multimap<Key, T, Compare, Allocator>::insert_return_type
+	multimap<Key, T, Compare, Allocator>::insert(const Key& key)
+	{
+		// Extension: insert a default-constructed mapped value for 'key'.
+		// false_type selects the non-unique-keys insertion path in the rbtree base.
+		return base_type::DoInsertKey(key, false_type());
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline typename multimap<Key, T, Compare, Allocator>::size_type
+	multimap<Key, T, Compare, Allocator>::erase(const Key& key)
+	{
+		// Erases every element whose key equals 'key' and returns the count removed.
+		const eastl::pair<iterator, iterator> range(equal_range(key));
+		const size_type n = (size_type)eastl::distance(range.first, range.second);
+		base_type::erase(range.first, range.second);
+		return n;
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline typename multimap<Key, T, Compare, Allocator>::size_type
+	multimap<Key, T, Compare, Allocator>::count(const Key& key) const
+	{
+		// Returns the number of elements with the given key (may be any value >= 0).
+		const eastl::pair<const_iterator, const_iterator> range(equal_range(key));
+		return (size_type)eastl::distance(range.first, range.second);
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::iterator,
+					   typename multimap<Key, T, Compare, Allocator>::iterator>
+	multimap<Key, T, Compare, Allocator>::equal_range(const Key& key)
+	{
+		// There are multiple ways to implement equal_range. The implementation mentioned
+		// in the C++ standard and which is used by most (all?) commercial STL implementations
+		// is this:
+		//    return eastl::pair<iterator, iterator>(lower_bound(key), upper_bound(key));
+		//
+		// This does two tree searches -- one for the lower bound and one for the
+		// upper bound. This works well for the case whereby you have a large container
+		// and there are lots of duplicated values. We provide an alternative version
+		// of equal_range called equal_range_small for cases where the user is confident
+		// that the number of duplicated items is only a few.
+
+		return eastl::pair<iterator, iterator>(lower_bound(key), upper_bound(key));
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::const_iterator,
+					   typename multimap<Key, T, Compare, Allocator>::const_iterator>
+	multimap<Key, T, Compare, Allocator>::equal_range(const Key& key) const
+	{
+		// See comments above in the non-const version of equal_range.
+		return eastl::pair<const_iterator, const_iterator>(lower_bound(key), upper_bound(key));
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::iterator,
+					   typename multimap<Key, T, Compare, Allocator>::iterator>
+	multimap<Key, T, Compare, Allocator>::equal_range_small(const Key& key)
+	{
+		// We provide alternative version of equal_range here which works faster
+		// for the case where there are at most a small number of potential duplicated keys:
+		// one tree search for lower_bound, then a linear walk over the duplicates
+		// instead of a second tree search for upper_bound.
+		const iterator itLower(lower_bound(key));
+		iterator       itUpper(itLower);
+
+		while((itUpper != end()) && !mCompare(key, itUpper.mpNode->mValue.first)) // While itUpper's key is not > key...
+			++itUpper;
+
+		return eastl::pair<iterator, iterator>(itLower, itUpper);
+	}
+
+
+	template <typename Key, typename T, typename Compare, typename Allocator>
+	inline eastl::pair<typename multimap<Key, T, Compare, Allocator>::const_iterator,
+					   typename multimap<Key, T, Compare, Allocator>::const_iterator>
+	multimap<Key, T, Compare, Allocator>::equal_range_small(const Key& key) const
+	{
+		// We provide alternative version of equal_range here which works faster
+		// for the case where there are at most a small number of potential duplicated keys.
+		// See the non-const overload above for the rationale.
+		const const_iterator itLower(lower_bound(key));
+		const_iterator       itUpper(itLower);
+
+		while((itUpper != end()) && !mCompare(key, itUpper.mpNode->mValue.first)) // While itUpper's key is not > key...
+			++itUpper;
+
+		return eastl::pair<const_iterator, const_iterator>(itLower, itUpper);
+	}
+
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/memory.h b/UnknownVersion/include/EASTL/memory.h
new file mode 100644
index 0000000..7f1d96c
--- /dev/null
+++ b/UnknownVersion/include/EASTL/memory.h
@@ -0,0 +1,698 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/memory.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+// The uninitialized_move function was written by Ryan Ingram.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements the following functions from the C++ standard that
+// are found in the <memory> header:
+//
+// Temporary memory:
+// get_temporary_buffer
+// return_temporary_buffer
+//
+// Uninitialized operations:
+// These are the same as the copy, fill, and fill_n algorithms, except that
+// they *construct* the destination with the source values rather than assign
+// the destination with the source values.
+//
+// uninitialized_copy
+// uninitialized_fill
+// uninitialized_fill_n
+//     uninitialized_move           - Extension to standard functionality.
+//     uninitialized_copy_ptr       - Extension to standard functionality.
+//     uninitialized_fill_ptr       - Extension to standard functionality.
+//     uninitialized_fill_n_ptr     - Extension to standard functionality.
+//     uninitialized_copy_fill      - Extension to standard functionality.
+//     uninitialized_fill_copy      - Extension to standard functionality.
+//     uninitialized_copy_copy      - Extension to standard functionality.
+//
+// In-place destructor helpers:
+// destruct(T*)
+// destruct(first, last)
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_MEMORY_H
+#define EASTL_MEMORY_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/generic_iterator.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/allocator.h>
+
+#ifdef _MSC_VER
+    #pragma warning(push, 0)
+#endif
+#include <stdlib.h>
+#include <string.h> // memcpy, used by the trivial-relocate uninitialized_move_impl specialization.
+#ifdef _MSC_VER
+    #pragma warning(pop)
+#endif
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_TEMP_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_TEMP_DEFAULT_NAME
+ #define EASTL_TEMP_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " temp" // Unless the user overrides something, this is "EASTL temp".
+ #endif
+
+
+ /// get_temporary_buffer
+ ///
+ /// From the C++ standard, section 20.4.3:
+ /// 1 Effects: Obtains a pointer to storage sufficient to store up to n adjacent T objects.
+ /// 2 Returns: A pair containing the buffer's address and capacity (in the units of sizeof(T)),
+ /// or a pair of 0 values if no storage can be obtained.
+ ///
+ /// Note: The return value is space to hold T elements, but no T elements are constructed.
+ ///
+ /// Our implementation here differs slightly in that we have alignment, alignmentOffset, and pName arguments.
+ /// Note that you can use the EASTL_NAME_VAL macro to make names go away in release builds.
+ ///
+ /// Example usage:
+ /// pair<int*, ptrdiff_t> pr = get_temporary_buffer<int>(100, 0, 0, EASTL_NAME_VAL("Temp int array"));
+ /// memset(pr.first, 0, 100 * sizeof(int));
+ /// return_temporary_buffer(pr.first);
+ ///
+    template <typename T>
+    pair<T*, ptrdiff_t> get_temporary_buffer(ptrdiff_t n, size_t alignment = 0, size_t alignmentOffset = 0, const char* pName = EASTL_TEMP_DEFAULT_NAME)
+    {
+        EASTLAllocatorType allocator(*EASTLAllocatorDefault(), pName);
+        T* const pMemory = static_cast<T*>(EASTLAllocAligned(allocator, n * sizeof(T), alignment, alignmentOffset));
+
+        // Per the standard's contract quoted above (20.4.3/2), return a pair of
+        // zero values if no storage could be obtained. Previously this returned
+        // (NULL, n) on allocation failure, advertising capacity that doesn't exist.
+        return eastl::pair<T*, ptrdiff_t>(pMemory, pMemory ? n : 0);
+    }
+
+
+ /// return_temporary_buffer
+ ///
+ /// From the C++ standard, section 20.4.3:
+ /// 3 Effects: Deallocates the buffer to which p points.
+ /// 4 Requires: The buffer shall have been previously allocated by get_temporary_buffer.
+ ///
+ /// Note: This function merely frees space and does not destruct any T elements.
+ ///
+ /// Example usage:
+ /// pair<int*, ptrdiff_t> pr = get_temporary_buffer<int>(300);
+ /// memset(pr.first, 0, 300 * sizeof(int));
+ /// return_temporary_buffer(pr.first, pr.second);
+ ///
+    template <typename T>
+    void return_temporary_buffer(T* p, ptrdiff_t n = 0)
+    {
+        // No destructors are run here; the caller is responsible for destructing
+        // any T objects it constructed in the buffer. 'n' should match the count
+        // given to get_temporary_buffer (some allocators want the size on free).
+        EASTLAllocatorType& allocator(*EASTLAllocatorDefault());
+        EASTLFree(allocator, p, n * sizeof(T));
+    }
+
+
+
+ /// uninitialized_move
+ ///
+ /// uninitialized_move takes a constructed sequence of objects and an
+ /// uninitialized destination buffer. In the case of any exception thrown
+ /// while moving the objects, any newly constructed objects are guaranteed
+ /// to be destructed and the input left fully constructed.
+ ///
+ /// In the case where you need to do multiple moves atomically, split the
+ /// calls into uninitialized_move_start/abort/commit.
+ ///
+ /// uninitialized_move_start can possibly throw an exception. If it does,
+ /// you don't need to do anything. However, if it returns without throwing
+    /// an exception you need to guarantee that either uninitialized_move_abort
+    /// or uninitialized_move_commit is called.
+    ///
+    /// Both uninitialized_move_abort and uninitialized_move_commit are
+    /// guaranteed to not throw C++ exceptions.
+
+    // Generic (non-trivially-relocatable) implementation. A "move" is modeled as:
+    //   start  = copy-construct source into destination,
+    //   commit = destruct the source (destination keeps the values),
+    //   abort  = destruct the destination (source keeps the values).
+    template <bool hasTrivialMove, typename iteratorTag>
+    struct uninitialized_move_impl
+    {
+        template <typename ForwardIterator, typename ForwardIteratorDest>
+        static ForwardIteratorDest do_move_start(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+        {
+            typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+            #if EASTL_EXCEPTIONS_ENABLED
+                ForwardIteratorDest origDest(dest);
+                try
+                {
+                    for(; first != last; ++first, ++dest)
+                        ::new(&*dest) value_type(*first);
+                }
+                catch(...)
+                {
+                    // Unwind: destroy everything constructed so far, leaving the
+                    // destination uninitialized and the source fully intact.
+                    // NOTE(review): 'origDest < dest' requires operator< on the
+                    // iterator — effectively random-access; confirm callers.
+                    for(; origDest < dest; ++origDest)
+                        origDest->~value_type();
+                    throw;
+                }
+            #else
+                for(; first != last; ++first, ++dest)
+                    ::new(&*dest) value_type(*first);
+            #endif
+
+            return dest;
+        }
+
+        template <typename ForwardIterator, typename ForwardIteratorDest>
+        static ForwardIteratorDest do_move_commit(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) //throw()
+        {
+            // Destroys the *source* range; the destination now owns the values.
+            typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+            for(; first != last; ++first, ++dest)
+                first->~value_type();
+
+            return dest;
+        }
+
+        template <typename ForwardIterator, typename ForwardIteratorDest>
+        static ForwardIteratorDest do_move_abort(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest) //throw()
+        {
+            // Destroys the *destination* range; the source remains valid.
+            typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+            for(; first != last; ++first, ++dest)
+                dest->~value_type();
+            return dest;
+        }
+    };
+
+    // Specialization for trivially-relocatable types accessed through raw
+    // pointers: 'start' is a single memcpy, and commit/abort are no-ops apart
+    // from computing the end-of-destination pointer (no destructors needed).
+    template <>
+    struct uninitialized_move_impl<true, EASTL_ITC_NS::random_access_iterator_tag>
+    {
+        template <typename T>
+        static T* do_move_start(T* first, T* last, T* dest)
+        {
+            // memcpy returns 'dest'; adding (last - first) yields the new end.
+            return (T*)memcpy(dest, first, (size_t)((uintptr_t)last - (uintptr_t)first)) + (last - first);
+        }
+
+        template <typename T>
+        static T* do_move_commit(T* first, T* last, T* dest)
+        {
+            return dest + (last - first);
+        }
+
+        template <typename T>
+        static T* do_move_abort(T* first, T* last, T* dest)
+        {
+            return dest + (last - first);
+        }
+    };
+
+
+ /// uninitialized_move_start, uninitialized_move_commit, uninitialized_move_abort
+ ///
+ /// After calling uninitialized_move_start, if it doesn't throw an exception,
+ /// both the source and destination iterators point to undefined data. If it
+ /// does throw an exception, the destination remains uninitialized and the source
+ /// is as it was before.
+ ///
+ /// In order to make the iterators valid again you need to call either uninitialized_move_abort
+ /// or uninitialized_move_commit. The abort call makes the original source
+ /// iterator valid again, and commit makes the destination valid. Both abort
+ /// and commit are guaranteed to not throw C++ exceptions.
+ ///
+ /// Example usage:
+ /// iterator dest2 = uninitialized_move_start(first, last, dest);
+ /// try {
+ /// // some code here that might throw an exception
+ /// }
+ /// catch(...)
+ /// {
+ /// uninitialized_move_abort(first, last, dest);
+ /// throw;
+ /// }
+ /// uninitialized_move_commit(first, last, dest);
+ ///
+    /// uninitialized_move_start
+    ///
+    /// Begins an atomic move: copy-constructs [first, last) into the buffer at
+    /// 'dest'. Must be followed by uninitialized_move_commit or _abort.
+    template <typename ForwardIterator, typename ForwardIteratorDest>
+    inline ForwardIteratorDest uninitialized_move_start(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category category;
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type       src_value_type;
+        typedef typename eastl::iterator_traits<ForwardIteratorDest>::value_type   dst_value_type;
+
+        // Use the memcpy-based specialization only when both iterators are raw
+        // pointers to the same trivially-relocatable value type.
+        const bool bMemcpyable = type_and<has_trivial_relocate<src_value_type>::value,
+                                          is_pointer<ForwardIterator>::value,
+                                          is_pointer<ForwardIteratorDest>::value,
+                                          is_same<src_value_type, dst_value_type>::value>::value;
+
+        return eastl::uninitialized_move_impl<bMemcpyable, category>::do_move_start(first, last, dest);
+    }
+
+    /// uninitialized_move_commit
+    ///
+    /// Finalizes a move begun with uninitialized_move_start: destructs the
+    /// source elements, leaving only the destination constructed. Never throws.
+    template <typename ForwardIterator, typename ForwardIteratorDest>
+    inline ForwardIteratorDest uninitialized_move_commit(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category category;
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type       src_value_type;
+        typedef typename eastl::iterator_traits<ForwardIteratorDest>::value_type   dst_value_type;
+
+        // Must select the same implementation as the matching _start call.
+        const bool bMemcpyable = type_and<has_trivial_relocate<src_value_type>::value,
+                                          is_pointer<ForwardIterator>::value,
+                                          is_pointer<ForwardIteratorDest>::value,
+                                          is_same<src_value_type, dst_value_type>::value>::value;
+
+        return eastl::uninitialized_move_impl<bMemcpyable, category>::do_move_commit(first, last, dest);
+    }
+
+    /// uninitialized_move_abort
+    ///
+    /// Rolls back a move begun with uninitialized_move_start: destructs the
+    /// destination elements, leaving the source valid again. Never throws.
+    template <typename ForwardIterator, typename ForwardIteratorDest>
+    inline ForwardIteratorDest uninitialized_move_abort(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::iterator_category category;
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type       src_value_type;
+        typedef typename eastl::iterator_traits<ForwardIteratorDest>::value_type   dst_value_type;
+
+        // Must select the same implementation as the matching _start call.
+        const bool bMemcpyable = type_and<has_trivial_relocate<src_value_type>::value,
+                                          is_pointer<ForwardIterator>::value,
+                                          is_pointer<ForwardIteratorDest>::value,
+                                          is_same<src_value_type, dst_value_type>::value>::value;
+
+        return eastl::uninitialized_move_impl<bMemcpyable, category>::do_move_abort(first, last, dest);
+    }
+
+ /// uninitialized_move
+ ///
+ /// uninitialized_move takes a constructed sequence of objects and an
+ /// uninitialized destination buffer. In the case of any exception thrown
+ /// while moving the objects, any newly constructed objects are guaranteed
+ /// to be destructed and the input left as it was before.
+ ///
+    template <typename ForwardIterator, typename ForwardIteratorDest>
+    inline ForwardIteratorDest uninitialized_move(ForwardIterator first, ForwardIterator last, ForwardIteratorDest dest)
+    {
+        // start + immediate commit: on success the source range has been
+        // destructed and only the destination remains constructed.
+        ForwardIteratorDest result = uninitialized_move_start(first, last, dest);
+        uninitialized_move_commit(first, last, dest);
+
+        return result;
+    }
+
+
+
+
+
+ // uninitialized_copy
+ //
+    // Trivial-assignment path: assignment equals copy-construction for these
+    // types, so plain copy into the raw buffer is safe.
+    template <typename InputIterator, typename ForwardIterator>
+    inline ForwardIterator uninitialized_copy_impl(InputIterator first, InputIterator last, ForwardIterator dest, true_type)
+    {
+        return eastl::copy(first, last, dest); // The copy() in turn will use memcpy for POD types.
+    }
+
+    // Non-trivial path: placement-new each destination element from the source.
+    template <typename InputIterator, typename ForwardIterator>
+    inline ForwardIterator uninitialized_copy_impl(InputIterator first, InputIterator last, ForwardIterator dest, false_type)
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+        ForwardIterator currentDest(dest);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+                for(; first != last; ++first, ++currentDest)
+                    ::new(&*currentDest) value_type(*first);
+            }
+            catch(...)
+            {
+                // Unwind: destroy the elements constructed before the throw so
+                // the destination is left fully uninitialized.
+                for(; dest < currentDest; ++dest)
+                    dest->~value_type();
+                throw;
+            }
+        #else
+            for(; first != last; ++first, ++currentDest)
+                ::new(&*currentDest) value_type(*first);
+        #endif
+
+        return currentDest;
+    }
+
+ /// uninitialized_copy
+ ///
+ /// Copies a source range to a destination, copy-constructing the destination with
+ /// the source values (and not *assigning* the destination with the source values).
+ /// Returns the end of the destination range (i.e. dest + (last - first)).
+ ///
+ /// Declaration:
+ /// template <typename InputIterator, typename ForwardIterator>
+ /// ForwardIterator uninitialized_copy(InputIterator sourceFirst, InputIterator sourceLast, ForwardIterator destination);
+ ///
+ /// Example usage:
+ /// SomeClass* pArray = malloc(10 * sizeof(SomeClass));
+ /// uninitialized_copy(pSourceDataBegin, pSourceDataBegin + 10, pArray);
+ ///
+    template <typename InputIterator, typename ForwardIterator>
+    inline ForwardIterator uninitialized_copy(InputIterator first, InputIterator last, ForwardIterator result)
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+        // Note: has_trivial_assign isn't actually the right thing to use here, as it
+        // refers to assignment as opposed to construction. Bug Paul Pedriana if this
+        // is becoming a problem. In the meantime, this code assumes that if has_trivial_assign
+        // is present for a type, then has_trivial_copy is as well.
+        // Returns the end of the destination range: result + (last - first).
+        return uninitialized_copy_impl(first, last, result, has_trivial_assign<value_type>());
+    }
+
+ /// uninitialized_copy_ptr
+ ///
+ /// This is a specialization of uninitialized_copy for iterators that are pointers.
+ /// It exists so that we can declare a value_type for the iterator, which you
+ /// can't do with a pointer by itself.
+ ///
+    template <typename First, typename Last, typename Result>
+    inline Result uninitialized_copy_ptr(First first, Last last, Result result)
+    {
+        // generic_iterator wraps the raw pointers so iterator_traits can supply
+        // a value_type; base() unwraps the resulting pointer.
+        typedef typename eastl::iterator_traits<generic_iterator<Result, void> >::value_type value_type;
+        const generic_iterator<Result, void> i(uninitialized_copy_impl(generic_iterator<First, void>(first),
+                                                                       generic_iterator<Last, void>(last),
+                                                                       generic_iterator<Result, void>(result),
+                                                                       has_trivial_assign<value_type>()));
+        return i.base();
+    }
+
+
+
+
+ // uninitialized_fill
+ //
+    // Trivial-assignment path: plain fill is equivalent to copy-construction.
+    template <typename ForwardIterator, typename T>
+    inline void uninitialized_fill_impl(ForwardIterator first, ForwardIterator last, const T& value, true_type)
+    {
+        eastl::fill(first, last, value);
+    }
+
+    // Non-trivial path: placement-new each element from 'value'.
+    template <typename ForwardIterator, typename T>
+    void uninitialized_fill_impl(ForwardIterator first, ForwardIterator last, const T& value, false_type)
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+        ForwardIterator currentDest(first);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+                for(; currentDest != last; ++currentDest)
+                    ::new(&*currentDest) value_type(value);
+            }
+            catch(...)
+            {
+                // Unwind: destroy the elements constructed before the throw.
+                for(; first < currentDest; ++first)
+                    first->~value_type();
+                throw;
+            }
+        #else
+            for(; currentDest != last; ++currentDest)
+                ::new(&*currentDest) value_type(value);
+        #endif
+    }
+
+ /// uninitialized_fill
+ ///
+ /// Copy-constructs the elements in the destination range with the given input value.
+ /// Returns void. It wouldn't be useful to return the end of the destination range,
+ /// as that is the same as the 'last' input parameter.
+ ///
+ /// Declaration:
+ /// template <typename ForwardIterator, typename T>
+ /// void uninitialized_fill(ForwardIterator destinationFirst, ForwardIterator destinationLast, const T& value);
+ ///
+    template <typename ForwardIterator, typename T>
+    inline void uninitialized_fill(ForwardIterator first, ForwardIterator last, const T& value)
+    {
+        // Dispatch on trivial-assignability (see note in uninitialized_copy).
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+        uninitialized_fill_impl(first, last, value, has_trivial_assign<value_type>());
+    }
+
+ /// uninitialized_fill_ptr
+ ///
+ /// This is a specialization of uninitialized_fill for iterators that are pointers.
+ /// It exists so that we can declare a value_type for the iterator, which you
+ /// can't do with a pointer by itself.
+ ///
+    template <typename T>
+    inline void uninitialized_fill_ptr(T* first, T* last, const T& value)
+    {
+        // generic_iterator wraps the raw pointers so a value_type is available.
+        typedef typename eastl::iterator_traits<generic_iterator<T*, void> >::value_type value_type;
+        uninitialized_fill_impl(generic_iterator<T*, void>(first), generic_iterator<T*, void>(last), value, has_trivial_assign<value_type>());
+    }
+
+
+
+
+ // uninitialized_fill_n
+ //
+    // Trivial-assignment path: plain fill_n is equivalent to copy-construction.
+    template <typename ForwardIterator, typename Count, typename T>
+    inline void uninitialized_fill_n_impl(ForwardIterator first, Count n, const T& value, true_type)
+    {
+        eastl::fill_n(first, n, value);
+    }
+
+    // Non-trivial path: placement-new n copies of 'value'.
+    template <typename ForwardIterator, typename Count, typename T>
+    void uninitialized_fill_n_impl(ForwardIterator first, Count n, const T& value, false_type)
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+        ForwardIterator currentDest(first);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            try
+            {
+                for(; n > 0; --n, ++currentDest)
+                    ::new(&*currentDest) value_type(value);
+            }
+            catch(...)
+            {
+                // Unwind: destroy the elements constructed before the throw.
+                for(; first < currentDest; ++first)
+                    first->~value_type();
+                throw;
+            }
+        #else
+            for(; n > 0; --n, ++currentDest)
+                ::new(&*currentDest) value_type(value);
+        #endif
+    }
+
+ /// uninitialized_fill_n
+ ///
+ /// Copy-constructs the range of [first, first + n) with the given input value.
+ /// Returns void as per the C++ standard, though returning the end input iterator
+ /// value may be of use.
+ ///
+ /// Declaration:
+ /// template <typename ForwardIterator, typename Count, typename T>
+ /// void uninitialized_fill_n(ForwardIterator destination, Count n, const T& value);
+ ///
+    template <typename ForwardIterator, typename Count, typename T>
+    inline void uninitialized_fill_n(ForwardIterator first, Count n, const T& value)
+    {
+        // Dispatch on trivial-assignability (see note in uninitialized_copy).
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+        uninitialized_fill_n_impl(first, n, value, has_trivial_assign<value_type>());
+    }
+
+ /// uninitialized_fill_n_ptr
+ ///
+ /// This is a specialization of uninitialized_fill_n for iterators that are pointers.
+ /// It exists so that we can declare a value_type for the iterator, which you
+ /// can't do with a pointer by itself.
+ ///
+    template <typename T, typename Count>
+    inline void uninitialized_fill_n_ptr(T* first, Count n, const T& value)
+    {
+        // generic_iterator wraps the raw pointer so a value_type is available.
+        typedef typename eastl::iterator_traits<generic_iterator<T*, void> >::value_type value_type;
+        uninitialized_fill_n_impl(generic_iterator<T*, void>(first), n, value, has_trivial_assign<value_type>());
+    }
+
+
+
+
+ /// uninitialized_copy_fill
+ ///
+ /// Copies [first1, last1) into [first2, first2 + (last1 - first1)) then
+ /// fills [first2 + (last1 - first1), last2) with value.
+ ///
+    template <typename InputIterator, typename ForwardIterator, typename T>
+    inline void uninitialized_copy_fill(InputIterator first1, InputIterator last1,
+                                        ForwardIterator first2, ForwardIterator last2, const T& value)
+    {
+        const ForwardIterator mid(eastl::uninitialized_copy(first1, last1, first2));
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            // Fix: 'value_type' was used in the catch block below without being
+            // declared, which failed to compile when exceptions are enabled.
+            typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+            try
+            {
+        #endif
+                eastl::uninitialized_fill(mid, last2, value);
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                // If the fill throws, destroy the elements already
+                // copy-constructed into [first2, mid).
+                for(; first2 < mid; ++first2)
+                    first2->~value_type();
+                throw;
+            }
+        #endif
+    }
+
+
+
+
+
+ /// uninitialized_fill_copy
+ ///
+ /// Fills [result, mid) with value then copies [first, last) into [mid, mid + (last - first)).
+ ///
+    template <typename ForwardIterator, typename T, typename InputIterator>
+    inline ForwardIterator
+    uninitialized_fill_copy(ForwardIterator result, ForwardIterator mid, const T& value, InputIterator first, InputIterator last)
+    {
+        eastl::uninitialized_fill(result, mid, value);
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            // Fix: 'value_type' was used in the catch block below without being
+            // declared, which failed to compile when exceptions are enabled.
+            typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+            try
+            {
+        #endif
+                return eastl::uninitialized_copy(first, last, mid);
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                // If the copy throws, destroy the elements already
+                // fill-constructed into [result, mid).
+                for(; result < mid; ++result)
+                    result->~value_type();
+                throw;
+            }
+        #endif
+    }
+
+
+
+ /// uninitialized_copy_copy
+ ///
+ /// Copies [first1, last1) into [result, result + (last1 - first1)) then
+ /// copies [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)).
+ ///
+    template <typename InputIterator1, typename InputIterator2, typename ForwardIterator>
+    inline ForwardIterator
+    uninitialized_copy_copy(InputIterator1 first1, InputIterator1 last1,
+                            InputIterator2 first2, InputIterator2 last2,
+                            ForwardIterator result)
+    {
+        const ForwardIterator mid(eastl::uninitialized_copy(first1, last1, result));
+
+        #if EASTL_EXCEPTIONS_ENABLED
+            // Fix: 'value_type' was used in the catch block below without being
+            // declared, which failed to compile when exceptions are enabled.
+            typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+            try
+            {
+        #endif
+                return eastl::uninitialized_copy(first2, last2, mid);
+        #if EASTL_EXCEPTIONS_ENABLED
+            }
+            catch(...)
+            {
+                // If the second copy throws, destroy the elements already
+                // constructed by the first copy into [result, mid).
+                for(; result < mid; ++result)
+                    result->~value_type();
+                throw;
+            }
+        #endif
+    }
+
+
+
+ /// destruct
+ ///
+ /// Calls the destructor of a given object.
+ ///
+ /// Note that we don't have a specialized version of this for objects
+ /// with trivial destructors, such as integers. This is because the
+ /// compiler can already see in our version here that the destructor
+ /// is a no-op.
+ ///
+    template <typename T>
+    inline void destruct(T* p)
+    {
+        // Explicit destructor call; the storage itself is not freed.
+        p->~T();
+    }
+
+
+
+ // destruct(first, last)
+ //
+    template <typename ForwardIterator>
+    inline void destruct_impl(ForwardIterator /*first*/, ForwardIterator /*last*/, true_type) // true means the type has a trivial destructor.
+    {
+        // Empty. The type has a trivial destructor; destroying the range is a no-op.
+    }
+
+    template <typename ForwardIterator>
+    inline void destruct_impl(ForwardIterator first, ForwardIterator last, false_type) // false means the type has a significant destructor.
+    {
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+
+        // Destruct each element in turn; the storage itself is untouched.
+        for(; first != last; ++first)
+            (*first).~value_type();
+    }
+
+ /// destruct
+ ///
+ /// Calls the destructor on a range of objects.
+ ///
+ /// We have a specialization for objects with trivial destructors, such as
+ /// PODs. In this specialization the destruction of the range is a no-op.
+ ///
+    template <typename ForwardIterator>
+    inline void destruct(ForwardIterator first, ForwardIterator last)
+    {
+        // Dispatch: no-op for trivially destructible types, per-element otherwise.
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+        destruct_impl(first, last, eastl::has_trivial_destructor<value_type>());
+    }
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/set.h b/UnknownVersion/include/EASTL/set.h
new file mode 100644
index 0000000..72ce2e2
--- /dev/null
+++ b/UnknownVersion/include/EASTL/set.h
@@ -0,0 +1,567 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/set.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written by Paul Pedriana.
+//////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_SET_H
+#define EASTL_SET_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/internal/red_black_tree.h>
+#include <EASTL/functional.h>
+#include <EASTL/utility.h>
+
+
+
+namespace eastl
+{
+
+    /// EASTL_SET_DEFAULT_NAME
+    ///
+    /// Defines a default container name in the absence of a user-provided name.
+    /// Consumed by EASTL_SET_DEFAULT_ALLOCATOR below; define it before including
+    /// this header to override.
+    ///
+    #ifndef EASTL_SET_DEFAULT_NAME
+        #define EASTL_SET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " set" // Unless the user overrides something, this is "EASTL set".
+    #endif
+
+
+    /// EASTL_MULTISET_DEFAULT_NAME
+    ///
+    /// Defines a default container name in the absence of a user-provided name.
+    /// Consumed by EASTL_MULTISET_DEFAULT_ALLOCATOR below; define it before
+    /// including this header to override.
+    ///
+    #ifndef EASTL_MULTISET_DEFAULT_NAME
+        #define EASTL_MULTISET_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " multiset" // Unless the user overrides something, this is "EASTL multiset".
+    #endif
+
+
+    /// EASTL_SET_DEFAULT_ALLOCATOR
+    ///
+    #ifndef EASTL_SET_DEFAULT_ALLOCATOR
+        #define EASTL_SET_DEFAULT_ALLOCATOR allocator_type(EASTL_SET_DEFAULT_NAME)
+    #endif
+
+    /// EASTL_MULTISET_DEFAULT_ALLOCATOR
+    ///
+    #ifndef EASTL_MULTISET_DEFAULT_ALLOCATOR
+        #define EASTL_MULTISET_DEFAULT_ALLOCATOR allocator_type(EASTL_MULTISET_DEFAULT_NAME)
+    #endif
+
+
+
+    /// set
+    ///
+    /// Implements a canonical set.
+    ///
+    /// The large majority of the implementation of this class is found in the rbtree
+    /// base class. We control the behaviour of rbtree via template parameters.
+    ///
+    /// Note that the 'bMutableIterators' template parameter to rbtree is set to false.
+    /// This means that set::iterator is const and the same as set::const_iterator.
+    /// This is by design and it follows the C++ standard defect report recommendation.
+    /// If the user wants to modify a container element, the user needs to either use
+    /// mutable data members or use const_cast on the iterator's data member. Both of
+    /// these solutions are recommended by the C++ standard defect report.
+    /// To consider: Expose the bMutableIterators template policy here at the set level
+    /// so the user can have non-const set iterators via a template parameter.
+    ///
+    /// Pool allocation
+    /// If you want to make a custom memory pool for a set container, your pool
+    /// needs to contain items of type set::node_type. So if you have a memory
+    /// pool that has a constructor that takes the size of pool items and the
+    /// count of pool items, you would do this (assuming that MemoryPool implements
+    /// the Allocator interface):
+    ///     typedef set<Widget, less<Widget>, MemoryPool> WidgetSet;  // Declare your WidgetSet type.
+    ///     MemoryPool myPool(sizeof(WidgetSet::node_type), 100);     // Make a pool of 100 Widget nodes.
+    ///     WidgetSet mySet(&myPool);                                 // Create a set that uses the pool.
+    ///
+    template <typename Key, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+    class set
+        : public rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, true>
+    {
+    public:
+        typedef rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, true> base_type;
+        typedef set<Key, Compare, Allocator>                                            this_type;
+        typedef typename base_type::size_type                                           size_type;
+        typedef typename base_type::value_type                                          value_type;
+        typedef typename base_type::iterator                                            iterator;
+        typedef typename base_type::const_iterator                                      const_iterator;
+        typedef typename base_type::reverse_iterator                                    reverse_iterator;
+        typedef typename base_type::const_reverse_iterator                              const_reverse_iterator;
+        typedef typename base_type::allocator_type                                      allocator_type;
+        // Other types are inherited from the base class.
+
+        using base_type::begin;
+        using base_type::end;
+        using base_type::find;
+        using base_type::lower_bound;
+        using base_type::upper_bound;
+        using base_type::mCompare;
+
+    public:
+        set(const allocator_type& allocator = EASTL_SET_DEFAULT_ALLOCATOR);
+        set(const Compare& compare, const allocator_type& allocator = EASTL_SET_DEFAULT_ALLOCATOR);
+        set(const this_type& x);
+
+        template <typename Iterator>
+        set(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+    public:
+        // Returns the number of elements erased (0 or 1, since keys are unique).
+        size_type erase(const Key& k);
+        iterator  erase(iterator position);
+        iterator  erase(iterator first, iterator last);
+
+        reverse_iterator erase(reverse_iterator position);
+        reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+
+        // Returns 0 or 1, since keys are unique.
+        size_type count(const Key& k) const;
+
+        eastl::pair<iterator, iterator>             equal_range(const Key& k);
+        eastl::pair<const_iterator, const_iterator> equal_range(const Key& k) const;
+
+    }; // set
+
+
+
+
+
+    /// multiset
+    ///
+    /// Implements a canonical multiset.
+    ///
+    /// The large majority of the implementation of this class is found in the rbtree
+    /// base class. We control the behaviour of rbtree via template parameters.
+    ///
+    /// See notes above in 'set' regarding mutable iterators.
+    ///
+    /// Pool allocation
+    /// If you want to make a custom memory pool for a multiset container, your pool
+    /// needs to contain items of type multiset::node_type. So if you have a memory
+    /// pool that has a constructor that takes the size of pool items and the
+    /// count of pool items, you would do this (assuming that MemoryPool implements
+    /// the Allocator interface):
+    ///     typedef multiset<Widget, less<Widget>, MemoryPool> WidgetSet;  // Declare your WidgetSet type.
+    ///     MemoryPool myPool(sizeof(WidgetSet::node_type), 100);          // Make a pool of 100 Widget nodes.
+    ///     WidgetSet mySet(&myPool);                                      // Create a multiset that uses the pool.
+    ///
+    template <typename Key, typename Compare = eastl::less<Key>, typename Allocator = EASTLAllocatorType>
+    class multiset
+        : public rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, false>
+    {
+    public:
+        typedef rbtree<Key, Key, Compare, Allocator, eastl::use_self<Key>, false, false> base_type;
+        typedef multiset<Key, Compare, Allocator>                                        this_type;
+        typedef typename base_type::size_type                                            size_type;
+        typedef typename base_type::value_type                                           value_type;
+        typedef typename base_type::iterator                                             iterator;
+        typedef typename base_type::const_iterator                                       const_iterator;
+        typedef typename base_type::reverse_iterator                                     reverse_iterator;
+        typedef typename base_type::const_reverse_iterator                               const_reverse_iterator;
+        typedef typename base_type::allocator_type                                       allocator_type;
+        // Other types are inherited from the base class.
+
+        using base_type::begin;
+        using base_type::end;
+        using base_type::find;
+        using base_type::lower_bound;
+        using base_type::upper_bound;
+        using base_type::mCompare;
+
+    public:
+        multiset(const allocator_type& allocator = EASTL_MULTISET_DEFAULT_ALLOCATOR);
+        multiset(const Compare& compare, const allocator_type& allocator = EASTL_MULTISET_DEFAULT_ALLOCATOR);
+        multiset(const this_type& x);
+
+        template <typename Iterator>
+        multiset(Iterator itBegin, Iterator itEnd); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
+
+    public:
+        // Returns the number of elements erased (may be greater than 1).
+        size_type erase(const Key& k);
+        iterator  erase(iterator position);
+        iterator  erase(iterator first, iterator last);
+
+        reverse_iterator erase(reverse_iterator position);
+        reverse_iterator erase(reverse_iterator first, reverse_iterator last);
+
+        size_type count(const Key& k) const;
+
+        eastl::pair<iterator, iterator>             equal_range(const Key& k);
+        eastl::pair<const_iterator, const_iterator> equal_range(const Key& k) const;
+
+        /// equal_range_small
+        /// This is a special version of equal_range which is optimized for the
+        /// case of there being few or no duplicated keys in the tree.
+        eastl::pair<iterator, iterator>             equal_range_small(const Key& k);
+        eastl::pair<const_iterator, const_iterator> equal_range_small(const Key& k) const;
+
+    }; // multiset
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // set
+ ///////////////////////////////////////////////////////////////////////
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline set<Key, Compare, Allocator>::set(const allocator_type& allocator)
+        : base_type(allocator)
+    {
+        // Empty. The base class constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline set<Key, Compare, Allocator>::set(const Compare& compare, const allocator_type& allocator)
+        : base_type(compare, allocator)
+    {
+        // Empty. The base class constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline set<Key, Compare, Allocator>::set(const this_type& x)
+        : base_type(x)
+    {
+        // Empty. The base class copy constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    template <typename Iterator>
+    inline set<Key, Compare, Allocator>::set(Iterator itBegin, Iterator itEnd)
+        : base_type(itBegin, itEnd, Compare(), EASTL_SET_DEFAULT_ALLOCATOR)
+    {
+        // Empty. The base class range constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::size_type
+    set<Key, Compare, Allocator>::erase(const Key& k)
+    {
+        // A set holds at most one element equivalent to k, so we return 0 or 1.
+        const iterator it(find(k));
+
+        if(it == end()) // Not present; nothing to erase.
+            return 0;
+
+        base_type::erase(it);
+        return 1;
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::iterator
+    set<Key, Compare, Allocator>::erase(iterator position)
+    {
+        // We need to provide this version because we override another version
+        // and C++ name hiding rules would otherwise make the base version of
+        // this function invisible to callers.
+        return base_type::erase(position);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::iterator
+    set<Key, Compare, Allocator>::erase(iterator first, iterator last)
+    {
+        // We need to provide this version because we override another version
+        // and C++ name hiding rules would otherwise make the base version of
+        // this function invisible to callers.
+        return base_type::erase(first, last);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::size_type
+    set<Key, Compare, Allocator>::count(const Key& k) const
+    {
+        // Keys are unique in a set, so a single find answers the question.
+        return (find(k) == end()) ? (size_type)0 : (size_type)1;
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::reverse_iterator
+    set<Key, Compare, Allocator>::erase(reverse_iterator position)
+    {
+        // A reverse_iterator's base() refers to the element after the one the
+        // reverse_iterator addresses, so we increment before taking base() to
+        // obtain the forward iterator for the element to erase.
+        return reverse_iterator(erase((++position).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename set<Key, Compare, Allocator>::reverse_iterator
+    set<Key, Compare, Allocator>::erase(reverse_iterator first, reverse_iterator last)
+    {
+        // Version which erases in order from first to last.
+        // difference_type i(first.base() - last.base());
+        // while(i--)
+        //     first = erase(first);
+        // return first;
+
+        // Version which erases in order from last to first, but is slightly more efficient:
+        // The (++it).base() adjustments convert each reverse_iterator to the
+        // forward iterator addressing the same element (base() is offset by one).
+        return reverse_iterator(erase((++last).base(), (++first).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline eastl::pair<typename set<Key, Compare, Allocator>::iterator,
+                       typename set<Key, Compare, Allocator>::iterator>
+    set<Key, Compare, Allocator>::equal_range(const Key& k)
+    {
+        // Because keys are unique, the resulting range is either empty or holds
+        // exactly one element. So instead of doing two tree searches (one for
+        // lower_bound and one for upper_bound), we do a single lower_bound and
+        // check whether it refers to an element equivalent to k.
+        const iterator itLower(lower_bound(k));
+
+        if((itLower != end()) && !mCompare(k, *itLower)) // If itLower refers to an element equivalent to k...
+        {
+            iterator itUpper(itLower);
+            return eastl::pair<iterator, iterator>(itLower, ++itUpper);
+        }
+
+        return eastl::pair<iterator, iterator>(itLower, itLower); // Empty range.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline eastl::pair<typename set<Key, Compare, Allocator>::const_iterator,
+                       typename set<Key, Compare, Allocator>::const_iterator>
+    set<Key, Compare, Allocator>::equal_range(const Key& k) const
+    {
+        // See the non-const equal_range above for commentary: a single
+        // lower_bound search suffices because keys are unique.
+        const const_iterator itLower(lower_bound(k));
+
+        if((itLower != end()) && !mCompare(k, *itLower)) // If itLower refers to an element equivalent to k...
+        {
+            const_iterator itUpper(itLower);
+            return eastl::pair<const_iterator, const_iterator>(itLower, ++itUpper);
+        }
+
+        return eastl::pair<const_iterator, const_iterator>(itLower, itLower); // Empty range.
+    }
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // multiset
+ ///////////////////////////////////////////////////////////////////////
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline multiset<Key, Compare, Allocator>::multiset(const allocator_type& allocator)
+        : base_type(allocator)
+    {
+        // Empty. The base class constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline multiset<Key, Compare, Allocator>::multiset(const Compare& compare, const allocator_type& allocator)
+        : base_type(compare, allocator)
+    {
+        // Empty. The base class constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline multiset<Key, Compare, Allocator>::multiset(const this_type& x)
+        : base_type(x)
+    {
+        // Empty. The base class copy constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    template <typename Iterator>
+    inline multiset<Key, Compare, Allocator>::multiset(Iterator itBegin, Iterator itEnd)
+        : base_type(itBegin, itEnd, Compare(), EASTL_MULTISET_DEFAULT_ALLOCATOR)
+    {
+        // Empty. The base class range constructor does all the work.
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::size_type
+    multiset<Key, Compare, Allocator>::erase(const Key& k)
+    {
+        // Erase every element equivalent to k and report how many there were.
+        const iterator itLower(lower_bound(k));
+        const iterator itUpper(upper_bound(k));
+        const size_type nCount = (size_type)eastl::distance(itLower, itUpper);
+
+        base_type::erase(itLower, itUpper);
+        return nCount;
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::iterator
+    multiset<Key, Compare, Allocator>::erase(iterator position)
+    {
+        // We need to provide this version because we override another version
+        // and C++ name hiding rules would otherwise make the base version of
+        // this function invisible to callers.
+        return base_type::erase(position);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::iterator
+    multiset<Key, Compare, Allocator>::erase(iterator first, iterator last)
+    {
+        // We need to provide this version because we override another version
+        // and C++ name hiding rules would otherwise make the base version of
+        // this function invisible to callers.
+        return base_type::erase(first, last);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::size_type
+    multiset<Key, Compare, Allocator>::count(const Key& k) const
+    {
+        // The count is the size of the range of elements equivalent to k.
+        const const_iterator itLower(lower_bound(k));
+        const const_iterator itUpper(upper_bound(k));
+
+        return (size_type)eastl::distance(itLower, itUpper);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::reverse_iterator
+    multiset<Key, Compare, Allocator>::erase(reverse_iterator position)
+    {
+        // A reverse_iterator's base() refers to the element after the one the
+        // reverse_iterator addresses, so we increment before taking base() to
+        // obtain the forward iterator for the element to erase.
+        return reverse_iterator(erase((++position).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline typename multiset<Key, Compare, Allocator>::reverse_iterator
+    multiset<Key, Compare, Allocator>::erase(reverse_iterator first, reverse_iterator last)
+    {
+        // Version which erases in order from first to last.
+        // difference_type i(first.base() - last.base());
+        // while(i--)
+        //     first = erase(first);
+        // return first;
+
+        // Version which erases in order from last to first, but is slightly more efficient:
+        // The (++it).base() adjustments convert each reverse_iterator to the
+        // forward iterator addressing the same element (base() is offset by one).
+        return reverse_iterator(erase((++last).base(), (++first).base()));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline eastl::pair<typename multiset<Key, Compare, Allocator>::iterator,
+                       typename multiset<Key, Compare, Allocator>::iterator>
+    multiset<Key, Compare, Allocator>::equal_range(const Key& k)
+    {
+        // There are multiple ways to implement equal_range. The implementation mentioned
+        // in the C++ standard and which is used by most (all?) commercial STL implementations
+        // is this:
+        //     return eastl::pair<iterator, iterator>(lower_bound(k), upper_bound(k));
+        //
+        // This does two tree searches -- one for the lower bound and one for the
+        // upper bound. This works well for the case whereby you have a large container
+        // and there are lots of duplicated values. We provide an alternative version
+        // of equal_range called equal_range_small for cases where the user is confident
+        // that the number of duplicated items is only a few.
+
+        return eastl::pair<iterator, iterator>(lower_bound(k), upper_bound(k));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline eastl::pair<typename multiset<Key, Compare, Allocator>::const_iterator,
+                       typename multiset<Key, Compare, Allocator>::const_iterator>
+    multiset<Key, Compare, Allocator>::equal_range(const Key& k) const
+    {
+        // See comments above in the non-const version of equal_range.
+        // Note: this previously constructed an eastl::pair<iterator, iterator>,
+        // which compiled only because iterator and const_iterator are the same
+        // type here (the rbtree bMutableIterators parameter is false). We now
+        // construct the declared return type so the code remains correct even
+        // if the iterator types ever diverge.
+        return eastl::pair<const_iterator, const_iterator>(lower_bound(k), upper_bound(k));
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline eastl::pair<typename multiset<Key, Compare, Allocator>::iterator,
+                       typename multiset<Key, Compare, Allocator>::iterator>
+    multiset<Key, Compare, Allocator>::equal_range_small(const Key& k)
+    {
+        // We provide an alternative version of equal_range here which works faster
+        // for the case where there are at most a small number of duplicated keys:
+        // one tree search for the lower bound, then a short linear walk forward
+        // for the upper bound.
+        const iterator itLower(lower_bound(k));
+        iterator       itUpper(itLower);
+
+        // Walk forward while the element remains equivalent to k. We dereference
+        // the iterator (*itUpper) rather than reaching into itUpper.mpNode->mValue;
+        // the two are equivalent, but this respects the iterator interface and is
+        // consistent with the const overload of this function.
+        while((itUpper != end()) && !mCompare(k, *itUpper))
+            ++itUpper;
+
+        return eastl::pair<iterator, iterator>(itLower, itUpper);
+    }
+
+
+    template <typename Key, typename Compare, typename Allocator>
+    inline eastl::pair<typename multiset<Key, Compare, Allocator>::const_iterator,
+                       typename multiset<Key, Compare, Allocator>::const_iterator>
+    multiset<Key, Compare, Allocator>::equal_range_small(const Key& k) const
+    {
+        // Alternative version of equal_range which is faster when there are at
+        // most a small number of duplicated keys: one tree search for the lower
+        // bound, then a short linear walk forward for the upper bound.
+        const const_iterator itLower(lower_bound(k));
+        const_iterator       itEnd(itLower);
+
+        for(; (itEnd != end()) && !mCompare(k, *itEnd); ++itEnd)
+        {
+            // Keep walking while the element remains equivalent to k.
+        }
+
+        return eastl::pair<const_iterator, const_iterator>(itLower, itEnd);
+    }
+
+
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/sort.h b/UnknownVersion/include/EASTL/sort.h
new file mode 100644
index 0000000..4d77466
--- /dev/null
+++ b/UnknownVersion/include/EASTL/sort.h
@@ -0,0 +1,912 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/sort.h
+// Written by Paul Pedriana - 2005.
+//////////////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////////////
+// This file implements sorting algorithms. Some of these are equivalent to
+// std C++ sorting algorithms, while others don't have equivalents in the
+// C++ standard. We implement the following sorting algorithms:
+// is_sorted
+// sort The implementation of this is simply mapped to quick_sort.
+// quick_sort
+// partial_sort
+// insertion_sort
+// shell_sort
+// heap_sort
+// stable_sort The implementation of this is simply mapped to merge_sort.
+// merge
+// merge_sort
+// merge_sort_buffer
+// nth_element
+// radix_sort Found in sort_extra.h.
+// comb_sort Found in sort_extra.h.
+// bubble_sort Found in sort_extra.h.
+// selection_sort Found in sort_extra.h.
+// shaker_sort Found in sort_extra.h.
+// bucket_sort Found in sort_extra.h.
+//
+// Additional sorting and related algorithms we may want to implement:
+// partial_sort_copy This would be like the std STL version.
+//    partition          This would be like the std STL version. This is not categorized as a sort routine by the language standard.
+// stable_partition This would be like the std STL version.
+// counting_sort Maybe we don't want to implement this.
+//
+//////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_SORT_H
+#define EASTL_SORT_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/iterator.h>
+#include <EASTL/memory.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/functional.h>
+#include <EASTL/heap.h>
+#include <EASTL/allocator.h>
+#include <EASTL/memory.h> // Note: duplicate of the <EASTL/memory.h> include above; harmless due to its include guard, but a candidate for removal.
+
+
+
+namespace eastl
+{
+
+    /// is_sorted
+    ///
+    /// Returns true if the range [first, last) is sorted with respect to the
+    /// user-supplied comparison. An empty range is considered to be sorted.
+    /// To test if a range is reverse-sorted, use 'greater' as the comparison
+    /// instead of 'less'.
+    ///
+    /// Example usage:
+    ///     vector<int> intArray;
+    ///     bool bIsSorted        = is_sorted(intArray.begin(), intArray.end());
+    ///     bool bIsReverseSorted = is_sorted(intArray.begin(), intArray.end(), greater<int>());
+    ///
+    template <typename ForwardIterator, typename StrictWeakOrdering>
+    bool is_sorted(ForwardIterator first, ForwardIterator last, StrictWeakOrdering compare)
+    {
+        if(first == last) // An empty range is trivially sorted.
+            return true;
+
+        ForwardIterator prev(first);
+        ForwardIterator current(first);
+
+        // Compare each adjacent pair; any inversion means the range is unsorted.
+        for(++current; current != last; prev = current, ++current)
+        {
+            if(compare(*current, *prev))
+            {
+                EASTL_VALIDATE_COMPARE(!compare(*prev, *current)); // Validate that the compare function is sane.
+                return false;
+            }
+        }
+        return true;
+    }
+
+    template <typename ForwardIterator>
+    inline bool is_sorted(ForwardIterator first, ForwardIterator last)
+    {
+        // Defer to the predicate version, using eastl::less as the ordering.
+        typedef typename eastl::iterator_traits<ForwardIterator>::value_type value_type;
+        return eastl::is_sorted(first, last, eastl::less<value_type>());
+    }
+
+
+
+    /// merge
+    ///
+    /// Merges two sorted input ranges into a single sorted output range.
+    /// The merge is stable in that no element from the first range changes
+    /// order relative to other elements from the first range; on equivalence
+    /// the element from the first range is emitted first.
+    ///
+    template <typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Compare>
+    OutputIterator merge(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare compare)
+    {
+        for(; (first1 != last1) && (first2 != last2); ++result)
+        {
+            // Take from the second range only when strictly less; this preserves stability.
+            if(compare(*first2, *first1))
+            {
+                EASTL_VALIDATE_COMPARE(!compare(*first1, *first2)); // Validate that the compare function is sane.
+                *result = *first2;
+                ++first2;
+            }
+            else
+            {
+                *result = *first1;
+                ++first1;
+            }
+        }
+
+        // At most one of the two ranges still has elements; copy the remainder.
+        return eastl::copy(first2, last2, eastl::copy(first1, last1, result));
+    }
+
+    template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
+    inline OutputIterator merge(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result)
+    {
+        // Defer to the predicate version, using eastl::less as the ordering.
+        typedef typename eastl::iterator_traits<InputIterator1>::value_type value_type;
+        return eastl::merge(first1, last1, first2, last2, result, eastl::less<value_type>());
+    }
+
+
+
+    /// insertion_sort
+    ///
+    /// Implements the InsertionSort algorithm, ordering elements with the
+    /// user-supplied StrictWeakOrdering. Elements are shifted only while
+    /// strictly out of order, so equivalent elements keep their relative order.
+    ///
+    template <typename BidirectionalIterator, typename StrictWeakOrdering>
+    void insertion_sort(BidirectionalIterator first, BidirectionalIterator last, StrictWeakOrdering compare)
+    {
+        typedef typename eastl::iterator_traits<BidirectionalIterator>::value_type value_type;
+
+        if(first != last)
+        {
+            BidirectionalIterator iCurrent, iNext, iSorted = first;
+
+            // Invariant: [first, iSorted) is sorted; each pass inserts *iSorted into it.
+            for(++iSorted; iSorted != last; ++iSorted)
+            {
+                const value_type temp(*iSorted); // Copy the element being inserted; the shifting below overwrites its slot.
+
+                iNext = iCurrent = iSorted;
+
+                // Shift greater elements one position right until temp's insertion point is found.
+                for(--iCurrent; (iNext != first) && compare(temp, *iCurrent); --iNext, --iCurrent)
+                {
+                    EASTL_VALIDATE_COMPARE(!compare(*iCurrent, temp)); // Validate that the compare function is sane.
+                    *iNext = *iCurrent;
+                }
+
+                *iNext = temp; // Drop the saved element into the hole.
+            }
+        }
+    } // insertion_sort
+
+
+
+    /// insertion_sort
+    ///
+    /// Default version, which orders elements with operator <.
+    ///
+    template <typename BidirectionalIterator>
+    void insertion_sort(BidirectionalIterator first, BidirectionalIterator last)
+    {
+        typedef typename eastl::iterator_traits<BidirectionalIterator>::value_type value_type;
+
+        if(first != last)
+        {
+            BidirectionalIterator iCurrent, iNext, iSorted = first;
+
+            // Invariant: [first, iSorted) is sorted; each pass inserts *iSorted into it.
+            for(++iSorted; iSorted != last; ++iSorted)
+            {
+                const value_type temp(*iSorted); // Copy the element being inserted; the shifting below overwrites its slot.
+
+                iNext = iCurrent = iSorted;
+
+                // Shift greater elements one position right until temp's insertion point is found.
+                for(--iCurrent; (iNext != first) && (temp < *iCurrent); --iNext, --iCurrent)
+                {
+                    EASTL_VALIDATE_COMPARE(!(*iCurrent < temp)); // Validate that the compare function is sane.
+                    *iNext = *iCurrent;
+                }
+
+                *iNext = temp; // Drop the saved element into the hole.
+            }
+        }
+    } // insertion_sort
+
+
+ #if 0 /*
+ // STLPort-like variation of insertion_sort. Doesn't seem to run quite as fast for small runs.
+ //
+ template <typename RandomAccessIterator, typename Compare>
+ void insertion_sort(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ if(first != last)
+ {
+ for(RandomAccessIterator i = first + 1; i != last; ++i)
+ {
+ const typename eastl::iterator_traits<RandomAccessIterator>::value_type value(*i);
+
+ if(compare(value, *first))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first, value)); // Validate that the compare function is sane.
+ eastl::copy_backward(first, i, i + 1);
+ *first = value;
+ }
+ else
+ {
+ RandomAccessIterator end(i), prev(i);
+
+ for(--prev; compare(value, *prev); --end, --prev)
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*prev, value)); // Validate that the compare function is sane.
+ *end = *prev;
+ }
+
+ *end = value;
+ }
+ }
+ }
+ }
+
+
+ // STLPort-like variation of insertion_sort. Doesn't seem to run quite as fast for small runs.
+ //
+ template <typename RandomAccessIterator>
+ void insertion_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ if(first != last)
+ {
+ for(RandomAccessIterator i = first + 1; i != last; ++i)
+ {
+ const typename eastl::iterator_traits<RandomAccessIterator>::value_type value(*i);
+
+ if(value < *first)
+ {
+ EASTL_VALIDATE_COMPARE(!(*first < value)); // Validate that the compare function is sane.
+ eastl::copy_backward(first, i, i + 1);
+ *first = value;
+ }
+ else
+ {
+ RandomAccessIterator end(i), prev(i);
+
+ for(--prev; value < *prev; --end, --prev)
+ {
+ EASTL_VALIDATE_COMPARE(!(*prev < value)); // Validate that the compare function is sane.
+ *end = *prev;
+ }
+
+ *end = value;
+ }
+ }
+ }
+ } */
+ #endif
+
+
+    /// shell_sort
+    ///
+    /// Implements the ShellSort algorithm. This algorithm is a serious algorithm for larger
+    /// data sets, as reported by Sedgewick in his discussions on QuickSort. Note that shell_sort
+    /// requires a random access iterator, which usually means an array (eg. vector, deque).
+    /// ShellSort has good performance with presorted sequences.
+    /// The term "shell" derives from the name of the inventor, David Shell.
+    ///
+    /// To consider: Allow the user to specify the "h-sequence" array.
+    ///
+    template <typename RandomAccessIterator, typename StrictWeakOrdering>
+    void shell_sort(RandomAccessIterator first, RandomAccessIterator last, StrictWeakOrdering compare)
+    {
+        typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+
+        // We use the Knuth 'h' sequence below, as it is easy to calculate at runtime.
+        // However, possibly we are better off using a different sequence based on a table.
+        // One such sequence which averages slightly better than Knuth is:
+        //     1, 5, 19, 41, 109, 209, 505, 929, 2161, 3905, 8929, 16001, 36289,
+        //     64769, 146305, 260609, 587521, 1045505, 2354689, 4188161, 9427969, 16764929
+
+        if(first != last)
+        {
+            RandomAccessIterator iCurrent, iBack, iSorted, iInsertFirst;
+            difference_type      nSize  = last - first;
+            difference_type      nSpace = 1; // nSpace is the 'h' value of the ShellSort algorithm.
+
+            // Grow nSpace to the first Knuth 'h' value >= the range size.
+            while(nSpace < nSize)
+                nSpace = (nSpace * 3) + 1; // This is the Knuth 'h' sequence: 1, 4, 13, 40, 121, 364, 1093, 3280, 9841, 29524, 88573, 265720, 797161, 2391484, 7174453, 21523360, 64570081, 193710244,
+
+            // For each gap, in descending order, sort each of the nSpace interleaved subsequences.
+            for(nSpace = (nSpace - 1) / 3; nSpace >= 1; nSpace = (nSpace - 1) / 3) // Integer division is less than ideal.
+            {
+                for(difference_type i = 0; i < nSpace; i++)
+                {
+                    iInsertFirst = first + i; // First element of the i'th gap-separated subsequence.
+
+                    for(iSorted = iInsertFirst + nSpace; iSorted < last; iSorted += nSpace)
+                    {
+                        iBack = iCurrent = iSorted;
+
+                        // Swap *iCurrent backward through its subsequence until it is in position.
+                        for(iBack -= nSpace; (iCurrent != iInsertFirst) && compare(*iCurrent, *iBack); iCurrent = iBack, iBack -= nSpace)
+                        {
+                            EASTL_VALIDATE_COMPARE(!compare(*iBack, *iCurrent)); // Validate that the compare function is sane.
+                            eastl::iter_swap(iCurrent, iBack);
+                        }
+                    }
+                }
+            }
+        }
+    } // shell_sort
+
+ template <typename RandomAccessIterator>
+ inline void shell_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::shell_sort<RandomAccessIterator, Less>(first, last, Less());
+ }
+
+
+
+ /// heap_sort
+ ///
+ /// Implements the HeapSort algorithm.
+ /// Note that heap_sort requires a random access iterator, which usually means
+ /// an array (eg. vector, deque).
+ ///
+ template <typename RandomAccessIterator, typename StrictWeakOrdering>
+ void heap_sort(RandomAccessIterator first, RandomAccessIterator last, StrictWeakOrdering compare)
+ {
+ // We simply call our heap algorithms to do the work for us.
+ eastl::make_heap<RandomAccessIterator, StrictWeakOrdering>(first, last, compare);
+ eastl::sort_heap<RandomAccessIterator, StrictWeakOrdering>(first, last, compare);
+ }
+
+ template <typename RandomAccessIterator>
+ inline void heap_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::heap_sort<RandomAccessIterator, Less>(first, last, Less());
+ }
+
+
+
+
+ /// merge_sort_buffer
+ ///
+ /// Implements the MergeSort algorithm with a user-supplied buffer.
+ /// The input buffer must be able to hold a number of items equal to 'last - first'.
+ /// Note that merge_sort_buffer requires a random access iterator, which usually means
+ /// an array (eg. vector, deque).
+ ///
+
+ // For reference, the following is the simple version, before inlining one level
+ // of recursion and eliminating the copy:
+ //
+ //template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering>
+ //void merge_sort_buffer(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer, StrictWeakOrdering compare)
+ //{
+ // typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ //
+ // const difference_type nCount = last - first;
+ //
+ // if(nCount > 1)
+ // {
+ // const difference_type nMid = nCount / 2;
+ //
+ // eastl::merge_sort_buffer<RandomAccessIterator, T, StrictWeakOrdering>
+ // (first, first + nMid, pBuffer, compare);
+ // eastl::merge_sort_buffer<RandomAccessIterator, T, StrictWeakOrdering>
+ // (first + nMid, last , pBuffer, compare);
+ // eastl::copy(first, last, pBuffer);
+ // eastl::merge<T*, T*, RandomAccessIterator, StrictWeakOrdering>
+ // (pBuffer, pBuffer + nMid, pBuffer + nMid, pBuffer + nCount, first, compare);
+ // }
+ //}
+
    /// merge_sort_buffer (three/four-argument form)
    ///
    /// In-place MergeSort of [first, last) using the caller-supplied scratch
    /// buffer pBuffer, which must hold at least (last - first) constructed,
    /// assignable T objects. Compared to the reference version commented out
    /// above, this inlines one level of recursion and thereby eliminates the
    /// eastl::copy of the range into the buffer: each half is sorted in place,
    /// then merged *into* the buffer, and finally the two sorted buffer halves
    /// are merged back into [first, last).
    template <typename RandomAccessIterator, typename T, typename StrictWeakOrdering>
    void merge_sort_buffer(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer, StrictWeakOrdering compare)
    {
        typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
        const difference_type nCount = last - first;

        if(nCount > 1)
        {
            const difference_type nMid = nCount / 2;
            RandomAccessIterator half = first + nMid;

            if(nMid > 1)
            {
                // Sort the two quarters of the first half in place, then merge
                // them into pBuffer[0, nMid).
                const difference_type nQ1(nMid / 2);
                RandomAccessIterator part(first + nQ1);

                eastl::merge_sort_buffer<RandomAccessIterator, T, StrictWeakOrdering>(first, part, pBuffer, compare);
                eastl::merge_sort_buffer<RandomAccessIterator, T, StrictWeakOrdering>(part, half, pBuffer + nQ1, compare);
                eastl::merge<RandomAccessIterator, RandomAccessIterator, T*, StrictWeakOrdering>
                            (first, part, part, half, pBuffer, compare);
            }
            else
                *pBuffer = *first; // Single-element half: copy it straight into the buffer.

            if((nCount - nMid) > 1)
            {
                // Same scheme for the second half, into pBuffer[nMid, nCount).
                const difference_type nQ3((nMid + nCount) / 2);
                RandomAccessIterator part(first + nQ3);

                eastl::merge_sort_buffer<RandomAccessIterator, T, StrictWeakOrdering>(half, part, pBuffer + nMid, compare);
                eastl::merge_sort_buffer<RandomAccessIterator, T, StrictWeakOrdering>(part, last, pBuffer + nQ3, compare);
                eastl::merge<RandomAccessIterator, RandomAccessIterator, T*, StrictWeakOrdering>
                            (half, part, part, last, pBuffer + nMid, compare);
            }
            else
                *(pBuffer + nMid) = *half; // Single-element half: copy directly.

            // Both sorted halves now live in the buffer; merge them back into the range.
            eastl::merge<T*, T*, RandomAccessIterator, StrictWeakOrdering>
                        (pBuffer, pBuffer + nMid, pBuffer + nMid, pBuffer + nCount, first, compare);
        }
    }
+
+ template <typename RandomAccessIterator, typename T>
+ inline void merge_sort_buffer(RandomAccessIterator first, RandomAccessIterator last, T* pBuffer)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::merge_sort_buffer<RandomAccessIterator, T, Less>(first, last, pBuffer, Less());
+ }
+
+
+
+ /// merge_sort
+ ///
+ /// Implements the MergeSort algorithm.
+ /// This algorithm allocates memory via the user-supplied allocator. Use merge_sort_buffer
+ /// function if you want a version which doesn't allocate memory.
+ /// Note that merge_sort requires a random access iterator, which usually means
+ /// an array (eg. vector, deque).
+ ///
    /// merge_sort (allocator + compare form)
    ///
    /// Stable MergeSort of [first, last). Allocates a scratch buffer of
    /// (last - first) value_type objects from 'allocator'; use merge_sort_buffer
    /// directly to avoid the allocation.
    template <typename RandomAccessIterator, typename Allocator, typename StrictWeakOrdering>
    void merge_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator, StrictWeakOrdering compare)
    {
        typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
        typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;

        const difference_type nCount = last - first;

        if(nCount > 1)
        {
            // We need to allocate an array of nCount value_type objects as a temporary buffer.
            value_type* const pBuffer = (value_type*)allocate_memory(allocator, nCount * sizeof(value_type), EASTL_ALIGN_OF(value_type), 0);
            // Default-construct the buffer elements, because merge_sort_buffer
            // *assigns* (rather than constructs) into them. This implies
            // value_type must be default-constructible.
            eastl::uninitialized_fill(pBuffer, pBuffer + nCount, value_type());

            eastl::merge_sort_buffer<RandomAccessIterator, value_type, StrictWeakOrdering>
                                    (first, last, pBuffer, compare);

            // NOTE(review): if 'compare' (or a value_type assignment) throws, the
            // buffer is leaked — this code assumes non-throwing operations, which
            // is the usual EASTL configuration. Confirm if exceptions are enabled.
            eastl::destruct(pBuffer, pBuffer + nCount);
            EASTLFree(allocator, pBuffer, nCount * sizeof(value_type));
        }
    }
+
+ template <typename RandomAccessIterator, typename Allocator>
+ inline void merge_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator)
+ {
+ typedef eastl::less<typename eastl::iterator_traits<RandomAccessIterator>::value_type> Less;
+
+ eastl::merge_sort<RandomAccessIterator, Allocator, Less>(first, last, allocator, Less());
+ }
+
+
+
+ /////////////////////////////////////////////////////////////////////
+ // quick_sort
+ //
+ // We do the "introspection sort" variant of quick sort which is now
+ // well-known and understood. You can read about this algorithm in
+ // many articles on quick sort, but briefly what it does is a median-
+ // of-three quick sort whereby the recursion depth is limited to a
+ // some value (after which it gives up on quick sort and switches to
+ // a heap sort) and whereby after a certain amount of sorting the
+ // algorithm stops doing quick-sort and finishes the sorting via
+ // a simple insertion sort.
+ /////////////////////////////////////////////////////////////////////
+
+ static const int kQuickSortLimit = 28; // For sorts of random arrays over 100 items, 28 - 32 have been found to be good numbers on VC++/Win32.
+
+ namespace Internal
+ {
+ template <typename Size>
+ inline Size Log2(Size n)
+ {
+ int i;
+ for(i = 0; n; ++i)
+ n >>= 1;
+ return i - 1;
+ }
+
+ // To do: Investigate the speed of this bit-trick version of Log2.
+ // It may work better on some platforms but not others.
+ //
+ // union FloatUnion {
+ // float f;
+ // uint32_t i;
+ // };
+ //
+ // inline uint32_t Log2(uint32_t x)
+ // {
+ // const FloatInt32Union u = { x };
+ // return (u.i >> 23) - 127;
+ // }
+ }
+
+
+ /// get_partition
+ ///
+ /// This function takes const T& instead of T because T may have special alignment
+	/// requirements and some compilers (e.g. VC++) don't respect alignment requirements
+ /// for function arguments.
+ ///
    template <typename RandomAccessIterator, typename T>
    inline RandomAccessIterator get_partition(RandomAccessIterator first, RandomAccessIterator last, const T& pivotValue)
    {
        // Hoare-style partition: scan inward from both ends, swapping
        // out-of-place pairs; returns the split point. On return, elements
        // before the result are <= pivot region-wise and elements at/after it
        // are >= pivot (per operator <).
        //
        // NOTE(review): the inner scans carry no explicit bounds checks; they
        // rely on a value equivalent to pivotValue actually occurring inside
        // the range to act as a sentinel (the median-of-three callers in this
        // file guarantee that). Confirm before reusing with an arbitrary pivot.
        const T pivotCopy(pivotValue); // Need to make a temporary because the sequence below is mutating.

        for(; ; ++first)
        {
            while(*first < pivotCopy)
            {
                EASTL_VALIDATE_COMPARE(!(pivotCopy < *first)); // Validate that the compare function is sane.
                ++first;
            }
            --last;

            while(pivotCopy < *last)
            {
                EASTL_VALIDATE_COMPARE(!(*last < pivotCopy)); // Validate that the compare function is sane.
                --last;
            }

            if(first >= last) // Random access iterators allow operator >=
                return first;

            eastl::iter_swap(first, last);
        }
    }
+
+
    /// get_partition (compare form)
    ///
    /// Same Hoare-style partition as above, but ordered by the user-supplied
    /// 'compare' instead of operator <. The same caveat applies: the scans have
    /// no bounds checks and rely on the pivot occurring within the range
    /// (guaranteed by the median-of-three callers in this file).
    template <typename RandomAccessIterator, typename T, typename Compare>
    inline RandomAccessIterator get_partition(RandomAccessIterator first, RandomAccessIterator last, const T& pivotValue, Compare compare)
    {
        const T pivotCopy(pivotValue); // Need to make a temporary because the sequence below is mutating.

        for(; ; ++first)
        {
            while(compare(*first, pivotCopy))
            {
                EASTL_VALIDATE_COMPARE(!compare(pivotCopy, *first)); // Validate that the compare function is sane.
                ++first;
            }
            --last;

            while(compare(pivotCopy, *last))
            {
                EASTL_VALIDATE_COMPARE(!compare(*last, pivotCopy)); // Validate that the compare function is sane.
                --last;
            }

            if(first >= last) // Random access iterators allow operator >=
                return first;

            eastl::iter_swap(first, last);
        }
    }
+
+
    namespace Internal
    {
        // This function is used by quick_sort and is not intended to be used by itself.
        // This is because the implementation below makes an assumption about the input
        // data that quick_sort satisfies but arbitrary data may not: the backward scan
        // has no (prev >= first) bounds check and relies on an element no greater than
        // 'value' existing before 'first' to act as a sentinel (quick_sort's phase-1
        // partitioning provides that).
        // There is a standalone insertion_sort function.
        template <typename RandomAccessIterator>
        inline void insertion_sort_simple(RandomAccessIterator first, RandomAccessIterator last)
        {
            for(RandomAccessIterator current = first; current != last; ++current)
            {
                typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;

                RandomAccessIterator end(current), prev(current);
                const value_type value(*current); // Hole-based insertion: lift out the value, shift, drop it in.

                for(--prev; value < *prev; --end, --prev) // We skip checking for (prev >= first) because quick_sort (our caller) makes this unnecessary.
                {
                    EASTL_VALIDATE_COMPARE(!(*prev < value)); // Validate that the compare function is sane.
                    *end = *prev;
                }

                *end = value;
            }
        }


        // This function is used by quick_sort and is not intended to be used by itself.
        // This is because the implementation below makes an assumption about the input
        // data that quick_sort satisfies but arbitrary data may not (see the overload
        // above: no (prev >= first) bounds check in the backward scan).
        // There is a standalone insertion_sort function.
        template <typename RandomAccessIterator, typename Compare>
        inline void insertion_sort_simple(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
        {
            for(RandomAccessIterator current = first; current != last; ++current)
            {
                typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;

                RandomAccessIterator end(current), prev(current);
                const value_type value(*current);

                for(--prev; compare(value, *prev); --end, --prev) // We skip checking for (prev >= first) because quick_sort (our caller) makes this unnecessary.
                {
                    EASTL_VALIDATE_COMPARE(!compare(*prev, value)); // Validate that the compare function is sane.
                    *end = *prev;
                }

                *end = value;
            }
        }
    } // namespace Internal
+
+
+ template <typename RandomAccessIterator>
+ inline void partial_sort(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::make_heap<RandomAccessIterator>(first, middle);
+
+ for(RandomAccessIterator i = middle; i < last; ++i)
+ {
+ if(*i < *first)
+ {
+ EASTL_VALIDATE_COMPARE(!(*first < *i)); // Validate that the compare function is sane.
+ const value_type temp(*i);
+ *i = *first;
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type>
+ (first, difference_type(0), difference_type(middle - first), difference_type(0), temp);
+ }
+ }
+
+ eastl::sort_heap<RandomAccessIterator>(first, middle);
+ }
+
+
+ template <typename RandomAccessIterator, typename Compare>
+ inline void partial_sort(RandomAccessIterator first, RandomAccessIterator middle, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;
+ typedef typename eastl::iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ eastl::make_heap<RandomAccessIterator, Compare>(first, middle, compare);
+
+ for(RandomAccessIterator i = middle; i < last; ++i)
+ {
+ if(compare(*i, *first))
+ {
+ EASTL_VALIDATE_COMPARE(!compare(*first, *i)); // Validate that the compare function is sane.
+ const value_type temp(*i);
+ *i = *first;
+ eastl::adjust_heap<RandomAccessIterator, difference_type, value_type, Compare>
+ (first, difference_type(0), difference_type(middle - first), difference_type(0), temp, compare);
+ }
+ }
+
+ eastl::sort_heap<RandomAccessIterator, Compare>(first, middle, compare);
+ }
+
+
+ template<typename RandomAccessIterator>
+ inline void nth_element(RandomAccessIterator first, RandomAccessIterator nth, RandomAccessIterator last)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ while((last - first) > 5)
+ {
+ const value_type midValue(eastl::median<value_type>(*first, *(first + (last - first) / 2), *(last - 1)));
+ const RandomAccessIterator midPos(eastl::get_partition<RandomAccessIterator, value_type>(first, last, midValue));
+
+ if(midPos <= nth)
+ first = midPos;
+ else
+ last = midPos;
+ }
+
+ eastl::insertion_sort<RandomAccessIterator>(first, last);
+ }
+
+
+ template<typename RandomAccessIterator, typename Compare>
+ inline void nth_element(RandomAccessIterator first, RandomAccessIterator nth, RandomAccessIterator last, Compare compare)
+ {
+ typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;
+
+ while((last - first) > 5)
+ {
+ const value_type midValue(eastl::median<value_type, Compare>(*first, *(first + (last - first) / 2), *(last - 1), compare));
+ const RandomAccessIterator midPos(eastl::get_partition<RandomAccessIterator, value_type, Compare>(first, last, midValue, compare));
+
+ if(midPos <= nth)
+ first = midPos;
+ else
+ last = midPos;
+ }
+
+ eastl::insertion_sort<RandomAccessIterator, Compare>(first, last, compare);
+ }
+
+
    /// quick_sort_impl
    ///
    /// Phase 1 of introsort: partitions recursively until ranges are at most
    /// kQuickSortLimit long (final ordering is then left to insertion sort in
    /// quick_sort) or the recursion budget kRecursionCount is spent, in which
    /// case it degrades gracefully to a heap sort via partial_sort.
    template <typename RandomAccessIterator, typename Size>
    inline void quick_sort_impl(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount)
    {
        typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;

        while(((last - first) > kQuickSortLimit) && (kRecursionCount > 0))
        {
            // Median-of-three pivot, then Hoare partition.
            const RandomAccessIterator position(eastl::get_partition<RandomAccessIterator, value_type>(first, last, eastl::median<value_type>(*first, *(first + (last - first) / 2), *(last - 1))));

            // Recurse on the right part; loop (tail-recursion elimination) on the
            // left. Note --kRecursionCount also decrements our own budget.
            eastl::quick_sort_impl<RandomAccessIterator, Size>(position, last, --kRecursionCount);
            last = position;
        }

        // Budget exhausted: finish this range with heap sort to guarantee
        // O(n log n) worst case (the "introspective" escape hatch).
        if(kRecursionCount == 0)
            eastl::partial_sort<RandomAccessIterator>(first, last, last);
    }
+
+
    /// quick_sort_impl (compare form)
    ///
    /// As the three-argument quick_sort_impl, but ordered by 'compare'.
    /// See that overload for the recursion/budget scheme.
    template <typename RandomAccessIterator, typename Size, typename Compare>
    inline void quick_sort_impl(RandomAccessIterator first, RandomAccessIterator last, Size kRecursionCount, Compare compare)
    {
        typedef typename iterator_traits<RandomAccessIterator>::value_type value_type;

        while(((last - first) > kQuickSortLimit) && (kRecursionCount > 0))
        {
            // Median-of-three pivot, Hoare partition, recurse right, loop left.
            const RandomAccessIterator position(eastl::get_partition<RandomAccessIterator, value_type, Compare>(first, last, eastl::median<value_type, Compare>(*first, *(first + (last - first) / 2), *(last - 1), compare), compare));

            eastl::quick_sort_impl<RandomAccessIterator, Size, Compare>(position, last, --kRecursionCount, compare);
            last = position;
        }

        // Budget exhausted: heap-sort fallback guarantees O(n log n) worst case.
        if(kRecursionCount == 0)
            eastl::partial_sort<RandomAccessIterator, Compare>(first, last, last, compare);
    }
+
+
+ /// quick_sort
+ ///
+ /// quick_sort sorts the elements in [first, last) into ascending order,
+ /// meaning that if i and j are any two valid iterators in [first, last)
+ /// such that i precedes j, then *j is not less than *i. quick_sort is not
+ /// guaranteed to be stable. That is, suppose that *i and *j are equivalent:
+ /// neither one is less than the other. It is not guaranteed that the
+ /// relative order of these two elements will be preserved by sort.
+ ///
+ /// We implement the "introspective" variation of quick-sort. This is
+ /// considered to be the best general-purpose variant, as it avoids
+ /// worst-case behaviour and optimizes the final sorting stage by
+ /// switching to an insertion sort.
+ ///
    template <typename RandomAccessIterator>
    void quick_sort(RandomAccessIterator first, RandomAccessIterator last)
    {
        typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;

        if(first != last)
        {
            // Phase 1: introspective quick sort down to sub-ranges of at most
            // kQuickSortLimit elements, with recursion depth capped at 2*log2(n).
            eastl::quick_sort_impl<RandomAccessIterator, difference_type>(first, last, 2 * Internal::Log2(last - first));

            // Phase 2: finish with insertion sort. Only the first kQuickSortLimit
            // elements need the bounds-checked insertion_sort; the rest may use the
            // faster 'simple' variant, which relies on phase 1 having left, for each
            // element, a no-greater element within kQuickSortLimit positions before it.
            if((last - first) > (difference_type)kQuickSortLimit)
            {
                eastl::insertion_sort<RandomAccessIterator>(first, first + kQuickSortLimit);
                eastl::Internal::insertion_sort_simple<RandomAccessIterator>(first + kQuickSortLimit, last);
            }
            else
                eastl::insertion_sort<RandomAccessIterator>(first, last);
        }
    }
+
+
    /// quick_sort (compare form)
    ///
    /// As the two-argument quick_sort, but ordered by 'compare'. See the
    /// overload above for the two-phase (introsort + insertion sort) scheme.
    template <typename RandomAccessIterator, typename Compare>
    void quick_sort(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
    {
        typedef typename eastl::iterator_traits<RandomAccessIterator>::difference_type difference_type;

        if(first != last)
        {
            // Phase 1: depth-capped introspective quick sort.
            eastl::quick_sort_impl<RandomAccessIterator, difference_type, Compare>(first, last, 2 * Internal::Log2(last - first), compare);

            // Phase 2: insertion-sort finish (see the non-compare overload for why
            // the tail may use the unchecked 'simple' variant).
            if((last - first) > (difference_type)kQuickSortLimit)
            {
                eastl::insertion_sort<RandomAccessIterator, Compare>(first, first + kQuickSortLimit, compare);
                eastl::Internal::insertion_sort_simple<RandomAccessIterator, Compare>(first + kQuickSortLimit, last, compare);
            }
            else
                eastl::insertion_sort<RandomAccessIterator, Compare>(first, last, compare);
        }
    }
+
+
+
+ /// sort
+ ///
+ /// We simply use quick_sort. See quick_sort for details.
+ ///
+ template <typename RandomAccessIterator>
+ inline void sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ eastl::quick_sort<RandomAccessIterator>(first, last);
+ }
+
+ template <typename RandomAccessIterator, typename Compare>
+ inline void sort(RandomAccessIterator first, RandomAccessIterator last, Compare compare)
+ {
+ eastl::quick_sort<RandomAccessIterator, Compare>(first, last, compare);
+ }
+
+
+
+ /// stable_sort
+ ///
+ /// We simply use merge_sort. See merge_sort for details.
+ /// Beware that the used merge_sort -- and thus stable_sort -- allocates
+ /// memory during execution. Try using merge_sort_buffer if you want
+ /// to avoid memory allocation.
+ ///
+ template <typename RandomAccessIterator, typename StrictWeakOrdering>
+ void stable_sort(RandomAccessIterator first, RandomAccessIterator last, StrictWeakOrdering compare)
+ {
+ eastl::merge_sort<RandomAccessIterator, EASTLAllocatorType, StrictWeakOrdering>
+ (first, last, *get_default_allocator(0), compare);
+ }
+
+ template <typename RandomAccessIterator>
+ void stable_sort(RandomAccessIterator first, RandomAccessIterator last)
+ {
+ eastl::merge_sort<RandomAccessIterator, EASTLAllocatorType>
+ (first, last, *get_default_allocator(0));
+ }
+
+ template <typename RandomAccessIterator, typename Allocator, typename StrictWeakOrdering>
+ void stable_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator, StrictWeakOrdering compare)
+ {
+ eastl::merge_sort<RandomAccessIterator, Allocator, StrictWeakOrdering>(first, last, allocator, compare);
+ }
+
+ // This is not defined because it would cause compiler errors due to conflicts with a version above.
+ //template <typename RandomAccessIterator, typename Allocator>
+ //void stable_sort(RandomAccessIterator first, RandomAccessIterator last, Allocator& allocator)
+ //{
+ // eastl::merge_sort<RandomAccessIterator, Allocator>(first, last, allocator);
+ //}
+
+} // namespace eastl
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/string.h b/UnknownVersion/include/EASTL/string.h
new file mode 100644
index 0000000..69e21d6
--- /dev/null
+++ b/UnknownVersion/include/EASTL/string.h
@@ -0,0 +1,3498 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/string.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implements a basic_string class, much like the C++ std::basic_string.
+// The primary distinctions between basic_string and std::basic_string are:
+// - basic_string has a few extension functions that allow for increased performance.
+// - basic_string has a few extension functions that make use easier,
+// such as a member sprintf function and member tolower/toupper functions.
+// - basic_string supports debug memory naming natively.
+// - basic_string is easier to read, debug, and visualize.
+// - basic_string internally manually expands basic functions such as begin(),
+// size(), etc. in order to improve debug performance and optimizer success.
+// - basic_string is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - basic_string has less deeply nested function calls and allows the user to
+// enable forced inlining in debug builds in order to reduce bloat.
+// - basic_string doesn't use char traits. As a result, EASTL assumes that
+// strings will hold characters and not exotic things like widgets. At the
+// very least, basic_string assumes that the value_type is a POD.
+// - basic_string::size_type is defined as eastl_size_t instead of size_t in
+// order to save memory and run faster on 64 bit systems.
+// - basic_string data is guaranteed to be contiguous.
+// - basic_string data is guaranteed to be 0-terminated, and the c_str() function
+// is guaranteed to return the same pointer as the data() which is guaranteed
+// to be the same value as &string[0].
+// - basic_string has a set_capacity() function which frees excess capacity.
+// The only way to do this with std::basic_string is via the cryptic non-obvious
+// trick of using: basic_string<char>(x).swap(x);
+// - basic_string has a force_size() function, which unilaterally moves the string
+// end position (mpEnd) to the given location. Useful for when the user writes
+//    into the string via some external means such as C strcpy or sprintf.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Copy on Write (cow)
+//
+// This string implementation does not do copy on write (cow). This is by design,
+// as cow penalizes 95% of string uses for the benefit of only 5% of the uses
+// (these percentages are qualitative, not quantitative). The primary benefit of
+// cow is that it allows for the sharing of string data between two string objects.
+// Thus if you say this:
+// string a("hello");
+// string b(a);
+// the "hello" will be shared between a and b. If you then say this:
+// a = "world";
+// then a will release its reference to "hello" and leave b with the only reference
+// to it. Normally this functionality is accomplished via reference counting and
+// with atomic operations or mutexes.
+//
+// The C++ standard does not say anything about basic_string and cow. However,
+// for a basic_string implementation to be standards-conforming, a number of
+// issues arise which dictate some things about how one would have to implement
+// a cow string. The discussion of these issues will not be rehashed here, as you
+// can read the references below for better detail than can be provided in the
+// space we have here. However, we can say that the C++ standard is sensible and
+// that anything we try to do here to allow for an efficient cow implementation
+// would result in a generally unacceptable string interface.
+//
+// The disadvantages of cow strings are:
+// - A reference count needs to exist with the string, which increases string memory usage.
+// - With thread safety, atomic operations and mutex locks are expensive, especially
+// on weaker memory systems such as console gaming platforms.
+//   - All non-const string accessor functions need to do a sharing check, and the
+// first such check needs to detach the string. Similarly, all string assignments
+// need to do a sharing check as well. If you access the string before doing an
+// assignment, the assignment doesn't result in a shared string, because the string
+// has already been detached.
+// - String sharing doesn't happen the large majority of the time. In some cases,
+// the total sum of the reference count memory can exceed any memory savings
+// gained by the strings that share representations.
+//
+// The addition of a string_cow class is under consideration for this library.
+// There are conceivably some systems which have string usage patterns which would
+// benefit from cow sharing. Such functionality is best saved for a separate string
+// implementation so that the other string uses aren't penalized.
+//
+// References:
+// This is a good starting HTML reference on the topic:
+// http://www.gotw.ca/publications/optimizations.htm
+// Here is a Usenet discussion on the topic:
+// http://groups-beta.google.com/group/comp.lang.c++.moderated/browse_thread/thread/3dc6af5198d0bf7/886c8642cb06e03d
+//
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_STRING_H
+#define EASTL_STRING_H
+
+#include <EASTL/internal/config.h>
+#if EASTL_ABSTRACT_STRING_ENABLED
+ #include <EASTL/bonus/string_abstract.h>
+#else // 'else' encompasses the entire rest of this file.
+#include <EASTL/allocator.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#ifdef __clang__
+ #include <EASTL/internal/hashtable.h>
+#endif
+
+#ifdef _MSC_VER
+ #pragma warning(push, 0)
+#endif
+#include <stddef.h> // size_t, ptrdiff_t, etc.
+#include <stdarg.h> // vararg functionality.
+#include <stdlib.h> // malloc, free.
+#include <stdio.h> // snprintf, etc.
+#include <ctype.h> // toupper, etc.
+#include <wchar.h>              // towupper, etc.
+#ifdef __MWERKS__
+ #include <../Include/string.h> // Force the compiler to use the std lib header.
+#else
+ #include <string.h> // strlen, etc.
+#endif
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+#if EASTL_EXCEPTIONS_ENABLED
+ #ifdef _MSC_VER
+ #pragma warning(push, 0)
+ #endif
+ #include <stdexcept> // std::out_of_range, std::length_error.
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+#endif
+
+#ifdef _MSC_VER
+ #pragma warning(push)
+ #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+ #pragma warning(disable: 4267) // 'argument' : conversion from 'size_t' to 'const uint32_t', possible loss of data. This is a bogus warning resulting from a bug in VC++.
+ #pragma warning(disable: 4480) // nonstandard extension used: specifying underlying type for enum
+#endif
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_EXPLICIT
+//
+// See EASTL_STRING_OPT_EXPLICIT_CTORS for documentation.
+//
+#if EASTL_STRING_OPT_EXPLICIT_CTORS
+ #define EASTL_STRING_EXPLICIT explicit
+#else
+ #define EASTL_STRING_EXPLICIT
+#endif
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL_STRING_INITIAL_CAPACITY
+//
+// As of this writing, this must be > 0. Note that an initially empty string
+// has a capacity of zero (it allocates no memory).
+//
+const eastl_size_t EASTL_STRING_INITIAL_CAPACITY = 8;
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Vsnprintf8 / Vsnprintf16
+//
+// The user is expected to supply these functions. Note that these functions
+// are expected to accept parameters as per the C99 standard. These functions
+// can deal with C99 standard return values or Microsoft non-standard return
+// values but act more efficiently if implemented via the C99 style.
+
+extern int Vsnprintf8 (char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments);
+extern int Vsnprintf16(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments);
+extern int Vsnprintf32(char32_t* pDestination, size_t n, const char32_t* pFormat, va_list arguments);
+
+namespace eastl
+{
+ inline int Vsnprintf(char8_t* pDestination, size_t n, const char8_t* pFormat, va_list arguments)
+ { return Vsnprintf8(pDestination, n, pFormat, arguments); }
+
+ inline int Vsnprintf(char16_t* pDestination, size_t n, const char16_t* pFormat, va_list arguments)
+ { return Vsnprintf16(pDestination, n, pFormat, arguments); }
+
+ inline int Vsnprintf(char32_t* pDestination, size_t n, const char32_t* pFormat, va_list arguments)
+ { return Vsnprintf32(pDestination, n, pFormat, arguments); }
+}
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+namespace eastl
+{
+
+ /// EASTL_BASIC_STRING_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+ #ifndef EASTL_BASIC_STRING_DEFAULT_NAME
+ #define EASTL_BASIC_STRING_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " basic_string" // Unless the user overrides something, this is "EASTL basic_string".
+ #endif
+
+
+ /// EASTL_BASIC_STRING_DEFAULT_ALLOCATOR
+ ///
+ #ifndef EASTL_BASIC_STRING_DEFAULT_ALLOCATOR
+ #define EASTL_BASIC_STRING_DEFAULT_ALLOCATOR allocator_type(EASTL_BASIC_STRING_DEFAULT_NAME)
+ #endif
+
+
+
+ /// gEmptyString
+ ///
+ /// Declares a shared terminating 0 representation for scalar strings that are empty.
+ ///
    /// EmptyString
    ///
    /// A union whose zero-filled storage serves as the shared terminating-0
    /// representation for empty strings of every supported character type.
    /// mUint32 covers the widest member (char32_t), so zero-initializing the
    /// union zeroes all aliases at once.
    union EmptyString
    {
        uint32_t mUint32;
        char mEmpty8[1];
        unsigned char mEmptyU8[1];
        signed char mEmptyS8[1];
        char16_t mEmpty16[1];
        char32_t mEmpty32[1];
    };
    // Single shared instance; defined in the EASTL library (hence extern).
    extern EASTL_API EmptyString gEmptyString;

    // Overload set selecting the appropriately-typed view of gEmptyString via
    // a dummy parameter of the desired character type.
    inline const signed char* GetEmptyString(signed char) { return gEmptyString.mEmptyS8; }
    inline const unsigned char* GetEmptyString(unsigned char) { return gEmptyString.mEmptyU8; }
    inline const char* GetEmptyString(char) { return gEmptyString.mEmpty8; }
    inline const char16_t* GetEmptyString(char16_t) { return gEmptyString.mEmpty16; }
    inline const char32_t* GetEmptyString(char32_t) { return gEmptyString.mEmpty32; }
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ /// basic_string
+ ///
+ /// Implements a templated string class, somewhat like C++ std::basic_string.
+ ///
+ /// Notes:
+ /// As of this writing, an insert of a string into itself necessarily
+ /// triggers a reallocation, even if there is enough capacity in self
+ /// to handle the increase in size. This is due to the slightly tricky
+ /// nature of the operation of modifying one's self with one's self,
+ /// and thus the source and destination are being modified during the
+ /// operation. It might be useful to rectify this to the extent possible.
+ ///
	template <typename T, typename Allocator = EASTLAllocatorType>
	class basic_string
	{
	public:
		typedef basic_string<T, Allocator>              this_type;
		typedef T                                       value_type;
		typedef T*                                      pointer;
		typedef const T*                                const_pointer;
		typedef T&                                      reference;
		typedef const T&                                const_reference;
		typedef T*                                      iterator;           // Maintainer note: We want to leave iterator defined as T* -- at least in release builds -- as this gives some algorithms an advantage that optimizers cannot get around.
		typedef const T*                                const_iterator;
		typedef eastl::reverse_iterator<iterator>       reverse_iterator;
		typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
		typedef eastl_size_t                            size_type;          // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
		typedef ptrdiff_t                               difference_type;
		typedef Allocator                               allocator_type;

		#if defined(_MSC_VER) && (_MSC_VER >= 1400) // _MSC_VER of 1400 means VC8 (VS2005), 1500 means VC9 (VS2008)
			enum : size_type { // Use Microsoft enum language extension, allowing for smaller debug symbols than using a static const. Users have been affected by this.
				npos     = (size_type)-1,
				kMaxSize = (size_type)-2
			};
		#else
			static const size_type npos     = (size_type)-1;      /// 'npos' means non-valid position or simply non-position.
			static const size_type kMaxSize = (size_type)-2;      /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues.
		#endif

		// Alignment the allocator is asked for when this string allocates. Strings of
		// scalar char types need only the natural alignment of T, with no extra offset.
		enum
		{
			kAlignment       = EASTL_ALIGN_OF(T),
			kAlignmentOffset = 0
		};

	public:
		// CtorDoNotInitialize exists so that we can create a constructor that allocates but doesn't
		// initialize and also doesn't collide with any other constructor declaration.
		struct CtorDoNotInitialize{};

		// CtorSprintf exists so that we can create a constructor that accepts printf-style
		// arguments but also doesn't collide with any other constructor declaration.
		struct CtorSprintf{};

	protected:
		value_type*    mpBegin;      // Begin of string.
		value_type*    mpEnd;        // End of string. *mpEnd is always '0', as we 0-terminate our string. mpEnd is always < mpCapacity.
		value_type*    mpCapacity;   // End of allocated space, including the space needed to store the trailing '0' char. mpCapacity is always at least mpEnd + 1.
		allocator_type mAllocator;   // To do: Use base class optimization to make this go away.

	public:
		// Constructor, destructor
		basic_string();
		explicit basic_string(const allocator_type& allocator);
		basic_string(const this_type& x, size_type position, size_type n = npos);
		basic_string(const value_type* p, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
		EASTL_STRING_EXPLICIT basic_string(const value_type* p, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
		basic_string(size_type n, value_type c, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
		basic_string(const this_type& x);
		basic_string(const value_type* pBegin, const value_type* pEnd, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
		basic_string(CtorDoNotInitialize, size_type n, const allocator_type& allocator = EASTL_BASIC_STRING_DEFAULT_ALLOCATOR);
		basic_string(CtorSprintf, const value_type* pFormat, ...);

		~basic_string();

		// Allocator
		const allocator_type& get_allocator() const;
		allocator_type& get_allocator();
		void set_allocator(const allocator_type& allocator);

		// Operator =
		this_type& operator=(const this_type& x);
		this_type& operator=(const value_type* p);
		this_type& operator=(value_type c);

		void swap(this_type& x);

		// Assignment operations
		basic_string& assign(const basic_string& x);
		basic_string& assign(const basic_string& x, size_type position, size_type n);
		basic_string& assign(const value_type* p, size_type n);
		basic_string& assign(const value_type* p);
		basic_string& assign(size_type n, value_type c);
		basic_string& assign(const value_type* pBegin, const value_type* pEnd);

		// Iterators.
		iterator       begin();                 // Expanded in source code as: mpBegin
		const_iterator begin() const;           // Expanded in source code as: mpBegin
		iterator       end();                   // Expanded in source code as: mpEnd
		const_iterator end() const;             // Expanded in source code as: mpEnd

		reverse_iterator       rbegin();
		const_reverse_iterator rbegin() const;
		reverse_iterator       rend();
		const_reverse_iterator rend() const;

		// Size-related functionality
		bool      empty() const;                // Expanded in source code as: (mpBegin == mpEnd) or (mpBegin != mpEnd)
		size_type size() const;                 // Expanded in source code as: (size_type)(mpEnd - mpBegin)
		size_type length() const;               // Expanded in source code as: (size_type)(mpEnd - mpBegin)
		size_type max_size() const;             // Expanded in source code as: kMaxSize
		size_type capacity() const;             // Expanded in source code as: (size_type)((mpCapacity - mpBegin) - 1)
		void      resize(size_type n, value_type c);
		void      resize(size_type n);
		void      reserve(size_type = 0);
		void      set_capacity(size_type n = npos); // Revises the capacity to the user-specified value. Resizes the container to match the capacity if the requested capacity n is less than the current size. If n == npos then the capacity is reallocated (if necessary) such that capacity == size.
		void      force_size(size_type n);          // Unilaterally moves the string end position (mpEnd) to the given location. Useful for when the user writes into the string via some extenal means such as C strcpy or sprintf. This allows for more efficient use than using resize to achieve this.

		// Raw access
		const value_type* data() const;
		const value_type* c_str() const;

		// Element access
		reference       operator[](size_type n);
		const_reference operator[](size_type n) const;
		reference       at(size_type n);
		const_reference at(size_type n) const;
		reference       front();
		const_reference front() const;
		reference       back();
		const_reference back() const;

		// Append operations
		basic_string& operator+=(const basic_string& x);
		basic_string& operator+=(const value_type* p);
		basic_string& operator+=(value_type c);

		basic_string& append(const basic_string& x);
		basic_string& append(const basic_string& x, size_type position, size_type n);
		basic_string& append(const value_type* p, size_type n);
		basic_string& append(const value_type* p);
		basic_string& append(size_type n, value_type c);
		basic_string& append(const value_type* pBegin, const value_type* pEnd);

		basic_string& append_sprintf_va_list(const value_type* pFormat, va_list arguments);
		basic_string& append_sprintf(const value_type* pFormat, ...);

		void push_back(value_type c);
		void pop_back();

		// Insertion operations
		basic_string& insert(size_type position, const basic_string& x);
		basic_string& insert(size_type position, const basic_string& x, size_type beg, size_type n);
		basic_string& insert(size_type position, const value_type* p, size_type n);
		basic_string& insert(size_type position, const value_type* p);
		basic_string& insert(size_type position, size_type n, value_type c);
		iterator      insert(iterator p, value_type c);
		void          insert(iterator p, size_type n, value_type c);
		void          insert(iterator p, const value_type* pBegin, const value_type* pEnd);

		// Erase operations
		basic_string&    erase(size_type position = 0, size_type n = npos);
		iterator         erase(iterator p);
		iterator         erase(iterator pBegin, iterator pEnd);
		reverse_iterator erase(reverse_iterator position);
		reverse_iterator erase(reverse_iterator first, reverse_iterator last);
		void             clear();
		void             reset();                 // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.

		//Replacement operations
		basic_string& replace(size_type position, size_type n, const basic_string& x);
		basic_string& replace(size_type pos1, size_type n1, const basic_string& x, size_type pos2, size_type n2);
		basic_string& replace(size_type position, size_type n1, const value_type* p, size_type n2);
		basic_string& replace(size_type position, size_type n1, const value_type* p);
		basic_string& replace(size_type position, size_type n1, size_type n2, value_type c);
		basic_string& replace(iterator first, iterator last, const basic_string& x);
		basic_string& replace(iterator first, iterator last, const value_type* p, size_type n);
		basic_string& replace(iterator first, iterator last, const value_type* p);
		basic_string& replace(iterator first, iterator last, size_type n, value_type c);
		basic_string& replace(iterator first, iterator last, const value_type* pBegin, const value_type* pEnd);
		size_type     copy(value_type* p, size_type n, size_type position = 0) const;

		// Find operations
		size_type find(const basic_string& x, size_type position = 0) const;
		size_type find(const value_type* p, size_type position = 0) const;
		size_type find(const value_type* p, size_type position, size_type n) const;
		size_type find(value_type c, size_type position = 0) const;

		// Reverse find operations
		size_type rfind(const basic_string& x, size_type position = npos) const;
		size_type rfind(const value_type* p, size_type position = npos) const;
		size_type rfind(const value_type* p, size_type position, size_type n) const;
		size_type rfind(value_type c, size_type position = npos) const;

		// Find first-of operations
		size_type find_first_of(const basic_string& x, size_type position = 0) const;
		size_type find_first_of(const value_type* p, size_type position = 0) const;
		size_type find_first_of(const value_type* p, size_type position, size_type n) const;
		size_type find_first_of(value_type c, size_type position = 0) const;

		// Find last-of operations
		size_type find_last_of(const basic_string& x, size_type position = npos) const;
		size_type find_last_of(const value_type* p, size_type position = npos) const;
		size_type find_last_of(const value_type* p, size_type position, size_type n) const;
		size_type find_last_of(value_type c, size_type position = npos) const;

		// Find first not-of operations
		size_type find_first_not_of(const basic_string& x, size_type position = 0) const;
		size_type find_first_not_of(const value_type* p, size_type position = 0) const;
		size_type find_first_not_of(const value_type* p, size_type position, size_type n) const;
		size_type find_first_not_of(value_type c, size_type position = 0) const;

		// Find last not-of operations
		size_type find_last_not_of(const basic_string& x, size_type position = npos) const;
		size_type find_last_not_of(const value_type* p, size_type position = npos) const;
		size_type find_last_not_of(const value_type* p, size_type position, size_type n) const;
		size_type find_last_not_of(value_type c, size_type position = npos) const;

		// Substring functionality
		basic_string substr(size_type position = 0, size_type n = npos) const;

		// Comparison operations
		int        compare(const basic_string& x) const;
		int        compare(size_type pos1, size_type n1, const basic_string& x) const;
		int        compare(size_type pos1, size_type n1, const basic_string& x, size_type pos2, size_type n2) const;
		int        compare(const value_type* p) const;
		int        compare(size_type pos1, size_type n1, const value_type* p) const;
		int        compare(size_type pos1, size_type n1, const value_type* p, size_type n2) const;
		static int compare(const value_type* pBegin1, const value_type* pEnd1, const value_type* pBegin2, const value_type* pEnd2);

		// Case-insensitive comparison functions. Not part of C++ basic_string. Only ASCII-level locale functionality is supported. Thus this is not suitable for localization purposes.
		int        comparei(const basic_string& x) const;
		int        comparei(const value_type* p) const;
		static int comparei(const value_type* pBegin1, const value_type* pEnd1, const value_type* pBegin2, const value_type* pEnd2);

		// Misc functionality, not part of C++ basic_string.
		void         make_lower();
		void         make_upper();
		void         ltrim();
		void         rtrim();
		void         trim();
		basic_string left(size_type n) const;
		basic_string right(size_type n) const;
		basic_string& sprintf_va_list(const value_type* pFormat, va_list arguments);
		basic_string& sprintf(const value_type* pFormat, ...);

		bool validate() const;
		int  validate_iterator(const_iterator i) const;

	protected:
		// Helper functions for initialization/insertion operations.
		value_type* DoAllocate(size_type n);
		void        DoFree(value_type* p, size_type n);
		size_type   GetNewCapacity(size_type currentCapacity);

		void        AllocateSelf();
		void        AllocateSelf(size_type n);
		void        DeallocateSelf();
		iterator    InsertInternal(iterator p, value_type c);
		void        RangeInitialize(const value_type* pBegin, const value_type* pEnd);
		void        RangeInitialize(const value_type* pBegin);
		void        SizeInitialize(size_type n, value_type c);
		void        ThrowLengthException() const;
		void        ThrowRangeException() const;
		void        ThrowInvalidArgumentException() const;

		// Replacements for STL template functions.
		static const value_type* CharTypeStringFindEnd(const value_type* pBegin, const value_type* pEnd, value_type c);
		static const value_type* CharTypeStringRFind(const value_type* pRBegin, const value_type* pREnd, const value_type c);
		static const value_type* CharTypeStringSearch(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
		static const value_type* CharTypeStringRSearch(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
		static const value_type* CharTypeStringFindFirstOf(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
		static const value_type* CharTypeStringRFindFirstOf(const value_type* p1RBegin, const value_type* p1REnd, const value_type* p2Begin, const value_type* p2End);
		static const value_type* CharTypeStringFindFirstNotOf(const value_type* p1Begin, const value_type* p1End, const value_type* p2Begin, const value_type* p2End);
		static const value_type* CharTypeStringRFindFirstNotOf(const value_type* p1RBegin, const value_type* p1REnd, const value_type* p2Begin, const value_type* p2End);

	}; // basic_string
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // 'char traits' functionality
+ //
	// ASCII-level lower-casing only (see the comparei note above); the uint8_t cast
	// avoids undefined behavior from passing a negative char value to tolower.
	inline char8_t CharToLower(char8_t c)
		{ return (char8_t)tolower((uint8_t)c); }
+
+ inline char16_t CharToLower(char16_t c)
+ { if((unsigned)c <= 0xff) return (char16_t)tolower((uint8_t)c); return c; }
+
+ inline char32_t CharToLower(char32_t c)
+ { if((unsigned)c <= 0xff) return (char32_t)tolower((uint8_t)c); return c; }
+
+
+
	// ASCII-level upper-casing only; the uint8_t cast avoids undefined behavior
	// from passing a negative char value to toupper.
	inline char8_t CharToUpper(char8_t c)
		{ return (char8_t)toupper((uint8_t)c); }
+
+ inline char16_t CharToUpper(char16_t c)
+ { if((unsigned)c <= 0xff) return (char16_t)toupper((uint8_t)c); return c; }
+
+ inline char32_t CharToUpper(char32_t c)
+ { if((unsigned)c <= 0xff) return (char32_t)toupper((uint8_t)c); return c; }
+
+
+
+ template <typename T>
+ int Compare(const T* p1, const T* p2, size_t n)
+ {
+ for(; n > 0; ++p1, ++p2, --n)
+ {
+ if(*p1 != *p2)
+ return (*p1 < *p2) ? -1 : 1;
+ }
+ return 0;
+ }
+
	// 8-bit specialization: defer to memcmp, which is typically vectorized/intrinsic.
	// Note: memcmp compares as unsigned char, so the sign convention matches the template.
	inline int Compare(const char8_t* p1, const char8_t* p2, size_t n)
	{
		return memcmp(p1, p2, n);
	}
+
+ template <typename T>
+ inline int CompareI(const T* p1, const T* p2, size_t n)
+ {
+ for(; n > 0; ++p1, ++p2, --n)
+ {
+ const T c1 = CharToLower(*p1);
+ const T c2 = CharToLower(*p2);
+
+ if(c1 != c2)
+ return (c1 < c2) ? -1 : 1;
+ }
+ return 0;
+ }
+
+
	// 8-bit specialization of Find: defer to memchr. Returns NULL if c is not
	// among the first n characters.
	inline const char8_t* Find(const char8_t* p, char8_t c, size_t n)
	{
		return (const char8_t*)memchr(p, c, n);
	}
+
+ inline const char16_t* Find(const char16_t* p, char16_t c, size_t n)
+ {
+ for(; n > 0; --n, ++p)
+ {
+ if(*p == c)
+ return p;
+ }
+
+ return NULL;
+ }
+
+ inline const char32_t* Find(const char32_t* p, char32_t c, size_t n)
+ {
+ for(; n > 0; --n, ++p)
+ {
+ if(*p == c)
+ return p;
+ }
+
+ return NULL;
+ }
+
+
+ inline size_t CharStrlen(const char8_t* p)
+ {
+ if ( !p ) return 0;
+ #ifdef _MSC_VER // VC++ can implement an instrinsic here.
+ return strlen(p);
+ #else
+ const char8_t* pCurrent = p ? p : "";
+ while(pCurrent && *pCurrent)
+ ++pCurrent;
+ return (size_t)(pCurrent - p);
+ #endif
+ }
+
+ inline size_t CharStrlen(const char16_t* p)
+ {
+ if ( !p ) return 0;
+ const char16_t* pCurrent = p;
+ while(*pCurrent)
+ ++pCurrent;
+ return (size_t)(pCurrent - p);
+ }
+
+ inline size_t CharStrlen(const char32_t* p)
+ {
+ if ( !p ) return 0;
+ const char32_t* pCurrent = p;
+ while(*pCurrent)
+ ++pCurrent;
+ return (size_t)(pCurrent - p);
+ }
+
+
+ template <typename T>
+ inline T* CharStringUninitializedCopy(const T* pSource, const T* pSourceEnd, T* pDestination)
+ {
+ memmove(pDestination, pSource, (size_t)(pSourceEnd - pSource) * sizeof(T));
+ return pDestination + (pSourceEnd - pSource);
+ }
+
+
+
+
	// 8-bit fill: defer to memset. Returns one-past-the-last filled element.
	inline char8_t* CharStringUninitializedFillN(char8_t* pDestination, size_t n, const char8_t c)
	{
		if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
			memset(pDestination, (uint8_t)c, (size_t)n);
		return pDestination + n;
	}
+
+ inline char16_t* CharStringUninitializedFillN(char16_t* pDestination, size_t n, const char16_t c)
+ {
+ char16_t* pDest16 = pDestination;
+ const char16_t* const pEnd = pDestination + n;
+ while(pDest16 < pEnd)
+ *pDest16++ = c;
+ return pDestination + n;
+ }
+
+ inline char32_t* CharStringUninitializedFillN(char32_t* pDestination, size_t n, const char32_t c)
+ {
+ char32_t* pDest32 = pDestination;
+ const char32_t* const pEnd = pDestination + n;
+ while(pDest32 < pEnd)
+ *pDest32++ = c;
+ return pDestination + n;
+ }
+
+
+
	// 8-bit assign: defer to memset. Unlike CharStringUninitializedFillN, this
	// returns the destination pointer itself (memset's convention).
	inline char8_t* CharTypeAssignN(char8_t* pDestination, size_t n, char8_t c)
	{
		if(n) // Some compilers (e.g. GCC 4.3+) generate a warning (which can't be disabled) if you call memset with a size of 0.
			return (char8_t*)memset(pDestination, c, (size_t)n);
		return pDestination;
	}
+
+ inline char16_t* CharTypeAssignN(char16_t* pDestination, size_t n, char16_t c)
+ {
+ char16_t* pDest16 = pDestination;
+ const char16_t* const pEnd = pDestination + n;
+ while(pDest16 < pEnd)
+ *pDest16++ = c;
+ return pDestination;
+ }
+
+ inline char32_t* CharTypeAssignN(char32_t* pDestination, size_t n, char32_t c)
+ {
+ char32_t* pDest32 = pDestination;
+ const char32_t* const pEnd = pDestination + n;
+ while(pDest32 < pEnd)
+ *pDest32++ = c;
+ return pDestination;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // basic_string
+ ///////////////////////////////////////////////////////////////////////////////
+
	/// Default constructor: no user allocator; produces the shared empty representation.
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::basic_string()
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(EASTL_BASIC_STRING_DEFAULT_NAME)
	{
		AllocateSelf();
	}


	/// Constructs an empty string using the given allocator.
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::basic_string(const allocator_type& allocator)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(allocator)
	{
		AllocateSelf();
	}


	/// Copy constructor: copies both the character data and the allocator of x.
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::basic_string(const this_type& x)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(x.mAllocator)
	{
		RangeInitialize(x.mpBegin, x.mpEnd);
	}


	/// Substring constructor: copies up to n characters of x starting at position.
	/// With range errors enabled, a position past x's end throws (via ThrowRangeException)
	/// and the string is left empty; otherwise the out-of-range position is undefined behavior.
	template <typename T, typename Allocator>
	basic_string<T, Allocator>::basic_string(const this_type& x, size_type position, size_type n)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(x.mAllocator)
	{
		#if EASTL_STRING_OPT_RANGE_ERRORS
			if(EASTL_UNLIKELY(position > (size_type)(x.mpEnd - x.mpBegin)))
			{
				ThrowRangeException();
				AllocateSelf(); // In case the exception above is disabled/returns, still establish a valid empty state.
			}
			else
				RangeInitialize(x.mpBegin + position, x.mpBegin + position + eastl::min_alt(n, (size_type)(x.mpEnd - x.mpBegin) - position));
		#else
			RangeInitialize(x.mpBegin + position, x.mpBegin + position + eastl::min_alt(n, (size_type)(x.mpEnd - x.mpBegin) - position));
		#endif
	}


	/// Constructs from the first n characters of p (p need not be 0-terminated).
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::basic_string(const value_type* p, size_type n, const allocator_type& allocator)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(allocator)
	{
		RangeInitialize(p, p + n);
	}


	/// Constructs from a 0-terminated C string.
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::basic_string(const value_type* p, const allocator_type& allocator)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(allocator)
	{
		RangeInitialize(p);
	}


	/// Constructs a string of n copies of character c.
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::basic_string(size_type n, value_type c, const allocator_type& allocator)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(allocator)
	{
		SizeInitialize(n, c);
	}


	/// Constructs from the half-open character range [pBegin, pEnd).
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::basic_string(const value_type* pBegin, const value_type* pEnd, const allocator_type& allocator)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(allocator)
	{
		RangeInitialize(pBegin, pEnd);
	}
+
+
	// CtorDoNotInitialize exists so that we can create a version that allocates but doesn't
	// initialize but also doesn't collide with any other constructor declaration.
	// The resulting string has size 0 but capacity for n characters; the caller is
	// expected to write into the buffer and then call force_size.
	template <typename T, typename Allocator>
	basic_string<T, Allocator>::basic_string(CtorDoNotInitialize /*unused*/, size_type n, const allocator_type& allocator)
		: mpBegin(NULL),
		  mpEnd(NULL),
		  mpCapacity(NULL),
		  mAllocator(allocator)
	{
		// Note that we do not call SizeInitialize here.
		AllocateSelf(n + 1); // '+1' so that we have room for the terminating 0.
		*mpEnd = 0;          // Keep the 0-termination invariant even though the content is uninitialized.
	}
+
+
+ // CtorSprintf exists so that we can create a version that does a variable argument
+ // sprintf but also doesn't collide with any other constructor declaration.
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator>::basic_string(CtorSprintf /*unused*/, const value_type* pFormat, ...)
+ : mpBegin(NULL),
+ mpEnd(NULL),
+ mpCapacity(NULL),
+ mAllocator()
+ {
+ const size_type n = (size_type)CharStrlen(pFormat) + 1; // We'll need at least this much. '+1' so that we have room for the terminating 0.
+ AllocateSelf(n);
+
+ va_list arguments;
+ va_start(arguments, pFormat);
+ append_sprintf_va_list(pFormat, arguments);
+ va_end(arguments);
+ }
+
+
	/// Destructor: releases the character buffer (DeallocateSelf knows not to free
	/// the shared empty representation).
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>::~basic_string()
	{
		DeallocateSelf();
	}


	/// Read-only access to the string's allocator.
	template <typename T, typename Allocator>
	inline const typename basic_string<T, Allocator>::allocator_type&
	basic_string<T, Allocator>::get_allocator() const
	{
		return mAllocator;
	}


	/// Mutable access to the string's allocator.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::allocator_type&
	basic_string<T, Allocator>::get_allocator()
	{
		return mAllocator;
	}


	/// Replaces the allocator. Note that memory already allocated is not
	/// transferred; callers should set the allocator before the string allocates.
	template <typename T, typename Allocator>
	inline void basic_string<T, Allocator>::set_allocator(const allocator_type& allocator)
	{
		mAllocator = allocator;
	}
+
+
	/// Pointer to the character array. Unlike pre-C++11 std::basic_string, this
	/// buffer is always contiguous and 0-terminated here, so data() == c_str().
	template <typename T, typename Allocator>
	inline const typename basic_string<T, Allocator>::value_type*
	basic_string<T, Allocator>::data() const
	{
		return mpBegin;
	}


	/// 0-terminated C string view of the contents.
	template <typename T, typename Allocator>
	inline const typename basic_string<T, Allocator>::value_type*
	basic_string<T, Allocator>::c_str() const
	{
		return mpBegin;
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::iterator
	basic_string<T, Allocator>::begin()
	{
		return mpBegin;
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::iterator
	basic_string<T, Allocator>::end()
	{
		return mpEnd;
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::const_iterator
	basic_string<T, Allocator>::begin() const
	{
		return mpBegin;
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::const_iterator
	basic_string<T, Allocator>::end() const
	{
		return mpEnd;
	}


	/// Reverse iterators adapt the forward pointers: rbegin wraps end, rend wraps begin.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::reverse_iterator
	basic_string<T, Allocator>::rbegin()
	{
		return reverse_iterator(mpEnd);
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::reverse_iterator
	basic_string<T, Allocator>::rend()
	{
		return reverse_iterator(mpBegin);
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::const_reverse_iterator
	basic_string<T, Allocator>::rbegin() const
	{
		return const_reverse_iterator(mpEnd);
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::const_reverse_iterator
	basic_string<T, Allocator>::rend() const
	{
		return const_reverse_iterator(mpBegin);
	}
+
+
	/// True if the string contains no characters (the trailing 0 is not counted).
	template <typename T, typename Allocator>
	inline bool basic_string<T, Allocator>::empty() const
	{
		return (mpBegin == mpEnd);
	}


	/// Number of characters, excluding the trailing 0.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::size() const
	{
		return (size_type)(mpEnd - mpBegin);
	}


	/// Synonym for size().
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::length() const
	{
		return (size_type)(mpEnd - mpBegin);
	}


	/// Fixed upper bound on size; kMaxSize == (size_type)-2 (see the class declaration).
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::max_size() const
	{
		return kMaxSize;
	}


	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::capacity() const
	{
		return (size_type)((mpCapacity - mpBegin) - 1); // '-1' because we pretend that we didn't allocate memory for the terminating 0.
	}
+
+
	/// Unchecked element access (assert-only in debug builds). Note the assert uses
	/// 'n >' rather than 'n >=' so that indexing the trailing 0 at n == size() is allowed.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::const_reference
	basic_string<T, Allocator>::operator[](size_type n) const
	{
		#if EASTL_ASSERT_ENABLED // We allow the user to reference the trailing 0 char without asserting. Perhaps we shouldn't.
			if(EASTL_UNLIKELY(n > (static_cast<size_type>(mpEnd - mpBegin))))
				EASTL_FAIL_MSG("basic_string::operator[] -- out of range");
		#endif

		return mpBegin[n]; // Sometimes done as *(mpBegin + n)
	}


	/// Mutable counterpart of the const operator[] above; same trailing-0 allowance.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::reference
	basic_string<T, Allocator>::operator[](size_type n)
	{
		#if EASTL_ASSERT_ENABLED // We allow the user to reference the trailing 0 char without asserting. Perhaps we shouldn't.
			if(EASTL_UNLIKELY(n > (static_cast<size_type>(mpEnd - mpBegin))))
				EASTL_FAIL_MSG("basic_string::operator[] -- out of range");
		#endif

		return mpBegin[n]; // Sometimes done as *(mpBegin + n)
	}
+
+
	/// Copy assignment. Self-assignment is a no-op. The allocator is copied only
	/// when EASTL_ALLOCATOR_COPY_ENABLED is set; otherwise the destination keeps its own.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(const basic_string<T, Allocator>& x)
	{
		if(&x != this)
		{
			#if EASTL_ALLOCATOR_COPY_ENABLED
				mAllocator = x.mAllocator;
			#endif

			assign(x.mpBegin, x.mpEnd);
		}
		return *this;
	}


	/// Assignment from a 0-terminated C string.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(const value_type* p)
	{
		return assign(p, p + CharStrlen(p));
	}


	/// Assignment from a single character: the string becomes exactly that one character.
	template <typename T, typename Allocator>
	inline typename basic_string<T, Allocator>::this_type& basic_string<T, Allocator>::operator=(value_type c)
	{
		return assign((size_type)1, c);
	}
+
+
	/// Resizes to n characters: shrinks by erasing from the tail, or grows by
	/// appending copies of c.
	template <typename T, typename Allocator>
	void basic_string<T, Allocator>::resize(size_type n, value_type c)
	{
		const size_type s = (size_type)(mpEnd - mpBegin);

		if(n < s)
			erase(mpBegin + n, mpEnd);
		else if(n > s)
			append(n - s, c);
		// n == s: nothing to do.
	}


	template <typename T, typename Allocator>
	void basic_string<T, Allocator>::resize(size_type n)
	{
		// C++ basic_string specifies that resize(n) is equivalent to resize(n, value_type()).
		// For built-in types, value_type() is the same as zero (value_type(0)).
		// We can improve the efficiency (especially for long strings) of this
		// string class by resizing without assigning to anything.

		const size_type s = (size_type)(mpEnd - mpBegin);

		if(n < s)
			erase(mpBegin + n, mpEnd);
		else if(n > s)
		{
			#if EASTL_STRING_OPT_CHAR_INIT
				append(n - s, value_type()); // Standard-conforming: zero-fill the new characters.
			#else
				append(n - s);               // Faster non-standard path: new characters are left uninitialized.
			#endif
		}
	}
+
+
	/// Ensures capacity() >= n. Never shrinks (use set_capacity for that).
	/// Throws a length exception (when enabled) if n exceeds max_size().
	template <typename T, typename Allocator>
	void basic_string<T, Allocator>::reserve(size_type n)
	{
		#if EASTL_STRING_OPT_LENGTH_ERRORS
			if(EASTL_UNLIKELY(n > kMaxSize))
				ThrowLengthException();
		#endif

		// The C++ standard for basic_string doesn't specify if we should or shouldn't
		// downsize the container. The standard is overly vague in its description of reserve:
		//    The member function reserve() is a directive that informs a
		//    basic_string object of a planned change in size, so that it
		//    can manage the storage allocation accordingly.
		// We will act like the vector container and preserve the contents of
		// the container and only reallocate if increasing the size. The user
		// can use the set_capacity function to reduce the capacity.

		n = eastl::max_alt(n, (size_type)(mpEnd - mpBegin)); // Calculate the new capacity, which needs to be >= container size.

		if(n >= (size_type)(mpCapacity - mpBegin)) // If there is something to do...  // We use >= because mpCapacity accounts for the trailing zero.
			set_capacity(n);
	}
+
+
	/// Sets capacity() to exactly n, reallocating as needed. n == npos means
	/// shrink-to-fit (capacity == size); n < size truncates the string first.
	template <typename T, typename Allocator>
	inline void basic_string<T, Allocator>::set_capacity(size_type n)
	{
		if(n == npos) // If the user wants to set the capacity to equal the current size... // '-1' because we pretend that we didn't allocate memory for the terminating 0.
			n = (size_type)(mpEnd - mpBegin);
		else if(n < (size_type)(mpEnd - mpBegin))
			mpEnd = mpBegin + n; // Truncate; the reallocation below re-establishes the trailing 0.

		if(n != (size_type)((mpCapacity - mpBegin) - 1)) // If there is any capacity change...
		{
			if(n)
			{
				pointer pNewBegin = DoAllocate(n + 1); // We need the + 1 to accomodate the trailing 0.
				pointer pNewEnd = pNewBegin;

				pNewEnd = CharStringUninitializedCopy(mpBegin, mpEnd, pNewBegin);
				*pNewEnd = 0;

				DeallocateSelf();
				mpBegin    = pNewBegin;
				mpEnd      = pNewEnd;
				mpCapacity = pNewBegin + (n + 1);
			}
			else // Zero capacity: fall back to the shared empty representation.
			{
				DeallocateSelf();
				AllocateSelf();
			}
		}
	}
+
+
+	// Unilaterally moves the string end to mpBegin + n without initializing any
+	// characters and without writing a terminating 0. The caller is responsible for
+	// having written valid character data (including the trailing 0) into the buffer,
+	// e.g. after filling it via a C API. n must be strictly less than capacity.
+	template <typename T, typename Allocator>
+	inline void basic_string<T, Allocator>::force_size(size_type n)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(n >= (size_type)(mpCapacity - mpBegin)))
+				ThrowRangeException();
+		#elif EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(n >= (size_type)(mpCapacity - mpBegin)))
+				EASTL_FAIL_MSG("basic_string::force_size -- out of range");
+		#endif
+
+		mpEnd = mpBegin + n;
+	}
+
+
+	// Empties the string without changing capacity. Writes a terminating 0 at the
+	// front so c_str() remains valid.
+	template <typename T, typename Allocator>
+	inline void basic_string<T, Allocator>::clear()
+	{
+		if(mpBegin != mpEnd)
+		{
+			*mpBegin = value_type(0);
+			mpEnd = mpBegin;
+		}
+	}
+
+
+	template <typename T, typename Allocator>
+	inline void basic_string<T, Allocator>::reset()
+	{
+		// The reset function is a special extension function which unilaterally
+		// resets the container to an empty state without freeing the memory of
+		// the contained objects. This is useful for very quickly tearing down a
+		// container built into scratch memory.
+		// NOTE(review): if the string owns heap memory (rather than scratch memory),
+		// calling reset() leaks that allocation by design.
+		AllocateSelf();
+	}
+
+
+	// Bounds-checked element access. Unlike operator[], n == size() (the trailing 0
+	// position) is treated as out of range here.
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::const_reference
+	basic_string<T, Allocator>::at(size_type n) const
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(n >= (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#elif EASTL_ASSERT_ENABLED                  // We assert if the user references the trailing 0 char.
+			if(EASTL_UNLIKELY(n >= (size_type)(mpEnd - mpBegin)))
+				EASTL_FAIL_MSG("basic_string::at -- out of range");
+		#endif
+
+		return mpBegin[n];
+	}
+
+
+	// Non-const overload of the bounds-checked accessor above.
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::reference
+	basic_string<T, Allocator>::at(size_type n)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(n >= (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#elif EASTL_ASSERT_ENABLED                  // We assert if the user references the trailing 0 char.
+			if(EASTL_UNLIKELY(n >= (size_type)(mpEnd - mpBegin)))
+				EASTL_FAIL_MSG("basic_string::at -- out of range");
+		#endif
+
+		return mpBegin[n];
+	}
+
+
+	// Returns a reference to the first character. On an empty string this yields the
+	// trailing 0, which is allowed (or asserted against, depending on configuration).
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::reference
+	basic_string<T, Allocator>::front()
+	{
+		#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+			// We allow the user to reference the trailing 0 char without asserting.
+		#elif EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We assert if the user references the trailing 0 char.
+				EASTL_FAIL_MSG("basic_string::front -- empty string");
+		#endif
+
+		return *mpBegin;
+	}
+
+
+	// Const overload of front().
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::const_reference
+	basic_string<T, Allocator>::front() const
+	{
+		#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+			// We allow the user to reference the trailing 0 char without asserting.
+		#elif EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We assert if the user references the trailing 0 char.
+				EASTL_FAIL_MSG("basic_string::front -- empty string");
+		#endif
+
+		return *mpBegin;
+	}
+
+
+	// Returns a reference to the last character. Undefined for an empty string
+	// (mpEnd - 1 would precede the buffer) unless the assert path catches it.
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::reference
+	basic_string<T, Allocator>::back()
+	{
+		#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+			// We allow the user to reference the trailing 0 char without asserting.
+		#elif EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We assert if the user references the trailing 0 char.
+				EASTL_FAIL_MSG("basic_string::back -- empty string");
+		#endif
+
+		return *(mpEnd - 1);
+	}
+
+
+	// Const overload of back().
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::const_reference
+	basic_string<T, Allocator>::back() const
+	{
+		#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
+			// We allow the user to reference the trailing 0 char without asserting.
+		#elif EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We assert if the user references the trailing 0 char.
+				EASTL_FAIL_MSG("basic_string::back -- empty string");
+		#endif
+
+		return *(mpEnd - 1);
+	}
+
+
+	// operator+= overloads: thin wrappers over append()/push_back().
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::operator+=(const basic_string<T, Allocator>& x)
+	{
+		return append(x);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::operator+=(const value_type* p)
+	{
+		return append(p);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::operator+=(value_type c)
+	{
+		push_back(c);
+		return *this;
+	}
+
+
+	// append overloads: all forward to the range version append(pBegin, pEnd).
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const basic_string<T, Allocator>& x)
+	{
+		return append(x.mpBegin, x.mpEnd);
+	}
+
+
+	// Appends at most n characters of x starting at position. The count is clamped
+	// to x's remaining length; only position itself is range-checked.
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const basic_string<T, Allocator>& x, size_type position, size_type n)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(x.mpEnd - x.mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		return append(x.mpBegin + position, x.mpBegin + position + eastl::min_alt(n, (size_type)(x.mpEnd - x.mpBegin) - position));
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const value_type* p, size_type n)
+	{
+		return append(p, p + n);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::append(const value_type* p)
+	{
+		return append(p, p + CharStrlen(p));
+	}
+
+
+	// Appends n copies of character c, growing capacity if needed.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::append(size_type n, value_type c)
+	{
+		const size_type s = (size_type)(mpEnd - mpBegin); // Current size.
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((n > kMaxSize) || (s > (kMaxSize - n))))
+				ThrowLengthException();
+		#endif
+
+		const size_type nCapacity = (size_type)((mpCapacity - mpBegin) - 1); // Usable capacity, excluding the trailing 0 slot.
+
+		if((s + n) > nCapacity)
+			reserve(eastl::max_alt((size_type)GetNewCapacity(nCapacity), (size_type)(s + n)));
+
+		if(n > 0)
+		{
+			// Fill the n-1 uninitialized slots past the old terminating 0 first,
+			// then assign the (already-initialized) old terminator slot itself,
+			// advance the end, and write the new terminating 0.
+			CharStringUninitializedFillN(mpEnd + 1, n - 1, c);
+			*mpEnd = c;
+			mpEnd += n;
+			*mpEnd = 0;
+		}
+
+		return *this;
+	}
+
+
+	// Appends the character range [pBegin, pEnd). This is the workhorse that the
+	// other append overloads forward to. Reallocates when capacity is insufficient;
+	// otherwise appends in place around the existing trailing 0.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::append(const value_type* pBegin, const value_type* pEnd)
+	{
+		if(pBegin != pEnd)
+		{
+			const size_type nOldSize = (size_type)(mpEnd - mpBegin);
+			const size_type n        = (size_type)(pEnd - pBegin);
+
+			#if EASTL_STRING_OPT_LENGTH_ERRORS
+				if(EASTL_UNLIKELY(((size_t)n > kMaxSize) || (nOldSize > (kMaxSize - n))))
+					ThrowLengthException();
+			#endif
+
+			const size_type nCapacity = (size_type)((mpCapacity - mpBegin) - 1); // Usable capacity, excluding the trailing 0 slot.
+
+			if((nOldSize + n) > nCapacity) // If we need to grow: build the result in a fresh buffer, then swap it in.
+			{
+				const size_type nLength = eastl::max_alt((size_type)GetNewCapacity(nCapacity), (size_type)(nOldSize + n)) + 1; // + 1 to accomodate the trailing 0.
+
+				pointer pNewBegin = DoAllocate(nLength);
+				pointer pNewEnd   = pNewBegin;
+
+				pNewEnd = CharStringUninitializedCopy(mpBegin, mpEnd, pNewBegin);
+				pNewEnd = CharStringUninitializedCopy(pBegin, pEnd, pNewEnd);
+				*pNewEnd = 0;
+
+				DeallocateSelf();
+				mpBegin    = pNewBegin;
+				mpEnd      = pNewEnd;
+				mpCapacity = pNewBegin + nLength;
+			}
+			else
+			{
+				// In-place append: copy source chars 2..n into the uninitialized region
+				// past the old terminator, write the new terminator, then overwrite the
+				// old terminator slot with the first source char and advance the end.
+				const value_type* pTemp = pBegin;
+				++pTemp;
+				CharStringUninitializedCopy(pTemp, pEnd, mpEnd + 1);
+				mpEnd[n] = 0;
+				*mpEnd = *pBegin;
+				mpEnd += n;
+			}
+		}
+
+		return *this;
+	}
+
+
+	// Appends vsnprintf-formatted output. Handles both C99-conforming vsnprintf
+	// (returns the needed length when the buffer is too small) and non-standard
+	// implementations that return a negative value on overflow (e.g. VC++ _vsnprintf),
+	// retrying with geometrically growing buffers in the latter case.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::append_sprintf_va_list(const value_type* pFormat, va_list arguments)
+	{
+		// From unofficial C89 extension documentation:
+		// The vsnprintf returns the number of characters written into the array,
+		// not counting the terminating null character, or a negative value
+		// if count or more characters are requested to be generated.
+		// An error can occur while converting a value for output.
+
+		// From the C99 standard:
+		// The vsnprintf function returns the number of characters that would have
+		// been written had n been sufficiently large, not counting the terminating
+		// null character, or a negative value if an encoding error occurred.
+		// Thus, the null-terminated output has been completely written if and only
+		// if the returned value is nonnegative and less than n.
+		size_type nInitialSize = (size_type)(mpEnd - mpBegin);
+		int       nReturnValue;
+
+		#if EASTL_VA_COPY_ENABLED
+			// Save the va_list so it can be replayed if we must call Vsnprintf again.
+			va_list argumentsSaved;
+			va_copy(argumentsSaved, arguments);
+		#endif
+
+		if(mpBegin == GetEmptyString(value_type())) // We need to do this because non-standard vsnprintf implementations will otherwise overwrite gEmptyString with a non-zero char.
+			nReturnValue = eastl::Vsnprintf(mpEnd, 0, pFormat, arguments);
+		else
+			nReturnValue = eastl::Vsnprintf(mpEnd, (size_t)(mpCapacity - mpEnd), pFormat, arguments);
+
+		if(nReturnValue >= (int)(mpCapacity - mpEnd)) // If there wasn't enough capacity...
+		{
+			// In this case we definitely have C99 Vsnprintf behaviour.
+			#if EASTL_VA_COPY_ENABLED
+				va_copy(arguments, argumentsSaved);
+			#endif
+			resize(nInitialSize + nReturnValue);
+			nReturnValue = eastl::Vsnprintf(mpBegin + nInitialSize, (size_t)(nReturnValue + 1), pFormat, arguments); // '+1' because vsnprintf wants to know the size of the buffer including the terminating zero.
+		}
+		else if(nReturnValue < 0) // If vsnprintf is non-C99-standard (e.g. it is VC++ _vsnprintf)...
+		{
+			// In this case we either have C89 extension behaviour or C99 behaviour.
+			size_type n = eastl::max_alt((size_type)(EASTL_STRING_INITIAL_CAPACITY - 1), (size_type)(size() * 2)); // '-1' because the resize call below will add one for NULL terminator and we want to keep allocations on fixed block sizes.
+
+			// Double the buffer (bounded at 1000000) until the formatted output fits.
+			for(; (nReturnValue < 0) && (n < 1000000); n *= 2)
+			{
+				#if EASTL_VA_COPY_ENABLED
+					va_copy(arguments, argumentsSaved);
+				#endif
+				resize(n);
+
+				const size_t nCapacity = (size_t)((n + 1) - nInitialSize);
+				nReturnValue = eastl::Vsnprintf(mpBegin + nInitialSize, nCapacity, pFormat, arguments); // '+1' because vsnprintf wants to know the size of the buffer including the terminating zero.
+
+				if(nReturnValue == (int)(unsigned)nCapacity)
+				{
+					// Output exactly filled the buffer; grow by one so the terminator fits.
+					resize(++n);
+					nReturnValue = eastl::Vsnprintf(mpBegin + nInitialSize, nCapacity + 1, pFormat, arguments);
+				}
+			}
+		}
+
+		if(nReturnValue >= 0)
+			mpEnd = mpBegin + nInitialSize + nReturnValue; // We are guaranteed from the above logic that mpEnd <= mpCapacity.
+
+		return *this;
+	}
+
+	// Variadic convenience wrapper around append_sprintf_va_list.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::append_sprintf(const value_type* pFormat, ...)
+	{
+		va_list arguments;
+		va_start(arguments, pFormat);
+		append_sprintf_va_list(pFormat, arguments);
+		va_end(arguments);
+
+		return *this;
+	}
+
+
+	// Appends a single character, growing capacity if the only free slot left is
+	// the one reserved for the trailing 0.
+	template <typename T, typename Allocator>
+	inline void basic_string<T, Allocator>::push_back(value_type c)
+	{
+		if((mpEnd + 1) == mpCapacity)       // If we are out of space... (note that we test for + 1 because we have a trailing 0)
+			reserve(eastl::max_alt(GetNewCapacity((size_type)((mpCapacity - mpBegin) - 1)), (size_type)(mpEnd - mpBegin) + 1));
+		*mpEnd++ = c;
+		*mpEnd = 0; // Maintain the terminating 0 invariant.
+	}
+
+
+	// Removes the last character, overwriting it with 0 so the terminator invariant
+	// holds. Asserts (in debug) on an empty string.
+	template <typename T, typename Allocator>
+	inline void basic_string<T, Allocator>::pop_back()
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(mpEnd <= mpBegin))
+				EASTL_FAIL_MSG("basic_string::pop_back -- empty string");
+		#endif
+
+		mpEnd[-1] = value_type(0);
+		--mpEnd;
+	}
+
+
+	// assign overloads: all forward to the range version assign(pBegin, pEnd).
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const basic_string<T, Allocator>& x)
+	{
+		return assign(x.mpBegin, x.mpEnd);
+	}
+
+
+	// Assigns at most n characters of x starting at position; n is clamped to x's
+	// remaining length, while position itself is range-checked.
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const basic_string<T, Allocator>& x, size_type position, size_type n)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(x.mpEnd - x.mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		return assign(x.mpBegin + position, x.mpBegin + position + eastl::min_alt(n, (size_type)(x.mpEnd - x.mpBegin) - position));
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const value_type* p, size_type n)
+	{
+		return assign(p, p + n);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const value_type* p)
+	{
+		return assign(p, p + CharStrlen(p));
+	}
+
+
+	// Replaces the contents with n copies of c. When shrinking, fills in place then
+	// erases the tail; when growing, fills the existing size then appends the rest.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::assign(size_type n, value_type c)
+	{
+		if(n <= (size_type)(mpEnd - mpBegin))
+		{
+			CharTypeAssignN(mpBegin, n, c);
+			erase(mpBegin + n, mpEnd);
+		}
+		else
+		{
+			CharTypeAssignN(mpBegin, (size_type)(mpEnd - mpBegin), c);
+			append(n - (size_type)(mpEnd - mpBegin), c);
+		}
+		return *this;
+	}
+
+
+	// Replaces the contents with the range [pBegin, pEnd). Uses memmove so the
+	// source may overlap this string's own buffer.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::assign(const value_type* pBegin, const value_type* pEnd)
+	{
+		const ptrdiff_t n = pEnd - pBegin;
+		if(static_cast<size_type>(n) <= (size_type)(mpEnd - mpBegin))
+		{
+			memmove(mpBegin, pBegin, (size_t)n * sizeof(value_type));
+			erase(mpBegin + n, mpEnd);
+		}
+		else
+		{
+			memmove(mpBegin, pBegin, (size_t)(mpEnd - mpBegin) * sizeof(value_type));
+			append(pBegin + (size_type)(mpEnd - mpBegin), pEnd);
+		}
+		return *this;
+	}
+
+
+	// Position-based insert overloads. Each validates position (and length when the
+	// corresponding option is enabled), then forwards to an iterator-based insert.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const basic_string<T, Allocator>& x)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((size_type)(mpEnd - mpBegin) > (kMaxSize - (size_type)(x.mpEnd - x.mpBegin))))
+				ThrowLengthException();
+		#endif
+
+		insert(mpBegin + position, x.mpBegin, x.mpEnd);
+		return *this;
+	}
+
+
+	// Inserts at most n characters of x beginning at beg; the count is clamped to
+	// x's remaining length.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const basic_string<T, Allocator>& x, size_type beg, size_type n)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY((position > (size_type)(mpEnd - mpBegin)) || (beg > (size_type)(x.mpEnd - x.mpBegin))))
+				ThrowRangeException();
+		#endif
+
+		size_type nLength = eastl::min_alt(n, (size_type)(x.mpEnd - x.mpBegin) - beg);
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((size_type)(mpEnd - mpBegin) > (kMaxSize - nLength)))
+				ThrowLengthException();
+		#endif
+
+		insert(mpBegin + position, x.mpBegin + beg, x.mpBegin + beg + nLength);
+		return *this;
+	}
+
+
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const value_type* p, size_type n)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((size_type)(mpEnd - mpBegin) > (kMaxSize - n)))
+				ThrowLengthException();
+		#endif
+
+		insert(mpBegin + position, p, p + n);
+		return *this;
+	}
+
+
+	// Inserts the 0-terminated C string p at position.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, const value_type* p)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		size_type nLength = (size_type)CharStrlen(p);
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((size_type)(mpEnd - mpBegin) > (kMaxSize - nLength)))
+				ThrowLengthException();
+		#endif
+
+		insert(mpBegin + position, p, p + nLength);
+		return *this;
+	}
+
+
+	// Inserts n copies of character c at position.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::insert(size_type position, size_type n, value_type c)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((size_type)(mpEnd - mpBegin) > (kMaxSize - n)))
+				ThrowLengthException();
+		#endif
+
+		insert(mpBegin + position, n, c);
+		return *this;
+	}
+
+
+	// Inserts a single character before iterator p. The end-insertion case is
+	// special-cased to push_back; all other positions go through InsertInternal.
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::iterator
+	basic_string<T, Allocator>::insert(iterator p, value_type c)
+	{
+		if(p == mpEnd)
+		{
+			push_back(c);
+			return mpEnd - 1; // Iterator to the inserted character.
+		}
+		return InsertInternal(p, c);
+	}
+
+
+	// Inserts n copies of c before iterator p. Works in place when capacity allows
+	// (shifting the tail, including the trailing 0, with memmove/uninitialized
+	// copies); otherwise builds the result in a new buffer and swaps it in.
+	template <typename T, typename Allocator>
+	void basic_string<T, Allocator>::insert(iterator p, size_type n, value_type c)
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY((p < mpBegin) || (p > mpEnd)))
+				EASTL_FAIL_MSG("basic_string::insert -- invalid position");
+		#endif
+
+		if(n) // If there is anything to insert...
+		{
+			if(size_type(mpCapacity - mpEnd) >= (n + 1)) // If we have enough capacity...
+			{
+				const size_type nElementsAfter = (size_type)(mpEnd - p);
+				iterator pOldEnd = mpEnd;
+
+				if(nElementsAfter >= n) // If there's enough space for the new chars between the insert position and the end...
+				{
+					// Move the last n chars (plus terminator) into raw memory past the end,
+					// shift the remaining tail up with memmove, then fill the gap with c.
+					CharStringUninitializedCopy((mpEnd - n) + 1, mpEnd + 1, mpEnd + 1);
+					mpEnd += n;
+					memmove(p + n, p, (size_t)((nElementsAfter - n) + 1) * sizeof(value_type));
+					CharTypeAssignN(p, n, c);
+				}
+				else
+				{
+					// The fill spills past the current end: fill the raw region first, then
+					// relocate the tail [p, old end] beyond it, then fill [p, old end) + slot.
+					CharStringUninitializedFillN(mpEnd + 1, n - nElementsAfter - 1, c);
+					mpEnd += n - nElementsAfter;
+
+					#if EASTL_EXCEPTIONS_ENABLED
+						try
+						{
+					#endif
+							CharStringUninitializedCopy(p, pOldEnd + 1, mpEnd);
+							mpEnd += nElementsAfter;
+					#if EASTL_EXCEPTIONS_ENABLED
+						}
+						catch(...)
+						{
+							// Restore the original end so the string stays consistent.
+							mpEnd = pOldEnd;
+							throw;
+						}
+					#endif
+
+					CharTypeAssignN(p, nElementsAfter + 1, c);
+				}
+			}
+			else
+			{
+				// Insufficient capacity: allocate, copy prefix + fill + suffix, swap in.
+				const size_type nOldSize = (size_type)(mpEnd - mpBegin);
+				const size_type nOldCap  = (size_type)((mpCapacity - mpBegin) - 1);
+				const size_type nLength  = eastl::max_alt((size_type)GetNewCapacity(nOldCap), (size_type)(nOldSize + n)) + 1; // + 1 to accomodate the trailing 0.
+
+				iterator pNewBegin = DoAllocate(nLength);
+				iterator pNewEnd   = pNewBegin;
+
+				pNewEnd = CharStringUninitializedCopy(mpBegin, p, pNewBegin);
+				pNewEnd = CharStringUninitializedFillN(pNewEnd, n, c);
+				pNewEnd = CharStringUninitializedCopy(p, mpEnd, pNewEnd);
+				*pNewEnd = 0;
+
+				DeallocateSelf();
+				mpBegin    = pNewBegin;
+				mpEnd      = pNewEnd;
+				mpCapacity = pNewBegin + nLength;
+			}
+		}
+	}
+
+
+	// Inserts the range [pBegin, pEnd) before iterator p. Inserting a range that
+	// aliases this string's own buffer always reallocates (see comment below);
+	// otherwise an in-place shift is used when capacity allows.
+	template <typename T, typename Allocator>
+	void basic_string<T, Allocator>::insert(iterator p, const value_type* pBegin, const value_type* pEnd)
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY((p < mpBegin) || (p > mpEnd)))
+				EASTL_FAIL_MSG("basic_string::insert -- invalid position");
+		#endif
+
+		const size_type n = (size_type)(pEnd - pBegin);
+
+		if(n)
+		{
+			const bool bCapacityIsSufficient = ((mpCapacity - mpEnd) >= (difference_type)(n + 1));
+			const bool bSourceIsFromSelf     = ((pEnd >= mpBegin) && (pBegin <= mpEnd));
+
+			// If bSourceIsFromSelf is true, then we reallocate. This is because we are
+			// inserting ourself into ourself and thus both the source and destination
+			// be modified, making it rather tricky to attempt to do in place. The simplest
+			// resolution is to reallocate. To consider: there may be a way to implement this
+			// whereby we don't need to reallocate or can often avoid reallocating.
+			if(bCapacityIsSufficient && !bSourceIsFromSelf)
+			{
+				const ptrdiff_t nElementsAfter = (mpEnd - p);
+				iterator pOldEnd = mpEnd;
+
+				if(nElementsAfter >= (ptrdiff_t)n) // If the newly inserted characters entirely fit within the size of the original string...
+				{
+					// Shift the tail (and terminator) up by n, then copy the source in.
+					memmove(mpEnd + 1, mpEnd - n + 1, (size_t)n * sizeof(value_type));
+					mpEnd += n;
+					memmove(p + n, p, (size_t)((nElementsAfter - n) + 1) * sizeof(value_type));
+					memmove(p, pBegin, (size_t)(pEnd - pBegin) * sizeof(value_type));
+				}
+				else
+				{
+					// The insertion extends past the current end. Split the source at pMid:
+					// the part beyond the old end is placed into raw memory first, then the
+					// old tail is relocated, then the front part of the source is copied in.
+					const value_type* const pMid = pBegin + (nElementsAfter + 1);
+
+					memmove(mpEnd + 1, pMid, (size_t)(pEnd - pMid) * sizeof(value_type));
+					mpEnd += n - nElementsAfter;
+
+					#if EASTL_EXCEPTIONS_ENABLED
+						try
+						{
+					#endif
+							memmove(mpEnd, p, (size_t)(pOldEnd - p + 1) * sizeof(value_type));
+							mpEnd += nElementsAfter;
+					#if EASTL_EXCEPTIONS_ENABLED
+						}
+						catch(...)
+						{
+							// Restore the original end so the string stays consistent.
+							mpEnd = pOldEnd;
+							throw;
+						}
+					#endif
+
+					memmove(p, pBegin, (size_t)(pMid - pBegin) * sizeof(value_type));
+				}
+			}
+			else // Else we need to reallocate to implement this.
+			{
+				const size_type nOldSize = (size_type)(mpEnd - mpBegin);
+				const size_type nOldCap  = (size_type)((mpCapacity - mpBegin) - 1);
+				size_type nLength;
+
+				if(bCapacityIsSufficient) // If bCapacityIsSufficient is true, then bSourceIsFromSelf must be false.
+					nLength = nOldSize + n + 1; // + 1 to accomodate the trailing 0.
+				else
+					nLength = eastl::max_alt((size_type)GetNewCapacity(nOldCap), (size_type)(nOldSize + n)) + 1; // + 1 to accomodate the trailing 0.
+
+				pointer pNewBegin = DoAllocate(nLength);
+				pointer pNewEnd   = pNewBegin;
+
+				pNewEnd = CharStringUninitializedCopy(mpBegin, p, pNewBegin);
+				pNewEnd = CharStringUninitializedCopy(pBegin, pEnd, pNewEnd);
+				pNewEnd = CharStringUninitializedCopy(p, mpEnd, pNewEnd);
+				*pNewEnd = 0;
+
+				DeallocateSelf();
+				mpBegin    = pNewBegin;
+				mpEnd      = pNewEnd;
+				mpCapacity = pNewBegin + nLength;
+			}
+		}
+	}
+
+
+	// Erases up to n characters starting at position (n clamped to the remaining
+	// length) by forwarding to the iterator-range erase.
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::erase(size_type position, size_type n)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				EASTL_FAIL_MSG("basic_string::erase -- invalid position");
+		#endif
+
+		erase(mpBegin + position, mpBegin + position + eastl::min_alt(n, (size_type)(mpEnd - mpBegin) - position));
+		return *this;
+	}
+
+
+	// Erases the single character at p. The memmove also shifts the trailing 0
+	// (it copies (mpEnd - p) chars starting at p + 1, whose last element is *mpEnd).
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::iterator
+	basic_string<T, Allocator>::erase(iterator p)
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY((p < mpBegin) || (p >= mpEnd)))
+				EASTL_FAIL_MSG("basic_string::erase -- invalid position");
+		#endif
+
+		memmove(p, p + 1, (size_t)(mpEnd - p) * sizeof(value_type));
+		--mpEnd;
+		return p;
+	}
+
+
+	// Erases the range [pBegin, pEnd). The '+ 1' in the memmove count carries the
+	// trailing 0 along with the shifted tail.
+	template <typename T, typename Allocator>
+	typename basic_string<T, Allocator>::iterator
+	basic_string<T, Allocator>::erase(iterator pBegin, iterator pEnd)
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY((pBegin < mpBegin) || (pBegin > mpEnd) || (pEnd < mpBegin) || (pEnd > mpEnd) || (pEnd < pBegin)))
+				EASTL_FAIL_MSG("basic_string::erase -- invalid position");
+		#endif
+
+		if(pBegin != pEnd)
+		{
+			memmove(pBegin, pEnd, (size_t)((mpEnd - pEnd) + 1) * sizeof(value_type));
+			const iterator pNewEnd = (mpEnd - (pEnd - pBegin));
+			mpEnd = pNewEnd;
+		}
+		return pBegin;
+	}
+
+
+	// Reverse-iterator erase: convert to the underlying forward iterator
+	// (base() is one past the referenced element, hence the pre-increment).
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::reverse_iterator
+	basic_string<T, Allocator>::erase(reverse_iterator position)
+	{
+		return reverse_iterator(erase((++position).base()));
+	}
+
+
+	// Reverse-range erase: note the bounds swap, since [first, last) in reverse
+	// order corresponds to the forward range [last.base(), first.base()).
+	template <typename T, typename Allocator>
+	typename basic_string<T, Allocator>::reverse_iterator
+	basic_string<T, Allocator>::erase(reverse_iterator first, reverse_iterator last)
+	{
+		return reverse_iterator(erase((++last).base(), (++first).base()));
+	}
+
+
+	// Position-based replace overloads. Each clamps the replaced length to the
+	// remaining size, validates position/length, and forwards to the
+	// iterator-range replace.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n, const basic_string<T, Allocator>& x)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		const size_type nLength = eastl::min_alt(n, (size_type)(mpEnd - mpBegin) - position);
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY(((size_type)(mpEnd - mpBegin) - nLength) >= (kMaxSize - (size_type)(x.mpEnd - x.mpBegin))))
+				ThrowLengthException();
+		#endif
+
+		return replace(mpBegin + position, mpBegin + position + nLength, x.mpBegin, x.mpEnd);
+	}
+
+
+	// Replaces [pos1, pos1 + n1) with [pos2, pos2 + n2) of x; both counts are
+	// clamped to their respective remaining lengths.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type pos1, size_type n1, const basic_string<T, Allocator>& x, size_type pos2, size_type n2)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY((pos1 > (size_type)(mpEnd - mpBegin)) || (pos2 > (size_type)(x.mpEnd - x.mpBegin))))
+				ThrowRangeException();
+		#endif
+
+		const size_type nLength1 = eastl::min_alt(n1, (size_type)( mpEnd -  mpBegin) - pos1);
+		const size_type nLength2 = eastl::min_alt(n2, (size_type)(x.mpEnd - x.mpBegin) - pos2);
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY(((size_type)(mpEnd - mpBegin) - nLength1) >= (kMaxSize - nLength2)))
+				ThrowLengthException();
+		#endif
+
+		return replace(mpBegin + pos1, mpBegin + pos1 + nLength1, x.mpBegin + pos2, x.mpBegin + pos2 + nLength2);
+	}
+
+
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n1, const value_type* p, size_type n2)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		const size_type nLength = eastl::min_alt(n1, (size_type)(mpEnd - mpBegin) - position);
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((n2 > kMaxSize) || (((size_type)(mpEnd - mpBegin) - nLength) >= (kMaxSize - n2))))
+				ThrowLengthException();
+		#endif
+
+		return replace(mpBegin + position, mpBegin + position + nLength, p, p + n2);
+	}
+
+
+	// Replaces [position, position + n1) with the 0-terminated C string p.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n1, const value_type* p)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		const size_type nLength = eastl::min_alt(n1, (size_type)(mpEnd - mpBegin) - position);
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			const size_type n2 = (size_type)CharStrlen(p); // Only needed for the length check; re-measured below.
+			if(EASTL_UNLIKELY((n2 > kMaxSize) || (((size_type)(mpEnd - mpBegin) - nLength) >= (kMaxSize - n2))))
+				ThrowLengthException();
+		#endif
+
+		return replace(mpBegin + position, mpBegin + position + nLength, p, p + CharStrlen(p));
+	}
+
+
+	// Replaces [position, position + n1) with n2 copies of character c.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::replace(size_type position, size_type n1, size_type n2, value_type c)
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		const size_type nLength = eastl::min_alt(n1, (size_type)(mpEnd - mpBegin) - position);
+
+		#if EASTL_STRING_OPT_LENGTH_ERRORS
+			if(EASTL_UNLIKELY((n2 > kMaxSize) || ((size_type)(mpEnd - mpBegin) - nLength) >= (kMaxSize - n2)))
+				ThrowLengthException();
+		#endif
+
+		return replace(mpBegin + position, mpBegin + position + nLength, n2, c);
+	}
+
+
+	// Iterator-range replace wrappers: all forward to replace(it, it, pBegin2, pEnd2).
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::replace(iterator pBegin, iterator pEnd, const basic_string<T, Allocator>& x)
+	{
+		return replace(pBegin, pEnd, x.mpBegin, x.mpEnd);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::replace(iterator pBegin, iterator pEnd, const value_type* p, size_type n)
+	{
+		return replace(pBegin, pEnd, p, p + n);
+	}
+
+
+	template <typename T, typename Allocator>
+	inline basic_string<T, Allocator>& basic_string<T, Allocator>::replace(iterator pBegin, iterator pEnd, const value_type* p)
+	{
+		return replace(pBegin, pEnd, p, p + CharStrlen(p));
+	}
+
+
+	// Replaces [pBegin, pEnd) with n copies of c. Assigns in place for the part
+	// that fits, then either erases the leftover tail or inserts the shortfall.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::replace(iterator pBegin, iterator pEnd, size_type n, value_type c)
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY((pBegin < mpBegin) || (pBegin > mpEnd) || (pEnd < mpBegin) || (pEnd > mpEnd) || (pEnd < pBegin)))
+				EASTL_FAIL_MSG("basic_string::replace -- invalid position");
+		#endif
+
+		const size_type nLength = static_cast<size_type>(pEnd - pBegin);
+
+		if(nLength >= n) // If the replaced range is at least as long as the fill...
+		{
+			CharTypeAssignN(pBegin, n, c);
+			erase(pBegin + n, pEnd);
+		}
+		else // Else the fill is longer: overwrite the range, then insert the remainder.
+		{
+			CharTypeAssignN(pBegin, nLength, c);
+			insert(pEnd, n - nLength, c);
+		}
+		return *this;
+	}
+
+
+	// Replaces [pBegin1, pEnd1) with the range [pBegin2, pEnd2). Handles shrinking
+	// and expanding replacements, and falls back to a temporary reallocation when
+	// an expanding replacement overlaps this string's own buffer.
+	template <typename T, typename Allocator>
+	basic_string<T, Allocator>& basic_string<T, Allocator>::replace(iterator pBegin1, iterator pEnd1, const value_type* pBegin2, const value_type* pEnd2)
+	{
+		#if EASTL_ASSERT_ENABLED
+			if(EASTL_UNLIKELY((pBegin1 < mpBegin) || (pBegin1 > mpEnd) || (pEnd1 < mpBegin) || (pEnd1 > mpEnd) || (pEnd1 < pBegin1)))
+				EASTL_FAIL_MSG("basic_string::replace -- invalid position");
+		#endif
+
+		const size_type nLength1 = (size_type)(pEnd1 - pBegin1);
+		const size_type nLength2 = (size_type)(pEnd2 - pBegin2);
+
+		if(nLength1 >= nLength2) // If we have a non-expanding operation...
+		{
+			if((pBegin2 > pEnd1) || (pEnd2 <= pBegin1))  // If we have a non-overlapping operation...
+				memcpy(pBegin1, pBegin2, (size_t)(pEnd2 - pBegin2) * sizeof(value_type));
+			else
+				memmove(pBegin1, pBegin2, (size_t)(pEnd2 - pBegin2) * sizeof(value_type));
+			erase(pBegin1 + nLength2, pEnd1);
+		}
+		else // Else we are expanding.
+		{
+			if((pBegin2 > pEnd1) || (pEnd2 <= pBegin1)) // If we have a non-overlapping operation...
+			{
+				const value_type* const pMid2 = pBegin2 + nLength1;
+
+				// NOTE(review): this inner test repeats the outer non-overlap condition,
+				// which is already known to be true here, so the memmove branch below
+				// appears unreachable — confirm before simplifying.
+				if((pEnd2 <= pBegin1) || (pBegin2 > pEnd1))
+					memcpy(pBegin1, pBegin2, (size_t)(pMid2 - pBegin2) * sizeof(value_type));
+				else
+					memmove(pBegin1, pBegin2, (size_t)(pMid2 - pBegin2) * sizeof(value_type));
+				insert(pEnd1, pMid2, pEnd2);
+			}
+			else // else we have an overlapping operation.
+			{
+				// I can't think of any easy way of doing this without allocating temporary memory.
+				const size_type nOldSize     = (size_type)(mpEnd - mpBegin);
+				const size_type nOldCap      = (size_type)((mpCapacity - mpBegin) - 1);
+				const size_type nNewCapacity = eastl::max_alt((size_type)GetNewCapacity(nOldCap), (size_type)(nOldSize + (nLength2 - nLength1))) + 1; // + 1 to accomodate the trailing 0.
+
+				pointer pNewBegin = DoAllocate(nNewCapacity);
+				pointer pNewEnd   = pNewBegin;
+
+				pNewEnd = CharStringUninitializedCopy(mpBegin, pBegin1, pNewBegin);
+				pNewEnd = CharStringUninitializedCopy(pBegin2, pEnd2,  pNewEnd);
+				pNewEnd = CharStringUninitializedCopy(pEnd1,   mpEnd,  pNewEnd);
+				*pNewEnd = 0;
+
+				DeallocateSelf();
+				mpBegin    = pNewBegin;
+				mpEnd      = pNewEnd;
+				mpCapacity = pNewBegin + nNewCapacity;
+			}
+		}
+		return *this;
+	}
+
+
+	// Copies up to n characters starting at position into the caller's buffer p
+	// (no terminating 0 is written). Returns the number of characters copied.
+	template <typename T, typename Allocator>
+	typename basic_string<T, Allocator>::size_type
+	basic_string<T, Allocator>::copy(value_type* p, size_type n, size_type position) const
+	{
+		#if EASTL_STRING_OPT_RANGE_ERRORS
+			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+				ThrowRangeException();
+		#endif
+
+		// It is not clear from the C++ standard if 'p' destination pointer is allowed to
+		// refer to memory from within the string itself. We assume so and use memmove
+		// instead of memcpy until we find otherwise.
+		const size_type nLength = eastl::min_alt(n, (size_type)(mpEnd - mpBegin) - position);
+		memmove(p, mpBegin + position, (size_t)nLength * sizeof(value_type));
+		return nLength;
+	}
+
+
+	// Swaps contents with x. When the allocators compare equal this is an O(1)
+	// pointer swap; otherwise the contents must be exchanged by copy.
+	template <typename T, typename Allocator>
+	void basic_string<T, Allocator>::swap(basic_string<T, Allocator>& x)
+	{
+		if(mAllocator == x.mAllocator) // If allocators are equivalent...
+		{
+			// We leave mAllocator as-is.
+			eastl::swap(mpBegin,    x.mpBegin);
+			eastl::swap(mpEnd,      x.mpEnd);
+			eastl::swap(mpCapacity, x.mpCapacity);
+		}
+		else // else swap the contents.
+		{
+			const this_type temp(*this); // Can't call eastl::swap because that would
+			*this = x;                   // itself call this member swap function.
+			x     = temp;
+		}
+	}
+
+
+	// find overloads: the string and C-string forms forward to find(p, position, n).
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::size_type
+	basic_string<T, Allocator>::find(const basic_string<T, Allocator>& x, size_type position) const
+	{
+		return find(x.mpBegin, position, (size_type)(x.mpEnd - x.mpBegin));
+	}
+
+
+	template <typename T, typename Allocator>
+	inline typename basic_string<T, Allocator>::size_type
+	basic_string<T, Allocator>::find(const value_type* p, size_type position) const
+	{
+		return find(p, position, (size_type)CharStrlen(p));
+	}
+
+
+	#if defined(EA_PLATFORM_XENON) // If XBox 360...
+		// XBox 360 variant: scans with Find (first-char search) then Compare, which
+		// was presumably faster than eastl::search on that platform — see the
+		// generic implementation in the #else branch for the standard behavior.
+		template <typename T, typename Allocator>
+		typename basic_string<T, Allocator>::size_type
+		basic_string<T, Allocator>::find(const value_type* p, size_type position, size_type n) const
+		{
+			const size_type nLength = (size_type)(mpEnd - mpBegin);
+
+			if(n || (position > nLength))
+			{
+				if(position < nLength)
+				{
+					size_type nRemain = nLength - position;
+
+					if(n <= nRemain)
+					{
+						nRemain -= (n - 1);
+
+						// Repeatedly locate the first character of p, then verify the
+						// full pattern with Compare at each candidate position.
+						for(const value_type* p1, *p2 = mpBegin + position;
+							(p1 = Find(p2, *p, nRemain)) != 0;
+							nRemain -= (p1 - p2) + 1, p2 = (p1 + 1))
+						{
+							if(Compare(p1, p, n) == 0)
+								return (size_type)(p1 - mpBegin);
+						}
+					}
+				}
+
+				return npos;
+			}
+
+			return position; // Empty pattern at a valid position matches there.
+		}
+	#else
+		// Generic substring search: returns the index of the first occurrence of
+		// [p, p + n) at or after position, or npos. An empty pattern matches at
+		// any valid position.
+		template <typename T, typename Allocator>
+		typename basic_string<T, Allocator>::size_type
+		basic_string<T, Allocator>::find(const value_type* p, size_type position, size_type n) const
+		{
+			// It is not clear what the requirements are for position, but since the C++ standard
+			// appears to be silent it is assumed for now that position can be any value.
+			//#if EASTL_ASSERT_ENABLED
+			//    if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+			//        EASTL_FAIL_MSG("basic_string::find -- invalid position");
+			//#endif
+
+			if(EASTL_LIKELY((position + n) <= (size_type)(mpEnd - mpBegin))) // If the range is valid...
+			{
+				const value_type* const pTemp = eastl::search(mpBegin + position, mpEnd, p, p + n);
+
+				if((pTemp != mpEnd) || (n == 0))
+					return (size_type)(pTemp - mpBegin);
+			}
+			return npos;
+		}
+	#endif
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find(value_type c, size_type position) const
+ {
+ // It is not clear what the requirements are for position, but since the C++ standard
+ // appears to be silent it is assumed for now that position can be any value.
+ //#if EASTL_ASSERT_ENABLED
+ // if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
+ // EASTL_FAIL_MSG("basic_string::find -- invalid position");
+ //#endif
+
+ if(EASTL_LIKELY(position < (size_type)(mpEnd - mpBegin))) // If the position is valid...
+ {
+ const const_iterator pResult = eastl::find(mpBegin + position, mpEnd, c);
+
+ if(pResult != mpEnd)
+ return (size_type)(pResult - mpBegin);
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::rfind(const basic_string<T, Allocator>& x, size_type position) const
+ {
+ return rfind(x.mpBegin, position, (size_type)(x.mpEnd - x.mpBegin));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::rfind(const value_type* p, size_type position) const
+ {
+ return rfind(p, position, (size_type)CharStrlen(p));
+ }
+
+
	// rfind
	// Backward search for the last occurrence of [p, p+n) whose start index is
	// <= position. Returns the index of the match, or npos.
	template <typename T, typename Allocator>
	typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::rfind(const value_type* p, size_type position, size_type n) const
	{
		// Disabled because it's not clear what values are valid for position.
		// It is documented that npos is a valid value, though. We return npos and
		// don't crash if postion is any invalid value.
		//#if EASTL_ASSERT_ENABLED
		//    if(EASTL_UNLIKELY((position != npos) && (position > (size_type)(mpEnd - mpBegin))))
		//        EASTL_FAIL_MSG("basic_string::rfind -- invalid position");
		//#endif

		// Note that a search for a zero length string starting at position = end() returns end() and not npos.
		// Note by Paul Pedriana: I am not sure how this should behave in the case of n == 0 and position > size.
		// The standard seems to suggest that rfind doesn't act exactly the same as find in that input position
		// can be > size and the return value can still be other than npos. Thus, if n == 0 then you can
		// never return npos, unlike the case with find.
		const size_type nLength = (size_type)(mpEnd - mpBegin);

		if(EASTL_LIKELY(n <= nLength)) // Pattern longer than the string can never match.
		{
			if(EASTL_LIKELY(n))
			{
				// pEnd limits the search so that a match starting after 'position'
				// (or running off the end) is not considered.
				const const_iterator pEnd = mpBegin + eastl::min_alt(nLength - n, position) + n;
				const const_iterator pResult = CharTypeStringRSearch(mpBegin, pEnd, p, p + n);

				if(pResult != pEnd) // CharTypeStringRSearch returns pEnd on failure.
					return (size_type)(pResult - mpBegin);
			}
			else
				// Empty pattern: matches at min(length, position), never npos.
				return eastl::min_alt(nLength, position);
		}
		return npos;
	}
+
+
	// rfind
	// Backward search for the last occurrence of character c at index <= position.
	// Returns the index of the match, or npos.
	template <typename T, typename Allocator>
	typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::rfind(value_type c, size_type position) const
	{
		// If n is zero or position is >= size, we return npos.
		const size_type nLength = (size_type)(mpEnd - mpBegin);

		if(EASTL_LIKELY(nLength))
		{
			// pEnd is one past the last candidate; position is clamped to the last index.
			const value_type* const pEnd = mpBegin + eastl::min_alt(nLength - 1, position) + 1;
			// CharTypeStringRFind scans backward from pEnd toward mpBegin and
			// returns a pointer one PAST the match (or mpBegin on failure),
			// hence the (pResult - 1) below.
			const value_type* const pResult = CharTypeStringRFind(pEnd, mpBegin, c);

			if(pResult != mpBegin)
				return (size_type)((pResult - 1) - mpBegin);
		}
		return npos;
	}
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_of(const basic_string<T, Allocator>& x, size_type position) const
+ {
+ return find_first_of(x.mpBegin, position, (size_type)(x.mpEnd - x.mpBegin));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_of(const value_type* p, size_type position) const
+ {
+ return find_first_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
	#if defined(EA_PLATFORM_XENON) // If XBox 360...

		// find_first_of
		// Finds the first character at index >= position that appears in the
		// n-character set [p, p+n). Returns its index, or npos.
		// Xenon-only variant — presumably a platform specialization; confirm
		// against the generic path below.
		template <typename T, typename Allocator>
		typename basic_string<T, Allocator>::size_type
		basic_string<T, Allocator>::find_first_of(const value_type* p, size_type position, size_type n) const
		{
			// If position is >= size, we return npos.
			if(n && (position < (size_type)(mpEnd - mpBegin)))
			{
				// Linear scan; Find tests membership of *p1 in the set [p, p+n).
				for(const value_type* p1 = (mpBegin + position); p1 < mpEnd; ++p1)
				{
					if(Find(p, *p1, n) != 0)
						return (size_type)(p1 - mpBegin);
				}
			}
			return npos;
		}
	#else
		// find_first_of
		// Finds the first character at index >= position that appears in the
		// n-character set [p, p+n). Returns its index, or npos.
		template <typename T, typename Allocator>
		typename basic_string<T, Allocator>::size_type
		basic_string<T, Allocator>::find_first_of(const value_type* p, size_type position, size_type n) const
		{
			// If position is >= size, we return npos.
			if(EASTL_LIKELY((position < (size_type)(mpEnd - mpBegin))))
			{
				const value_type* const pBegin = mpBegin + position;
				const const_iterator pResult = CharTypeStringFindFirstOf(pBegin, mpEnd, p, p + n);

				if(pResult != mpEnd)
					return (size_type)(pResult - mpBegin);
			}
			return npos;
		}
	#endif
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_of(value_type c, size_type position) const
+ {
+ return find(c, position);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_of(const basic_string<T, Allocator>& x, size_type position) const
+ {
+ return find_last_of(x.mpBegin, position, (size_type)(x.mpEnd - x.mpBegin));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_of(const value_type* p, size_type position) const
+ {
+ return find_last_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
	#if defined(EA_PLATFORM_XENON) // If XBox 360...

		// find_last_of
		// Finds the last character at index <= position that appears in the
		// n-character set [p, p+n). Returns its index, or npos.
		// Xenon-only variant — presumably a platform specialization; confirm
		// against the generic path below.
		template <typename T, typename Allocator>
		typename basic_string<T, Allocator>::size_type
		basic_string<T, Allocator>::find_last_of(const value_type* p, size_type position, size_type n) const
		{
			// If n is zero or position is >= size, we return npos.
			const size_type nLength = (size_type)(mpEnd - mpBegin);

			if(n && nLength)
			{
				const value_type* p1;

				// Clamp the starting point to the last character.
				if(position < nLength)
					p1 = mpBegin + position;
				else
					p1 = mpEnd - 1;

				// Walk backward; Find tests membership of *p1 in the set.
				for(;;)
				{
					if(Find(p, *p1, n))
						return (size_type)(p1 - mpBegin);

					if(p1-- == mpBegin)
						break;
				}
			}

			return npos;
		}
	#else
		// find_last_of
		// Finds the last character at index <= position that appears in the
		// n-character set [p, p+n). Returns its index, or npos.
		template <typename T, typename Allocator>
		typename basic_string<T, Allocator>::size_type
		basic_string<T, Allocator>::find_last_of(const value_type* p, size_type position, size_type n) const
		{
			// If n is zero or position is >= size, we return npos.
			const size_type nLength = (size_type)(mpEnd - mpBegin);

			if(EASTL_LIKELY(nLength))
			{
				// pEnd is one past the last candidate; position is clamped to the last index.
				const value_type* const pEnd = mpBegin + eastl::min_alt(nLength - 1, position) + 1;
				// The reverse helper returns a pointer one PAST the match
				// (or mpBegin on failure), hence the (pResult - 1) below.
				const value_type* const pResult = CharTypeStringRFindFirstOf(pEnd, mpBegin, p, p + n);

				if(pResult != mpBegin)
					return (size_type)((pResult - 1) - mpBegin);
			}
			return npos;
		}
	#endif
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_of(value_type c, size_type position) const
+ {
+ return rfind(c, position);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(const basic_string<T, Allocator>& x, size_type position) const
+ {
+ return find_first_not_of(x.mpBegin, position, (size_type)(x.mpEnd - x.mpBegin));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(const value_type* p, size_type position) const
+ {
+ return find_first_not_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(const value_type* p, size_type position, size_type n) const
+ {
+ if(EASTL_LIKELY(position <= (size_type)(mpEnd - mpBegin)))
+ {
+ const const_iterator pResult = CharTypeStringFindFirstNotOf(mpBegin + position, mpEnd, p, p + n);
+
+ if(pResult != mpEnd)
+ return (size_type)(pResult - mpBegin);
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_first_not_of(value_type c, size_type position) const
+ {
+ if(EASTL_LIKELY(position <= (size_type)(mpEnd - mpBegin)))
+ {
+ // Todo: Possibly make a specialized version of CharTypeStringFindFirstNotOf(pBegin, pEnd, c).
+ const const_iterator pResult = CharTypeStringFindFirstNotOf(mpBegin + position, mpEnd, &c, &c + 1);
+
+ if(pResult != mpEnd)
+ return (size_type)(pResult - mpBegin);
+ }
+ return npos;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_not_of(const basic_string<T, Allocator>& x, size_type position) const
+ {
+ return find_last_not_of(x.mpBegin, position, (size_type)(x.mpEnd - x.mpBegin));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::find_last_not_of(const value_type* p, size_type position) const
+ {
+ return find_last_not_of(p, position, (size_type)CharStrlen(p));
+ }
+
+
	// find_last_not_of
	// Finds the last character at index <= position that is absent from the
	// n-character set [p, p+n). Returns its index, or npos.
	template <typename T, typename Allocator>
	typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::find_last_not_of(const value_type* p, size_type position, size_type n) const
	{
		const size_type nLength = (size_type)(mpEnd - mpBegin);

		if(EASTL_LIKELY(nLength))
		{
			// pEnd is one past the last candidate; position is clamped to the last index.
			const value_type* const pEnd = mpBegin + eastl::min_alt(nLength - 1, position) + 1;
			// The reverse helper returns a pointer one PAST the match
			// (or mpBegin on failure), hence the (pResult - 1) below.
			const value_type* const pResult = CharTypeStringRFindFirstNotOf(pEnd, mpBegin, p, p + n);

			if(pResult != mpBegin)
				return (size_type)((pResult - 1) - mpBegin);
		}
		return npos;
	}
+
+
	// find_last_not_of
	// Finds the last character at index <= position that is not c.
	// Returns its index, or npos.
	template <typename T, typename Allocator>
	typename basic_string<T, Allocator>::size_type
	basic_string<T, Allocator>::find_last_not_of(value_type c, size_type position) const
	{
		const size_type nLength = (size_type)(mpEnd - mpBegin);

		if(EASTL_LIKELY(nLength))
		{
			// Todo: Possibly make a specialized version of CharTypeStringRFindFirstNotOf(pBegin, pEnd, c).
			// pEnd is one past the last candidate; position is clamped to the last index.
			const value_type* const pEnd = mpBegin + eastl::min_alt(nLength - 1, position) + 1;
			// c is passed as a one-element set; the helper returns a pointer one
			// PAST the match (or mpBegin on failure), hence the (pResult - 1).
			const value_type* const pResult = CharTypeStringRFindFirstNotOf(pEnd, mpBegin, &c, &c + 1);

			if(pResult != mpBegin)
				return (size_type)((pResult - 1) - mpBegin);
		}
		return npos;
	}
+
+
	// substr
	// Returns a new string holding up to n characters starting at 'position',
	// using this string's allocator. n is clamped to the remaining length.
	// Out-of-range position throws (or asserts), per configuration.
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator> basic_string<T, Allocator>::substr(size_type position, size_type n) const
	{
		#if EASTL_STRING_OPT_RANGE_ERRORS
			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
				ThrowRangeException();
		#elif EASTL_ASSERT_ENABLED
			if(EASTL_UNLIKELY(position > (size_type)(mpEnd - mpBegin)))
				EASTL_FAIL_MSG("basic_string::substr -- invalid position");
		#endif

		return basic_string(mpBegin + position, mpBegin + position + eastl::min_alt(n, (size_type)(mpEnd - mpBegin) - position), mAllocator);
	}
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(const basic_string<T, Allocator>& x) const
+ {
+ return compare(mpBegin, mpEnd, x.mpBegin, x.mpEnd);
+ }
+
+
	// compare
	// Compares the substring [pos1, pos1 + n1) of this string (n1 clamped to the
	// remaining length) against all of x. Throws on out-of-range pos1 when
	// range errors are enabled.
	template <typename T, typename Allocator>
	inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const basic_string<T, Allocator>& x) const
	{
		#if EASTL_STRING_OPT_RANGE_ERRORS
			if(EASTL_UNLIKELY(pos1 > (size_type)(mpEnd - mpBegin)))
				ThrowRangeException();
		#endif

		return compare(mpBegin + pos1,
					   mpBegin + pos1 + eastl::min_alt(n1, (size_type)(mpEnd - mpBegin) - pos1),
					   x.mpBegin,
					   x.mpEnd);
	}
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const basic_string<T, Allocator>& x, size_type pos2, size_type n2) const
+ {
+ #if EASTL_STRING_OPT_RANGE_ERRORS
+ if(EASTL_UNLIKELY((pos1 > (size_type)(mpEnd - mpBegin)) || (pos2 > (size_type)(x.mpEnd - x.mpBegin))))
+ ThrowRangeException();
+ #endif
+
+ return compare(mpBegin + pos1,
+ mpBegin + pos1 + eastl::min_alt(n1, (size_type)(mpEnd - mpBegin) - pos1),
+ x.mpBegin + pos2,
+ x.mpBegin + pos2 + eastl::min_alt(n2, (size_type)(mpEnd - mpBegin) - pos2));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::compare(const value_type* p) const
+ {
+ return compare(mpBegin, mpEnd, p, p + CharStrlen(p));
+ }
+
+
	// compare
	// Compares the substring [pos1, pos1 + n1) of this string (n1 clamped to the
	// remaining length) against the entire 0-terminated string p.
	template <typename T, typename Allocator>
	inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const value_type* p) const
	{
		#if EASTL_STRING_OPT_RANGE_ERRORS
			if(EASTL_UNLIKELY(pos1 > (size_type)(mpEnd - mpBegin)))
				ThrowRangeException();
		#endif

		return compare(mpBegin + pos1,
					   mpBegin + pos1 + eastl::min_alt(n1, (size_type)(mpEnd - mpBegin) - pos1),
					   p,
					   p + CharStrlen(p));
	}
+
+
	// compare
	// Compares the substring [pos1, pos1 + n1) of this string (n1 clamped to the
	// remaining length) against the first n2 characters of p. Since n2 is given
	// explicitly, p need not be 0-terminated.
	template <typename T, typename Allocator>
	inline int basic_string<T, Allocator>::compare(size_type pos1, size_type n1, const value_type* p, size_type n2) const
	{
		#if EASTL_STRING_OPT_RANGE_ERRORS
			if(EASTL_UNLIKELY(pos1 > (size_type)(mpEnd - mpBegin)))
				ThrowRangeException();
		#endif

		return compare(mpBegin + pos1,
					   mpBegin + pos1 + eastl::min_alt(n1, (size_type)(mpEnd - mpBegin) - pos1),
					   p,
					   p + n2);
	}
+
+
+ // make_lower
+ // This is a very simple ASCII-only case conversion function
+ // Anything more complicated should use a more powerful separate library.
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::make_lower()
+ {
+ for(pointer p = mpBegin; p < mpEnd; ++p)
+ *p = (value_type)CharToLower(*p);
+ }
+
+
+ // make_upper
+ // This is a very simple ASCII-only case conversion function
+ // Anything more complicated should use a more powerful separate library.
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::make_upper()
+ {
+ for(pointer p = mpBegin; p < mpEnd; ++p)
+ *p = (value_type)CharToUpper(*p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::ltrim()
+ {
+ const value_type array[] = { ' ', '\t', 0 }; // This is a pretty simplistic view of whitespace.
+ erase(0, find_first_not_of(array));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::rtrim()
+ {
+ const value_type array[] = { ' ', '\t', 0 }; // This is a pretty simplistic view of whitespace.
+ erase(find_last_not_of(array) + 1);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::trim()
+ {
+ ltrim();
+ rtrim();
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator> basic_string<T, Allocator>::left(size_type n) const
+ {
+ const size_type nLength = length();
+ if(n < nLength)
+ return substr(0, n);
+ return *this;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline basic_string<T, Allocator> basic_string<T, Allocator>::right(size_type n) const
+ {
+ const size_type nLength = length();
+ if(n < nLength)
+ return substr(nLength - n, n);
+ return *this;
+ }
+
+
	// sprintf
	// Replaces the string's contents with printf-style formatted output.
	// The existing contents are discarded, then the formatted text is appended.
	template <typename T, typename Allocator>
	inline basic_string<T, Allocator>& basic_string<T, Allocator>::sprintf(const value_type* pFormat, ...)
	{
		va_list arguments;
		va_start(arguments, pFormat);
		mpEnd = mpBegin; // Fast truncate to zero length.
		append_sprintf_va_list(pFormat, arguments);
		va_end(arguments); // Must pair with va_start before returning.

		return *this;
	}
+
+
	// sprintf_va_list
	// va_list variant of sprintf: replaces the string's contents with formatted
	// output. The caller owns the va_list (va_start/va_end are its responsibility).
	template <typename T, typename Allocator>
	basic_string<T, Allocator>& basic_string<T, Allocator>::sprintf_va_list(const value_type* pFormat, va_list arguments)
	{
		mpEnd = mpBegin; // Fast truncate to zero length.

		return append_sprintf_va_list(pFormat, arguments);
	}
+
+
+ template <typename T, typename Allocator>
+ int basic_string<T, Allocator>::compare(const value_type* pBegin1, const value_type* pEnd1,
+ const value_type* pBegin2, const value_type* pEnd2)
+ {
+ const ptrdiff_t n1 = pEnd1 - pBegin1;
+ const ptrdiff_t n2 = pEnd2 - pBegin2;
+ const ptrdiff_t nMin = eastl::min_alt(n1, n2);
+ const int cmp = Compare(pBegin1, pBegin2, (size_t)nMin);
+
+ return (cmp != 0 ? cmp : (n1 < n2 ? -1 : (n1 > n2 ? 1 : 0)));
+ }
+
+
+ template <typename T, typename Allocator>
+ int basic_string<T, Allocator>::comparei(const value_type* pBegin1, const value_type* pEnd1,
+ const value_type* pBegin2, const value_type* pEnd2)
+ {
+ const ptrdiff_t n1 = pEnd1 - pBegin1;
+ const ptrdiff_t n2 = pEnd2 - pBegin2;
+ const ptrdiff_t nMin = eastl::min_alt(n1, n2);
+ const int cmp = CompareI(pBegin1, pBegin2, (size_t)nMin);
+
+ return (cmp != 0 ? cmp : (n1 < n2 ? -1 : (n1 > n2 ? 1 : 0)));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::comparei(const basic_string<T, Allocator>& x) const
+ {
+ return comparei(mpBegin, mpEnd, x.mpBegin, x.mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::comparei(const value_type* p) const
+ {
+ return comparei(mpBegin, mpEnd, p, p + CharStrlen(p));
+ }
+
+
	// InsertInternal
	// Inserts the single character c before position p and returns an iterator
	// to the inserted character. Takes one of two paths: an in-place shift when
	// capacity (including room for the trailing 0) suffices, or a full
	// reallocate-and-copy otherwise.
	template <typename T, typename Allocator>
	typename basic_string<T, Allocator>::iterator
	basic_string<T, Allocator>::InsertInternal(iterator p, value_type c)
	{
		iterator pNewPosition = p;

		if((mpEnd + 1) < mpCapacity) // Room for one more character plus the trailing 0...
		{
			// Write the new terminating 0 one past the current end, shift
			// [p, mpEnd) up by one, then drop c into the gap.
			*(mpEnd + 1) = 0;
			memmove(p + 1, p, (size_t)(mpEnd - p) * sizeof(value_type));
			*p = c;
			++mpEnd;
		}
		else
		{
			const size_type nOldSize = (size_type)(mpEnd - mpBegin);
			const size_type nOldCap  = (size_type)((mpCapacity - mpBegin) - 1);
			const size_type nLength  = eastl::max_alt((size_type)GetNewCapacity(nOldCap), (size_type)(nOldSize + 1)) + 1; // The second + 1 is to accomodate the trailing 0.

			iterator pNewBegin = DoAllocate(nLength);
			iterator pNewEnd   = pNewBegin;

			// Copy the prefix [mpBegin, p), place c, then copy the suffix [p, mpEnd).
			pNewPosition = CharStringUninitializedCopy(mpBegin, p, pNewBegin);
			*pNewPosition = c;

			pNewEnd = pNewPosition + 1;
			pNewEnd = CharStringUninitializedCopy(p, mpEnd, pNewEnd);
			*pNewEnd = 0;

			// Release the old buffer and adopt the new one.
			DeallocateSelf();
			mpBegin    = pNewBegin;
			mpEnd      = pNewEnd;
			mpCapacity = pNewBegin + nLength;
		}
		return pNewPosition;
	}
+
+
+ template <typename T, typename Allocator>
+ void basic_string<T, Allocator>::SizeInitialize(size_type n, value_type c)
+ {
+ AllocateSelf((size_type)(n + 1)); // '+1' so that we have room for the terminating 0.
+
+ mpEnd = CharStringUninitializedFillN(mpBegin, n, c);
+ *mpEnd = 0;
+ }
+
+
	// RangeInitialize
	// Initializes the string from the character range [pBegin, pEnd), with a
	// trailing 0. Assumes the string has no buffer to free (constructor-time
	// helper). A null pBegin with a non-empty range is an argument error.
	template <typename T, typename Allocator>
	void basic_string<T, Allocator>::RangeInitialize(const value_type* pBegin, const value_type* pEnd)
	{
		const size_type n = (size_type)(pEnd - pBegin);

		#if EASTL_STRING_OPT_ARGUMENT_ERRORS
			if(EASTL_UNLIKELY(!pBegin && (n != 0)))
				ThrowInvalidArgumentException();
		#endif

		AllocateSelf((size_type)(n + 1)); // '+1' so that we have room for the terminating 0.

		mpEnd = CharStringUninitializedCopy(pBegin, pEnd, mpBegin);
		*mpEnd = 0;
	}
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::RangeInitialize(const value_type* pBegin)
+ {
+ #if EASTL_STRING_OPT_ARGUMENT_ERRORS
+ if(EASTL_UNLIKELY(!pBegin))
+ ThrowInvalidArgumentException();
+ #endif
+
+ RangeInitialize(pBegin, pBegin + CharStrlen(pBegin));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::DoAllocate(size_type n)
+ {
+ EASTL_ASSERT(n > 1); // We want n > 1 because n == 1 is reserved for empty capacity and usage of gEmptyString.
+ return (value_type*)EASTLAlloc(mAllocator, n * sizeof(value_type));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void basic_string<T, Allocator>::DoFree(value_type* p, size_type n)
+ {
+ if(p)
+ EASTLFree(mAllocator, p, n * sizeof(value_type));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename basic_string<T, Allocator>::size_type
+ basic_string<T, Allocator>::GetNewCapacity(size_type currentCapacity) // This needs to return a value of at least currentCapacity and at least 1.
+ {
+ return (currentCapacity > EASTL_STRING_INITIAL_CAPACITY) ? (2 * currentCapacity) : EASTL_STRING_INITIAL_CAPACITY;
+ }
+
+
	// AllocateSelf
	// Puts the string into its empty representation: mpBegin points at the
	// shared, immutable gEmptyString (which we promise never to write through),
	// so empty strings allocate nothing.
	template <typename T, typename Allocator>
	inline void basic_string<T, Allocator>::AllocateSelf()
	{
		EASTL_ASSERT(gEmptyString.mUint32 == 0);
		mpBegin = const_cast<value_type*>(GetEmptyString(value_type())); // In const_cast-int this, we promise not to modify it.
		mpEnd = mpBegin;
		mpCapacity = mpBegin + 1; // When we are using gEmptyString, mpCapacity is always mpEnd + 1. This is an important distinguising characteristic.
	}
+
+
	// AllocateSelf
	// Allocates a buffer of n characters (including the caller's room for the
	// trailing 0) and points the string at it, empty. n <= 1 degenerates to the
	// shared gEmptyString representation; n > kMaxSize is a length error.
	template <typename T, typename Allocator>
	void basic_string<T, Allocator>::AllocateSelf(size_type n)
	{
		#if EASTL_ASSERT_ENABLED
			if(EASTL_UNLIKELY(n >= 0x40000000)) // Sanity bound: ~1 billion characters.
				EASTL_FAIL_MSG("basic_string::AllocateSelf -- improbably large request.");
		#endif

		#if EASTL_STRING_OPT_LENGTH_ERRORS
			if(EASTL_UNLIKELY(n > kMaxSize))
				ThrowLengthException();
		#endif

		if(n > 1)
		{
			mpBegin = DoAllocate(n);
			mpEnd = mpBegin;
			mpCapacity = mpBegin + n;
		}
		else
			AllocateSelf(); // Use the shared empty-string representation.
	}
+
+
	// DeallocateSelf
	// Frees the string's buffer unless it is the shared empty-string
	// representation. Does not reset the member pointers; callers do that.
	template <typename T, typename Allocator>
	inline void basic_string<T, Allocator>::DeallocateSelf()
	{
		// Note that we compare mpCapacity to mpEnd instead of comparing
		// mpBegin to &gEmptyString. This is important because we may have
		// a case whereby one library passes a string to another library to
		// deallocate and the two libraries have idependent versions of gEmptyString.
		if((mpCapacity - mpBegin) > 1) // If we are not using gEmptyString as our memory...
			DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));
	}
+
+
	// ThrowLengthException
	// Reports a request exceeding kMaxSize: throws std::length_error when
	// exceptions are enabled, otherwise asserts, otherwise does nothing.
	template <typename T, typename Allocator>
	inline void basic_string<T, Allocator>::ThrowLengthException() const
	{
		#if EASTL_EXCEPTIONS_ENABLED
			throw std::length_error("basic_string -- length_error");
		#elif EASTL_ASSERT_ENABLED
			EASTL_FAIL_MSG("basic_string -- length_error");
		#endif
	}
+
+
	// ThrowRangeException
	// Reports an out-of-range position: throws std::out_of_range when
	// exceptions are enabled, otherwise asserts, otherwise does nothing.
	template <typename T, typename Allocator>
	inline void basic_string<T, Allocator>::ThrowRangeException() const
	{
		#if EASTL_EXCEPTIONS_ENABLED
			throw std::out_of_range("basic_string -- out of range");
		#elif EASTL_ASSERT_ENABLED
			EASTL_FAIL_MSG("basic_string -- out of range");
		#endif
	}
+
+
	// ThrowInvalidArgumentException
	// Reports an invalid argument (e.g. null source pointer): throws
	// std::invalid_argument when exceptions are enabled, otherwise asserts,
	// otherwise does nothing.
	template <typename T, typename Allocator>
	inline void basic_string<T, Allocator>::ThrowInvalidArgumentException() const
	{
		#if EASTL_EXCEPTIONS_ENABLED
			throw std::invalid_argument("basic_string -- invalid argument");
		#elif EASTL_ASSERT_ENABLED
			EASTL_FAIL_MSG("basic_string -- invalid argument");
		#endif
	}
+
+
+ // CharTypeStringFindEnd
+ // Specialized char version of STL find() from back function.
+ // Not the same as RFind because search range is specified as forward iterators.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringFindEnd(const value_type* pBegin, const value_type* pEnd, value_type c)
+ {
+ const value_type* pTemp = pEnd;
+ while(--pTemp >= pBegin)
+ {
+ if(*pTemp == c)
+ return pTemp;
+ }
+
+ return pEnd;
+ }
+
+
+ // CharTypeStringRFind
+ // Specialized value_type version of STL find() function in reverse.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringRFind(const value_type* pRBegin, const value_type* pREnd, const value_type c)
+ {
+ while(pRBegin > pREnd)
+ {
+ if(*(pRBegin - 1) == c)
+ return pRBegin;
+ --pRBegin;
+ }
+ return pREnd;
+ }
+
+
	// CharTypeStringSearch
	// Specialized value_type version of STL search() function.
	// Purpose: find p2 within p1. Return p1End if not found or if either string is zero length.
	template <typename T, typename Allocator>
	const typename basic_string<T, Allocator>::value_type*
	basic_string<T, Allocator>::CharTypeStringSearch(const value_type* p1Begin, const value_type* p1End,
													 const value_type* p2Begin, const value_type* p2End)
	{
		// Test for zero length strings, in which case we have a match or a failure,
		// but the return value is the same either way.
		if((p1Begin == p1End) || (p2Begin == p2End))
			return p1Begin;

		// Test for a pattern of length 1.
		if((p2Begin + 1) == p2End)
			return eastl::find(p1Begin, p1End, *p2Begin);

		// General case: repeatedly find the pattern's first character, then
		// verify the remainder character by character.
		const value_type* pTemp;
		const value_type* pTemp1 = (p2Begin + 1); // Start of the pattern's remainder.
		const value_type* pCurrent = p1Begin;

		while(p1Begin != p1End)
		{
			// Locate the next candidate: a match of the pattern's first character.
			p1Begin = eastl::find(p1Begin, p1End, *p2Begin);
			if(p1Begin == p1End)
				return p1End;

			pTemp = pTemp1;
			pCurrent = p1Begin;
			if(++pCurrent == p1End) // Candidate is the last character; pattern can't fit.
				return p1End;

			// Compare the remainder of the pattern against the text.
			while(*pCurrent == *pTemp)
			{
				if(++pTemp == p2End)    // Whole pattern matched.
					return p1Begin;
				if(++pCurrent == p1End) // Ran out of text mid-pattern.
					return p1End;
			}

			++p1Begin; // Mismatch; resume searching after this candidate.
		}

		return p1Begin;
	}
+
+
	// CharTypeStringRSearch
	// Specialized value_type version of STL find_end() function (which really is a reverse search function).
	// Purpose: find last instance of p2 within p1. Return p1End if not found or if either string is zero length.
	template <typename T, typename Allocator>
	const typename basic_string<T, Allocator>::value_type*
	basic_string<T, Allocator>::CharTypeStringRSearch(const value_type* p1Begin, const value_type* p1End,
													  const value_type* p2Begin, const value_type* p2End)
	{
		// Test for zero length strings, in which case we have a match or a failure,
		// but the return value is the same either way.
		if((p1Begin == p1End) || (p2Begin == p2End))
			return p1Begin;

		// Test for a pattern of length 1.
		if((p2Begin + 1) == p2End)
			return CharTypeStringFindEnd(p1Begin, p1End, *p2Begin);

		// Test for search string length being longer than string length.
		if((p2End - p2Begin) > (p1End - p1Begin))
			return p1End;

		// General case: repeatedly find the last candidate start (last match of
		// the pattern's first character before pSearchEnd) and verify it.
		const value_type* pSearchEnd = (p1End - (p2End - p2Begin) + 1); // One past the last possible match start.
		const value_type* pCurrent1;
		const value_type* pCurrent2;

		while(pSearchEnd != p1Begin)
		{
			// Search for the last occurrence of *p2Begin.
			pCurrent1 = CharTypeStringFindEnd(p1Begin, pSearchEnd, *p2Begin);
			if(pCurrent1 == pSearchEnd) // If the first char of p2 wasn't found,
				return p1End;           // then we immediately have failure.

			// In this case, *pTemp == *p2Begin. So compare the rest.
			pCurrent2 = p2Begin;
			while(*pCurrent1++ == *pCurrent2++)
			{
				if(pCurrent2 == p2End) // Whole pattern matched; return its start.
					return (pCurrent1 - (p2End - p2Begin));
			}

			// A smarter algorithm might know to subtract more than just one,
			// but in most cases it won't make much difference anyway.
			--pSearchEnd;
		}

		return p1End;
	}
+
+
+ // CharTypeStringFindFirstOf
+ // Specialized value_type version of STL find_first_of() function.
+ // This function is much like the C runtime strtok function, except the strings aren't null-terminated.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringFindFirstOf(const value_type* p1Begin, const value_type* p1End,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1Begin != p1End; ++p1Begin)
+ {
+ for(const value_type* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*p1Begin == *pTemp)
+ return p1Begin;
+ }
+ }
+ return p1End;
+ }
+
+
+ // CharTypeStringRFindFirstOf
+ // Specialized value_type version of STL find_first_of() function in reverse.
+ // This function is much like the C runtime strtok function, except the strings aren't null-terminated.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringRFindFirstOf(const value_type* p1RBegin, const value_type* p1REnd,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1RBegin != p1REnd; --p1RBegin)
+ {
+ for(const value_type* pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*(p1RBegin - 1) == *pTemp)
+ return p1RBegin;
+ }
+ }
+ return p1REnd;
+ }
+
+
+
+ // CharTypeStringFindFirstNotOf
+ // Specialized value_type version of STL find_first_not_of() function.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringFindFirstNotOf(const value_type* p1Begin, const value_type* p1End,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1Begin != p1End; ++p1Begin)
+ {
+ const value_type* pTemp;
+ for(pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*p1Begin == *pTemp)
+ break;
+ }
+ if(pTemp == p2End)
+ return p1Begin;
+ }
+ return p1End;
+ }
+
+
+ // CharTypeStringRFindFirstNotOf
+ // Specialized value_type version of STL find_first_not_of() function in reverse.
+ template <typename T, typename Allocator>
+ const typename basic_string<T, Allocator>::value_type*
+ basic_string<T, Allocator>::CharTypeStringRFindFirstNotOf(const value_type* p1RBegin, const value_type* p1REnd,
+ const value_type* p2Begin, const value_type* p2End)
+ {
+ for( ; p1RBegin != p1REnd; --p1RBegin)
+ {
+ const value_type* pTemp;
+ for(pTemp = p2Begin; pTemp != p2End; ++pTemp)
+ {
+ if(*(p1RBegin-1) == *pTemp)
+ break;
+ }
+ if(pTemp == p2End)
+ return p1RBegin;
+ }
+ return p1REnd;
+ }
+
+
+
+
+ // iterator operators
+ template <typename T, typename Allocator>
+ inline bool operator==(const typename basic_string<T, Allocator>::reverse_iterator& r1,
+ const typename basic_string<T, Allocator>::reverse_iterator& r2)
+ {
+ return r1.mpCurrent == r2.mpCurrent;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const typename basic_string<T, Allocator>::reverse_iterator& r1,
+ const typename basic_string<T, Allocator>::reverse_iterator& r2)
+ {
+ return r1.mpCurrent != r2.mpCurrent;
+ }
+
+
+ // Operator +
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ basic_string<T, Allocator> result(cDNI, a.size() + b.size(), const_cast<basic_string<T, Allocator>&>(a).get_allocator()); // Note that we choose to assign a's allocator.
+ result.append(a);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ const typename basic_string<T, Allocator>::size_type n = (typename basic_string<T, Allocator>::size_type)CharStrlen(p);
+ basic_string<T, Allocator> result(cDNI, n + b.size(), const_cast<basic_string<T, Allocator>&>(b).get_allocator());
+ result.append(p, p + n);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(typename basic_string<T, Allocator>::value_type c, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ basic_string<T, Allocator> result(cDNI, 1 + b.size(), const_cast<basic_string<T, Allocator>&>(b).get_allocator());
+ result.push_back(c);
+ result.append(b);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ const typename basic_string<T, Allocator>::size_type n = (typename basic_string<T, Allocator>::size_type)CharStrlen(p);
+ basic_string<T, Allocator> result(cDNI, a.size() + n, const_cast<basic_string<T, Allocator>&>(a).get_allocator());
+ result.append(a);
+ result.append(p, p + n);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ basic_string<T, Allocator> operator+(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type c)
+ {
+ typedef typename basic_string<T, Allocator>::CtorDoNotInitialize CtorDoNotInitialize;
+ CtorDoNotInitialize cDNI; // GCC 2.x forces us to declare a named temporary like this.
+ basic_string<T, Allocator> result(cDNI, a.size() + 1, const_cast<basic_string<T, Allocator>&>(a).get_allocator());
+ result.append(a);
+ result.push_back(c);
+ return result;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool basic_string<T, Allocator>::validate() const
+ {
+ if((mpBegin == NULL) || (mpEnd == NULL))
+ return false;
+ if(mpEnd < mpBegin)
+ return false;
+ if(mpCapacity < mpEnd)
+ return false;
+ return true;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int basic_string<T, Allocator>::validate_iterator(const_iterator i) const
+ {
+ if(i >= mpBegin)
+ {
+ if(i < mpEnd)
+ return (isf_valid | isf_current | isf_can_dereference);
+
+ if(i <= mpEnd)
+ return (isf_valid | isf_current);
+ }
+
+ return isf_none;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ // Operator== and operator!=
+ template <typename T, typename Allocator>
+ inline bool operator==(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return ((a.size() == b.size()) && (memcmp(a.data(), b.data(), (size_t)a.size() * sizeof(typename basic_string<T, Allocator>::value_type)) == 0));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator==(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return ((n == b.size()) && (memcmp(p, b.data(), (size_t)n * sizeof(*p)) == 0));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator==(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return ((a.size() == n) && (memcmp(a.data(), p, (size_t)n * sizeof(*p)) == 0));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return !(p == b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator!=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return !(a == p);
+ }
+
+
+ // Operator< (and also >, <=, and >=).
+ template <typename T, typename Allocator>
+ inline bool operator<(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return basic_string<T, Allocator>::compare(a.begin(), a.end(), b.begin(), b.end()) < 0; }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return basic_string<T, Allocator>::compare(p, p + n, b.begin(), b.end()) < 0;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ typedef typename basic_string<T, Allocator>::size_type size_type;
+ const size_type n = (size_type)CharStrlen(p);
+ return basic_string<T, Allocator>::compare(a.begin(), a.end(), p, p + n) < 0;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return b < p;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return p < a;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return !(b < a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return !(b < p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator<=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return !(p < a);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const basic_string<T, Allocator>& a, const basic_string<T, Allocator>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const typename basic_string<T, Allocator>::value_type* p, const basic_string<T, Allocator>& b)
+ {
+ return !(p < b);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline bool operator>=(const basic_string<T, Allocator>& a, const typename basic_string<T, Allocator>::value_type* p)
+ {
+ return !(a < p);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void swap(basic_string<T, Allocator>& a, basic_string<T, Allocator>& b)
+ {
+ a.swap(b);
+ }
+
+
+ /// string / wstring
+ typedef basic_string<char> string;
+ typedef basic_string<wchar_t> wstring;
+
+ /// string8 / string16 / string32
+ typedef basic_string<char8_t> string8;
+ typedef basic_string<char16_t> string16;
+ typedef basic_string<char32_t> string32;
+
+
+
+ /// hash<string>
+ ///
+ /// We provide EASTL hash function objects for use in hash table containers.
+ ///
+ /// Example usage:
+ /// #include <EASTL/hash_set.h>
+ /// hash_set<string> stringHashSet;
+ ///
+ template <typename T> struct hash;
+
+ template <>
+ struct hash<string>
+ {
+ size_t operator()(const string& x) const
+ {
+ const unsigned char* p = (const unsigned char*)x.c_str(); // To consider: limit p to at most 256 chars.
+ unsigned int c, result = 2166136261U; // We implement an FNV-like string hash.
+ while((c = *p++) != 0) // Using '!=' disables compiler warnings.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+ /// hash<wstring>
+ ///
+ template <>
+ struct hash<wstring>
+ {
+ size_t operator()(const wstring& x) const
+ {
+ const wchar_t* p = (const wchar_t*)x.c_str(); // To consider: limit p to at most 256 chars.
+ unsigned int c, result = 2166136261U; // We implement an FNV-like string hash.
+ while((c = *p++) != 0) // Using '!=' disables compiler warnings.
+ result = (result * 16777619) ^ c;
+ return (size_t)result;
+ }
+ };
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+#endif // EASTL_ABSTRACT_STRING_ENABLED
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/type_traits.h b/UnknownVersion/include/EASTL/type_traits.h
new file mode 100644
index 0000000..42b6c34
--- /dev/null
+++ b/UnknownVersion/include/EASTL/type_traits.h
@@ -0,0 +1,359 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/type_traits.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Specification
+//
+// This file implements C++ type traits as proposed by the emerging C++ update
+// as of May, 2005. This update is known as "Proposed Draft Technical Report
+// on C++ Library Extensions" and is document number n1745. It can be found
+// on the Internet as n1745.pdf and as of this writing it is updated every
+// couple months to reflect current thinking.
+//////////////////////////////////////////////////////////////////////////////
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Description
+//
+// EASTL includes a fairly serious type traits library that is on par with the
+// one found in Boost but offers some additional performance-enhancing help as well.
+// The type_traits library provides information about class types, as opposed to
+// class instances. For example, the is_integral type trait tells if a type is
+// one of int, short, long, char, uint64_t, etc.
+//
+// There are three primary uses of type traits:
+// * Allowing for optimized operations on some data types.
+// * Allowing for different logic pathways based on data types.
+// * Allowing for compile-type assertions about data type expectations.
+//
+// Most of the type traits are automatically detected and implemented by the compiler.
+// However, EASTL allows for the user to explicitly give the compiler hints about
+// type traits that the compiler cannot know, via the EASTL_DECLARE declarations.
+// If the user has a class that is relocatable (i.e. can safely use memcpy to copy values),
+// the user can use the EASTL_DECLARE_TRIVIAL_RELOCATE declaration to tell the compiler
+// that the class can be copied via memcpy. This will automatically significantly speed
+// up some containers and algorithms that use that class.
+//
+// Here is an example of using type traits to tell if a value is a floating point
+// value or not:
+//
+// template <typename T>
+// DoSomething(T t) {
+// assert(is_floating_point<T>::value);
+// }
+//
+// Here is an example of declaring a class as relocatable and using it in a vector.
+//
+// EASTL_DECLARE_TRIVIAL_RELOCATE(Widget); // Usually you put this at the Widget class declaration.
+// vector<Widget> wVector;
+// wVector.erase(wVector.begin()); // This operation will be optimized via using memcpy.
+//
+// The following is a full list of the currently recognized type traits. Most of these
+// are implemented as of this writing, but if there is one that is missing, feel free
+// to contact the maintainer of this library and request that it be completed.
+//
+// Trait Description
+// ------------------------------------------------------------------------------
+// is_void T is void or a cv-qualified (const/volatile-qualified) void.
+// is_integral T is an integral type.
+// is_floating_point T is a floating point type.
+// is_arithmetic T is an arithmetic type (integral or floating point).
+// is_fundamental T is a fundamental type (void, integral, or floating point).
+// is_const T is const-qualified.
+// is_volatile T is volatile-qualified.
+// is_abstract T is an abstract class.
+// is_signed T is a signed integral type.
+// is_unsigned T is an unsigned integral type.
+// is_array T is an array type. The templated array container is not an array type.
+// is_pointer T is a pointer type. Includes function pointers, but not pointers to (data or function) members.
+// is_reference T is a reference type. Includes references to functions.
+// is_member_object_pointer T is a pointer to data member.
+// is_member_function_pointer T is a pointer to member function.
+// is_member_pointer T is a pointer to a member or member function.
+// is_enum T is an enumeration type.
+// is_union T is a union type.
+// is_class T is a class type but not a union type.
+// is_polymorphic T is a polymorphic class.
+// is_function T is a function type.
+// is_object T is an object type.
+// is_scalar T is a scalar type (arithmetic, enum, pointer, member_pointer)
+// is_compound T is a compound type (anything but fundamental).
+// is_same T and U name the same type.
+// is_convertible An imaginary lvalue of type From is implicitly convertible to type To. Special conversions involving string-literals and null-pointer constants are not considered. No function-parameter adjustments are made to type To when determining whether From is convertible to To; this implies that if type To is a function type or an array type, then the condition is false.
+// is_base_of Base is a base class of Derived or Base and Derived name the same type.
+// is_empty T is an empty class.
+// is_pod T is a POD type.
+// *is_aligned Defined as true if the type has alignment requirements greater than default alignment, which is taken to be 8.
+// has_trivial_constructor The default constructor for T is trivial.
+// has_trivial_copy The copy constructor for T is trivial.
+// has_trivial_assign The assignment operator for T is trivial.
+// has_trivial_destructor The destructor for T is trivial.
+// *has_trivial_relocate T can be moved to a new location via bitwise copy.
+// has_nothrow_constructor The default constructor for T has an empty exception specification or can otherwise be deduced never to throw an exception.
+// has_nothrow_copy The copy constructor for T has an empty exception specification or can otherwise be deduced never to throw an exception.
+// has_nothrow_assign The assignment operator for T has an empty exception specification or can otherwise be deduced never to throw an exception.
+// has_virtual_destructor T has a virtual destructor.
+// alignment_of An integer value representing the number of bytes of the alignment of objects of type T; an object of type T may be allocated at an address that is a multiple of its alignment.
+// rank An integer value representing the rank of objects of type T. The term 'rank' here is used to describe the number of dimensions of an array type.
+// extent An integer value representing the extent (dimension) of the I'th bound of objects of type T. If the type T is not an array type, has rank of less than I, or if I == 0 and T is of type 'array of unknown bound of U,' then value shall evaluate to zero; otherwise value shall evaluate to the number of elements in the I'th array bound of T. The term 'extent' here is used to describe the number of elements in an array type.
+// remove_const The member typedef type shall be the same as T except that any top level const-qualifier has been removed. remove_const<const volatile int>::type evaluates to volatile int, whereas remove_const<const int*> is const int*.
+//
+// * is_aligned is not found in Boost nor the C++ standard update proposal.
+//
+// * has_trivial_relocate is not found in Boost nor the C++ standard update proposal.
+// However, it is very useful in allowing for the generation of optimized object
+// moving operations. It is similar to the is_pod type trait, but goes further and
+// allows non-pod classes to be categorized as relocatable. Such categorization is
+// something that no compiler can do, as only the user can know if it is such.
+// Thus EASTL_DECLARE_TRIVIAL_RELOCATE is provided to allow the user to give
+// the compiler a hint.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Requirements
+//
+// As of this writing (5/2005), type_traits here requires a well-conforming
+// C++ compiler with respect to template metaprogramming. To use this library
+// you need to have at least one of the following:
+// MSVC++ 7.1 (includes Win32, XBox 360, Win64, and WinCE platforms)
+// GCC 3.2 (includes Playstation 3, and Linux platforms)
+// Metrowerks 8.0 (includes Playstation 3, Windows, and other platforms)
+// SN Systems (not the GCC 2.95-based compilers)
+// EDG (includes any compiler with EDG as a back-end, such as the Intel compiler)
+// Comeau (this is a C++ to C generator)
+//
+// It may be useful to list the compilers/platforms the current version of
+// type_traits doesn't support:
+// Borland C++ (it simply has too many bugs with respect to templates).
+// GCC 2.96 With a little effort, type_traits can probably be made to work with this compiler.
+//////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Implementation
+//
+// The implementation here is almost entirely based on template metaprogramming.
+// This is whereby you use the compiler's template functionality to define types
+// and values and make compilation decisions based on template declarations.
+// Many of the algorithms here are similar to those found in books such as
+// "Modern C++ Design" and C++ libraries such as Boost. The implementations here
+// are simpler and more straightforward than those found in some libraries, due
+// largely to our assumption that the compiler is good at doing template programming.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_TYPE_TRAITS_H
+#define EASTL_TYPE_TRAITS_H
+
+
+
+#include <EASTL/internal/config.h>
+#include <stddef.h> // Is needed for size_t usage by some traits.
+
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ // integral_constant
+ //
+ // This is the base class for various type traits, as defined by the proposed
+ // C++ standard. This is essentially a utility base class for defining properties
+ // as both class constants (value) and as types (type).
+ //
+ template <typename T, T v>
+ struct integral_constant
+ {
+ static const T value = v;
+ typedef T value_type;
+ typedef integral_constant<T, v> type;
+ };
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // true_type / false_type
+ //
+ // These are commonly used types in the implementation of type_traits.
+ // Other integral constant types can be defined, such as those based on int.
+ //
+ typedef integral_constant<bool, true> true_type;
+ typedef integral_constant<bool, false> false_type;
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // yes_type / no_type
+ //
+ // These are used as a utility to differentiate between two things.
+ //
+ typedef char yes_type; // sizeof(yes_type) == 1
+ struct no_type { char padding[8]; }; // sizeof(no_type) != 1
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_select
+ //
+ // This is used to declare a type from one of two type options.
+ // The result is based on the condition type. This has certain uses
+ // in template metaprogramming.
+ //
+ // Example usage:
+    //    typedef type_select<is_integral<SomeType>::value, ChoiceAType, ChoiceBType>::type ChosenType;
+ //
+ template <bool bCondition, class ConditionIsTrueType, class ConditionIsFalseType>
+ struct type_select { typedef ConditionIsTrueType type; };
+
+ template <typename ConditionIsTrueType, class ConditionIsFalseType>
+ struct type_select<false, ConditionIsTrueType, ConditionIsFalseType> { typedef ConditionIsFalseType type; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_or
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <bool b1, bool b2, bool b3 = false, bool b4 = false, bool b5 = false>
+ struct type_or;
+
+ template <bool b1, bool b2, bool b3, bool b4, bool b5>
+ struct type_or { static const bool value = true; };
+
+ template <>
+ struct type_or<false, false, false, false, false> { static const bool value = false; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_and
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <bool b1, bool b2, bool b3 = true, bool b4 = true, bool b5 = true>
+ struct type_and;
+
+ template <bool b1, bool b2, bool b3, bool b4, bool b5>
+ struct type_and{ static const bool value = false; };
+
+ template <>
+ struct type_and<true, true, true, true, true>{ static const bool value = true; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_equal
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <int b1, int b2>
+ struct type_equal{ static const bool value = (b1 == b2); };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_not_equal
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <int b1, int b2>
+ struct type_not_equal{ static const bool value = (b1 != b2); };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // type_not
+ //
+ // This is a utility class for creating composite type traits.
+ //
+ template <bool b>
+ struct type_not{ static const bool value = true; };
+
+ template <>
+ struct type_not<true>{ static const bool value = false; };
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // empty
+ //
+ template <typename T>
+ struct empty{ };
+
+
+} // namespace eastl
+
+
+// The following files implement the type traits themselves.
+#if defined(__GNUC__) && (__GNUC__ <= 2)
+ #include <EASTL/internal/compat/type_fundamental.h>
+ #include <EASTL/internal/compat/type_transformations.h>
+ #include <EASTL/internal/compat/type_properties.h>
+ #include <EASTL/internal/compat/type_compound.h>
+ #include <EASTL/internal/compat/type_pod.h>
+#else
+ #include <EASTL/internal/type_fundamental.h>
+ #include <EASTL/internal/type_transformations.h>
+ #include <EASTL/internal/type_properties.h>
+ #include <EASTL/internal/type_compound.h>
+ #include <EASTL/internal/type_pod.h>
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/utility.h b/UnknownVersion/include/EASTL/utility.h
new file mode 100644
index 0000000..e2937c1
--- /dev/null
+++ b/UnknownVersion/include/EASTL/utility.h
@@ -0,0 +1,312 @@
+/*
+Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/utility.h
+// Written and maintained by Paul Pedriana - 2005.
+///////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef EASTL_UTILITY_H
+#define EASTL_UTILITY_H
+
+
+#include <EASTL/internal/config.h>
+
+
+#ifdef _MSC_VER
+ #pragma warning(push) // VC++ generates a bogus warning that you cannot code away.
+ #pragma warning(disable: 4619) // There is no warning number 'number'.
+ #pragma warning(disable: 4217) // Member template functions cannot be used for copy-assignment or copy-construction.
+ #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated. // This disabling would best be put elsewhere.
+#endif
+
+
+namespace eastl
+{
+
+ ///////////////////////////////////////////////////////////////////////
+ /// rel_ops
+ ///
+ /// rel_ops allow the automatic generation of operators !=, >, <=, >= from
+ /// just operators == and <. These are intentionally in the rel_ops namespace
+ /// so that they don't conflict with other similar operators. To use these
+    /// operators, add "using namespace eastl::rel_ops;" to an appropriate place in
+ /// your code, usually right in the function that you need them to work.
+ /// In fact, you will very likely have collision problems if you put such
+ /// using statements anywhere other than in the .cpp file like so and may
+ /// also have collisions when you do, as the using statement will affect all
+ /// code in the module. You need to be careful about use of rel_ops.
+ ///
+ namespace rel_ops
+ {
+ template <typename T>
+ inline bool operator!=(const T& x, const T& y)
+ { return !(x == y); }
+
+ template <typename T>
+ inline bool operator>(const T& x, const T& y)
+ { return (y < x); }
+
+ template <typename T>
+ inline bool operator<=(const T& x, const T& y)
+ { return !(y < x); }
+
+ template <typename T>
+ inline bool operator>=(const T& x, const T& y)
+ { return !(x < y); }
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// pair
+ ///
+ /// Implements a simple pair, just like the C++ std::pair.
+ ///
+ template <typename T1, typename T2>
+ struct pair
+ {
+ typedef T1 first_type;
+ typedef T2 second_type;
+
+ T1 first;
+ T2 second;
+
+ pair();
+ pair(const T1& x);
+ pair(const T1& x, const T2& y);
+
+ template <typename U, typename V>
+ pair(const pair<U, V>& p);
+
+ // pair(const pair& p); // Not necessary, as default version is OK.
+ // pair& operator=(const pair& p); // Not necessary, as default version is OK.
+ };
+
+
+
+
+ /// use_self
+ ///
+ /// operator()(x) simply returns x. Used in sets, as opposed to maps.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_first template implementation.
+ ///
+    /// The existence of use_self may seem odd, given that it does nothing,
+ /// but these kinds of things are useful, virtually required, for optimal
+ /// generic programming.
+ ///
+ template <typename T>
+ struct use_self // : public unary_function<T, T> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef T result_type;
+
+ const T& operator()(const T& x) const
+ { return x; }
+ };
+
+ /// use_first
+ ///
+ /// operator()(x) simply returns x.first. Used in maps, as opposed to sets.
+ /// This is a template policy implementation; it is an alternative to
+ /// the use_self template implementation. This is the same thing as the
+    /// SGI STL select1st utility.
+ ///
+ template <typename Pair>
+ struct use_first // : public unary_function<Pair, typename Pair::first_type> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef typename Pair::first_type result_type;
+
+ const result_type& operator()(const Pair& x) const
+ { return x.first; }
+ };
+
+ /// use_second
+ ///
+ /// operator()(x) simply returns x.second.
+    /// This is the same thing as the SGI STL select2nd utility
+ ///
+ template <typename Pair>
+ struct use_second // : public unary_function<Pair, typename Pair::second_type> // Perhaps we want to make it a subclass of unary_function.
+ {
+ typedef typename Pair::second_type result_type;
+
+ const result_type& operator()(const Pair& x) const
+ { return x.second; }
+ };
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // pair
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T1, typename T2>
+ inline pair<T1, T2>::pair()
+ : first(), second()
+ {
+ // Empty
+ }
+
+
+ template <typename T1, typename T2>
+ inline pair<T1, T2>::pair(const T1& x)
+ : first(x), second()
+ {
+ // Empty
+ }
+
+
+ template <typename T1, typename T2>
+ inline pair<T1, T2>::pair(const T1& x, const T2& y)
+ : first(x), second(y)
+ {
+ // Empty
+ }
+
+
+ template <typename T1, typename T2>
+ template <typename U, typename V>
+ inline pair<T1, T2>::pair(const pair<U, V>& p)
+ : first(p.first), second(p.second)
+ {
+ // Empty
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T1, typename T2>
+ inline bool operator==(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return ((a.first == b.first) && (a.second == b.second));
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator<(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ // Note that we use only operator < in this expression. Otherwise we could
+ // use the simpler: return (a.m1 == b.m1) ? (a.m2 < b.m2) : (a.m1 < b.m1);
+ // The user can write a specialization for this operator to get around this
+ // in cases where the highest performance is required.
+ return ((a.first < b.first) || (!(b.first < a.first) && (a.second < b.second)));
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator!=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(a == b);
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator>(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return b < a;
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator>=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(a < b);
+ }
+
+
+ template <typename T1, typename T2>
+ inline bool operator<=(const pair<T1, T2>& a, const pair<T1, T2>& b)
+ {
+ return !(b < a);
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ /// make_pair / make_pair_ref
+ ///
+ /// make_pair is the same as std::make_pair specified by the C++ standard.
+ /// If you look at the C++ standard, you'll see that it specifies T& instead of T.
+ /// However, it has been determined that the C++ standard is incorrect and has
+ /// flagged it as a defect (http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-defects.html#181).
+ /// In case you feel that you want a more efficient version that uses references,
+ /// we provide the make_pair_ref function below.
+ ///
+ /// Note: You don't need to use make_pair in order to make a pair. The following
+ /// code is equivalent, and the latter avoids one more level of inlining:
+ /// return make_pair(charPtr, charPtr);
+ /// return pair<char*, char*>(charPtr, charPtr);
+ ///
+ template <typename T1, typename T2>
+ inline pair<T1, T2> make_pair(T1 a, T2 b)
+ {
+ return pair<T1, T2>(a, b);
+ }
+
+
+ template <typename T1, typename T2>
+ inline pair<T1, T2> make_pair_ref(const T1& a, const T2& b)
+ {
+ return pair<T1, T2>(a, b);
+ }
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+ #pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/vector.h b/UnknownVersion/include/EASTL/vector.h
new file mode 100644
index 0000000..4791ea7
--- /dev/null
+++ b/UnknownVersion/include/EASTL/vector.h
@@ -0,0 +1,1649 @@
+/*
+Copyright (C) 2009-2010 Electronic Arts, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+///////////////////////////////////////////////////////////////////////////////
+// EASTL/vector.h
+//
+// Copyright (c) 2005, Electronic Arts. All rights reserved.
+// Written and maintained by Paul Pedriana.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// This file implements a vector (array-like container), much like the C++
+// std::vector class.
+// The primary distinctions between this vector and std::vector are:
+// - vector has a couple extension functions that increase performance.
+// - vector can contain objects with alignment requirements. std::vector
+// cannot do so without a bit of tedious non-portable effort.
+// - vector supports debug memory naming natively.
+// - vector is easier to read, debug, and visualize.
+// - vector is savvy to an environment that doesn't have exception handling,
+// as is sometimes the case with console or embedded environments.
+// - vector has less deeply nested function calls and allows the user to
+// enable forced inlining in debug builds in order to reduce bloat.
+// - vector<bool> is a vector of boolean values and not a bit vector.
+// - vector guarantees that memory is contiguous and that vector::iterator
+// is nothing more than a pointer to T.
+// - vector has an explicit data() method for obtaining a pointer to storage
+// which is safe to call even if the block is empty. This avoids the
+// common &v[0], &v.front(), and &*v.begin() constructs that trigger false
+// asserts in STL debugging modes.
+// - vector::size_type is defined as eastl_size_t instead of size_t in order to
+// save memory and run faster on 64 bit systems.
+// - vector data is guaranteed to be contiguous.
+// - vector has a set_capacity() function which frees excess capacity.
+// The only way to do this with std::vector is via the cryptic non-obvious
+// trick of using: vector<SomeClass>(x).swap(x);
+///////////////////////////////////////////////////////////////////////////////
+
+
+#ifndef EASTL_VECTOR_H
+#define EASTL_VECTOR_H
+
+
+#include <EASTL/internal/config.h>
+#include <EASTL/allocator.h>
+#include <EASTL/type_traits.h>
+#include <EASTL/iterator.h>
+#include <EASTL/algorithm.h>
+#include <EASTL/memory.h>
+
+#ifdef _MSC_VER
+# pragma warning(push, 0)
+# include <new>
+# include <stddef.h>
+# pragma warning(pop)
+#else
+# include <new>
+# include <stddef.h>
+#endif
+
+#if EASTL_EXCEPTIONS_ENABLED
+# ifdef _MSC_VER
+# pragma warning(push, 0)
+# endif
+# include <stdexcept> // std::out_of_range, std::length_error.
+# ifdef _MSC_VER
+# pragma warning(pop)
+# endif
+#endif
+
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
+# pragma warning(disable: 4345) // Behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized
+# pragma warning(disable: 4244) // Argument: conversion from 'int' to 'const eastl::vector<T>::value_type', possible loss of data
+# pragma warning(disable: 4127) // Conditional expression is constant
+# pragma warning(disable: 4480) // nonstandard extension used: specifying underlying type for enum
+#endif
+
+
+namespace eastl
+{
+
+ /// EASTL_VECTOR_DEFAULT_NAME
+ ///
+ /// Defines a default container name in the absence of a user-provided name.
+ ///
+#ifndef EASTL_VECTOR_DEFAULT_NAME
+# define EASTL_VECTOR_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " vector" // Unless the user overrides something, this is "EASTL vector".
+#endif
+
+
+ /// EASTL_VECTOR_DEFAULT_ALLOCATOR
+ ///
+#ifndef EASTL_VECTOR_DEFAULT_ALLOCATOR
+# define EASTL_VECTOR_DEFAULT_ALLOCATOR allocator_type(EASTL_VECTOR_DEFAULT_NAME)
+#endif
+
+
+
    /// VectorBase
    ///
    /// The reason we have a VectorBase class is that it makes exception handling
    /// simpler to implement because memory allocation is implemented entirely
    /// in this class. If a user creates a vector which needs to allocate
    /// memory in the constructor, VectorBase handles it. If an exception is thrown
    /// by the allocator then the exception throw jumps back to the user code and
    /// no try/catch code need be written in the vector or VectorBase constructor.
    /// If an exception is thrown in the vector (not VectorBase) constructor, the
    /// destructor for VectorBase will be called automatically (and free the allocated
    /// memory) before the execution jumps back to the user code.
    /// However, if the vector class were to handle both allocation and initialization
    /// then it would have no choice but to implement an explicit try/catch statement
    /// for all pathways that allocate memory. This increases code size and decreases
    /// performance and makes the code a little harder read and maintain.
    ///
    /// The C++ standard (15.2 paragraph 2) states:
    ///    "An object that is partially constructed or partially destroyed will
    ///     have destructors executed for all its fully constructed subobjects,
    ///     that is, for subobjects for which the constructor has been completed
    ///     execution and the destructor has not yet begun execution."
    ///
    /// The C++ standard (15.3 paragraph 11) states:
    ///    "The fully constructed base classes and members of an object shall
    ///     be destroyed before entering the handler of a function-try-block
    ///     of a constructor or destructor for that block."
    ///
    template <typename T, typename Allocator>
    struct VectorBase
    {
        typedef Allocator    allocator_type;
        typedef eastl_size_t size_type;       // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
        typedef ptrdiff_t    difference_type;

#if defined(_MSC_VER) && (_MSC_VER >= 1400) // _MSC_VER of 1400 means VC8 (VS2005), 1500 means VC9 (VS2008)
        enum : size_type {                  // Use Microsoft enum language extension, allowing for smaller debug symbols than using a static const. Users have been affected by this.
            npos     = (size_type)-1,
            kMaxSize = (size_type)-2
        };
#else
        static const size_type npos     = (size_type)-1;  /// 'npos' means non-valid position or simply non-position.
        static const size_type kMaxSize = (size_type)-2;  /// -1 is reserved for 'npos'. It also happens to be slightly beneficial that kMaxSize is a value less than -1, as it helps us deal with potential integer wraparound issues.
#endif

        enum
        {
            kAlignment       = EASTL_ALIGN_OF(T),  // Alignment passed to the allocator for each block.
            kAlignmentOffset = 0
        };

    protected:
        // Invariant: mpBegin <= mpEnd <= mpCapacity. Elements in
        // [mpBegin, mpEnd) are constructed; [mpEnd, mpCapacity) is raw
        // allocated storage. All three are NULL for an empty vector.
        T*             mpBegin;     // Start of the allocation / first element.
        T*             mpEnd;       // One past the last constructed element.
        T*             mpCapacity;  // One past the end of the allocation.
        allocator_type mAllocator;  // To do: Use base class optimization to make this go away.

    public:
        VectorBase();
        VectorBase(const allocator_type& allocator);
        VectorBase(size_type n, const allocator_type& allocator);  // Allocates storage for n elements; does not construct them.

        ~VectorBase();  // Frees the storage only; element destruction is the derived class's job.

        allocator_type& get_allocator();
        void            set_allocator(const allocator_type& allocator);

    protected:
        T*        DoAllocate(size_type n);          // Raw allocation of n*sizeof(T) bytes; NULL when n == 0.
        void      DoFree(T* p, size_type n);        // Frees a block previously returned by DoAllocate.
        size_type GetNewCapacity(size_type currentCapacity);  // Growth policy for reallocation.

    }; // VectorBase
+
+
+
+
    /// vector
    ///
    /// Implements a dynamic array, much like std::vector. Storage is
    /// contiguous and iterator is a plain T*. Memory management lives in
    /// the VectorBase base class (see its comment above); this class owns
    /// element construction/destruction and the public container API.
    ///
    template <typename T, typename Allocator = EASTLAllocatorType>
    class vector : public VectorBase<T, Allocator>
    {
        typedef VectorBase<T, Allocator>                      base_type;
        typedef vector<T, Allocator>                          this_type;

    public:
        typedef T                                             value_type;
        typedef T*                                            pointer;
        typedef const T*                                      const_pointer;
        typedef T&                                            reference;
        typedef const T&                                      const_reference;  // Maintainer note: We want to leave iterator defined as T* -- at least in release builds -- as this gives some algorithms an advantage that optimizers cannot get around.
        typedef T*                                            iterator;         // Note: iterator is simply T* right now, but this will likely change in the future, at least for debug builds.
        typedef const T*                                      const_iterator;   // Do not write code that relies on iterator being T*. The reason it will
        typedef eastl::reverse_iterator<iterator>             reverse_iterator; // change in the future is that a debugging iterator system will be created.
        typedef eastl::reverse_iterator<const_iterator>       const_reverse_iterator;
        typedef typename base_type::size_type                 size_type;
        typedef typename base_type::difference_type           difference_type;
        typedef typename base_type::allocator_type            allocator_type;

        using base_type::mpBegin;
        using base_type::mpEnd;
        using base_type::mpCapacity;
        using base_type::mAllocator;
        using base_type::npos;
        using base_type::GetNewCapacity;
        using base_type::DoAllocate;
        using base_type::DoFree;

    public:
        // Constructors. The (n) and (n, value) forms construct n elements up front.
        vector();
        explicit vector(const allocator_type& allocator);
        explicit vector(size_type n, const allocator_type& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR);
        vector(size_type n, const value_type& value, const allocator_type& allocator = EASTL_VECTOR_DEFAULT_ALLOCATOR);
        vector(const this_type& x);

#ifdef EA_COMPILER_HAS_MOVE_SEMANTICS
        // Move operations; the moved-from vector is left valid but unspecified.
        vector(this_type&& x);
        this_type& operator =(this_type&& x);
        iterator insert(iterator position, value_type&& value);
        void push_back(value_type&& x);
#  ifdef EA_COMPILER_HAS_VARIADIC_TEMPLATES
        /*
        template<class ... Args>
        iterator emplace(const_iterator pos, Args&& ... args);
        template<class ... Args>
        iterator emplace_back(const_iterator pos, Args&& ... args);
        */
#  endif
#endif

        template <typename InputIterator>
        vector(InputIterator first, InputIterator last); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.

        ~vector();

        this_type& operator=(const this_type& x);
        void swap(this_type& x);

        void assign(size_type n, const value_type& value);

        template <typename InputIterator>
        void assign(InputIterator first, InputIterator last);

        iterator       begin();
        const_iterator begin() const;

        iterator       end();
        const_iterator end() const;

        reverse_iterator       rbegin();
        const_reverse_iterator rbegin() const;

        reverse_iterator       rend();
        const_reverse_iterator rend() const;

        bool      empty() const;
        size_type size() const;
        size_type capacity() const;

        void resize(size_type n, const value_type& value);
        void resize(size_type n);
        void reserve(size_type n);
        void set_capacity(size_type n = base_type::npos); // Revises the capacity to the user-specified value. Resizes the container to match the capacity if the requested capacity n is less than the current size. If n == npos then the capacity is reallocated (if necessary) such that capacity == size.

        // data() is safe to call on an empty vector, unlike &front() / &v[0].
        pointer       data();
        const_pointer data() const;

        reference       operator[](size_type n);
        const_reference operator[](size_type n) const;

        // at() is the range-checked accessor (throws/asserts on bad index).
        reference       at(size_type n);
        const_reference at(size_type n) const;

        reference       front();
        const_reference front() const;

        reference       back();
        const_reference back() const;

        void      push_back(const value_type& value);
        reference push_back();              // Appends a default-constructed element and returns a reference to it.
        void*     push_back_uninitialized();// Appends raw, unconstructed storage; caller must placement-construct into it.

        void pop_back();

        // O(1) unordered erase: overwrites element idx with the last element, then pops the back.
        void replace_with_last(size_t idx);

        iterator insert(iterator position, const value_type& value);
        void     insert(iterator position, size_type n, const value_type& value);

        template <typename InputIterator>
        void insert(iterator position, InputIterator first, InputIterator last);

        iterator erase(iterator position);
        iterator erase(iterator first, iterator last);

        reverse_iterator erase(reverse_iterator position);
        reverse_iterator erase(reverse_iterator first, reverse_iterator last);

        void clear();
        void clear_unsafe();  /// clear but don't call destructors
        void reset();         /// This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.

        bool validate() const;               // Sanity-checks the internal pointer invariants.
        int  validate_iterator(const_iterator i) const;

    protected:
        // These functions do the real work of maintaining the vector. You will notice
        // that many of them have the same name but are specialized on iterator_tag
        // (iterator categories). This is because in these cases there is an optimized
        // implementation that can be had for some cases relative to others. Functions
        // which aren't referenced are neither compiled nor linked into the application.

        template <typename ForwardIterator>
        pointer DoRealloc(size_type n, ForwardIterator first, ForwardIterator last);

        template <typename Integer>
        void DoInit(Integer n, Integer value, true_type);

        template <typename InputIterator>
        void DoInit(InputIterator first, InputIterator last, false_type);

        template <typename InputIterator>
        void DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag);

        template <typename ForwardIterator>
        void DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag);

        void DoDestroyValues(pointer first, pointer last);

        template <typename Integer>
        void DoAssign(Integer n, Integer value, true_type);

        template <typename InputIterator>
        void DoAssign(InputIterator first, InputIterator last, false_type);

        void DoAssignValues(size_type n, const value_type& value);

        template <typename InputIterator>
        void DoAssignFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag);

        template <typename RandomAccessIterator>
        void DoAssignFromIterator(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag);

        template <typename Integer>
        void DoInsert(iterator position, Integer n, Integer value, true_type);

        template <typename InputIterator>
        void DoInsert(iterator position, InputIterator first, InputIterator last, false_type);

        template <typename InputIterator>
        void DoInsertFromIterator(iterator position, InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag);

        template <typename BidirectionalIterator>
        void DoInsertFromIterator(iterator position, BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag);

        void DoInsertValues(iterator position, size_type n, const value_type& value);

        void DoInsertValue(iterator position, const value_type& value);

    }; // class vector
+
+
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // VectorBase
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::VectorBase()
+ : mpBegin(NULL),
+ mpEnd(NULL),
+ mpCapacity(NULL),
+ mAllocator(EASTL_VECTOR_DEFAULT_NAME)
+ {
+ }
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::VectorBase(const allocator_type& allocator)
+ : mpBegin(NULL),
+ mpEnd(NULL),
+ mpCapacity(NULL),
+ mAllocator(allocator)
+ {
+ }
+
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::VectorBase(size_type n, const allocator_type& allocator)
+ : mAllocator(allocator)
+ {
+ mpBegin = DoAllocate(n);
+ mpEnd = mpBegin;
+ mpCapacity = mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline VectorBase<T, Allocator>::~VectorBase()
+ {
+ if(mpBegin)
+ EASTLFree(mAllocator, mpBegin, (mpCapacity - mpBegin) * sizeof(T));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename VectorBase<T, Allocator>::allocator_type&
+ VectorBase<T, Allocator>::get_allocator()
+ {
+ return mAllocator;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void VectorBase<T, Allocator>::set_allocator(const allocator_type& allocator)
+ {
+ mAllocator = allocator;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline T* VectorBase<T, Allocator>::DoAllocate(size_type n)
+ {
+#if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(n >= 0x80000000))
+ EASTL_FAIL_MSG("vector::DoAllocate -- improbably large request.");
+#endif
+
+ // If n is zero, then we allocate no memory and just return NULL.
+ // This is fine, as our default ctor initializes with NULL pointers.
+ return n ? (T*)allocate_memory(mAllocator, n * sizeof(T), kAlignment, kAlignmentOffset) : NULL;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void VectorBase<T, Allocator>::DoFree(T* p, size_type n)
+ {
+ if(p)
+ EASTLFree(mAllocator, p, n * sizeof(T));
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename VectorBase<T, Allocator>::size_type
+ VectorBase<T, Allocator>::GetNewCapacity(size_type currentCapacity)
+ {
+ // This needs to return a value of at least currentCapacity and at least 1.
+ return (currentCapacity > 0) ? (2 * currentCapacity) : 1;
+ }
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // vector
+ ///////////////////////////////////////////////////////////////////////
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector()
+ : base_type()
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(const allocator_type& allocator)
+ : base_type(allocator)
+ {
+ // Empty
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(size_type n, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ eastl::uninitialized_fill_n_ptr(mpBegin, n, value_type());
+ mpEnd = mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(size_type n, const value_type& value, const allocator_type& allocator)
+ : base_type(n, allocator)
+ {
+ eastl::uninitialized_fill_n_ptr(mpBegin, n, value);
+ mpEnd = mpBegin + n;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline vector<T, Allocator>::vector(const this_type& x)
+ : base_type(x.size(), x.mAllocator)
+ {
+ mpEnd = eastl::uninitialized_copy_ptr(x.mpBegin, x.mpEnd, mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline vector<T, Allocator>::vector(InputIterator first, InputIterator last)
+ : base_type(EASTL_VECTOR_DEFAULT_ALLOCATOR)
+ {
+ DoInit(first, last, is_integral<InputIterator>());
+ }
+
#ifdef EA_COMPILER_HAS_MOVE_SEMANTICS
    /// Move constructor. We default-construct to an empty state and then
    /// steal x's contents via swap; x is left empty but valid.
    /// Note: a self-check is unnecessary here -- an object cannot be
    /// move-constructed from itself, so the old `if(&x == this)` test was
    /// dead code and has been removed.
    template <typename T, typename Allocator>
    inline vector<T, Allocator>::vector(vector<T, Allocator>&& x)
        : base_type()
    {
        swap(x);
    }

    /// Move assignment. We hand our current contents to x via swap; they are
    /// destroyed (and their memory freed) when x is destroyed.
    /// Bug fix: the previous implementation set mpBegin/mpEnd/mpCapacity to
    /// NULL before swapping, which leaked the existing buffer and skipped the
    /// element destructors (the base destructor call it relied on was
    /// commented out). Swapping instead transfers ownership of the old
    /// contents to x, whose destructor performs the cleanup.
    template <typename T, typename Allocator>
    inline vector<T, Allocator>& vector<T, Allocator>::operator =(vector<T, Allocator>&& x)
    {
        if(&x != this)
            swap(x); // x now owns our old contents and will clean them up.
        return *this;
    }

    /// insert (rvalue)
    /// Inserts value before position. When appending at the back with spare
    /// capacity the element is now move-constructed in place (it was
    /// copy-constructed before). The general path still copies, because
    /// DoInsertValue takes const& -- TODO: add a move-aware overload.
    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::iterator vector<T, Allocator>::insert(
        typename vector<T, Allocator>::iterator const position, value_type&& value)
    {
#if EASTL_ASSERT_ENABLED
        if (EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
            EASTL_FAIL_MSG("vector::insert -- invalid position");
#endif

        const ptrdiff_t n = position - mpBegin; // Save this because we might reallocate.

        if ((mpEnd == mpCapacity) || (position != mpEnd))
            DoInsertValue(position, value); // Copying path; see TODO above.
        else
            ::new(mpEnd++) value_type(static_cast<value_type&&>(value)); // Move-construct in place.

        return mpBegin + n;
    }

    /// push_back (rvalue)
    /// Bug fix: the previous body was `push_back() = x;`, which copy-assigned
    /// from the rvalue (a named rvalue reference is an lvalue). Casting back
    /// to value_type&& makes the assignment a move.
    template <typename T, typename Allocator>
    inline void vector<T, Allocator>::push_back(T&& x) {
        push_back() = static_cast<T&&>(x);
    }
# ifdef EA_COMPILER_HAS_VARIADIC_TEMPLATES
    /*
    template <typename T, typename Allocator>
    template<class ... Args>
    typename vector<T, Allocator>::iterator vector<T, Allocator>::emplace(const_iterator pos, Args&& ... args);

    template <typename T, typename Allocator>
    template<class ... Args>
    typename vector<T, Allocator>::iterator vector<T, Allocator>::emplace_back(const_iterator pos, Args&& ... args);
    */
# endif
#endif
+
    template <typename T, typename Allocator>
    inline vector<T, Allocator>::~vector()
    {
        // Call destructor for the values. Parent class (VectorBase) will
        // free the memory block in its own destructor afterward.
        DoDestroyValues(mpBegin, mpEnd);
    }
+
+
    /// operator= (copy assignment)
    /// Three cases, chosen to reuse existing storage and elements when possible:
    ///   1) source larger than our capacity -> allocate/copy a new buffer,
    ///      then destroy and free the old one;
    ///   2) source fits but exceeds our size -> copy-assign over the live
    ///      elements, copy-construct the remainder into raw capacity;
    ///   3) source no larger than our size -> copy-assign over a prefix and
    ///      destroy the surplus elements.
    template <typename T, typename Allocator>
    typename vector<T, Allocator>::this_type&
    vector<T, Allocator>::operator=(const this_type& x)
    {
        if(&x != this)
        {
#if EASTL_ALLOCATOR_COPY_ENABLED
            mAllocator = x.mAllocator;
#endif

            const size_type n = x.size();

            if(n > size_type(mpCapacity - mpBegin)) // If n > capacity ...
            {
                // Build the new buffer before tearing down the old one, so a
                // throwing element copy leaves us unchanged.
                pointer const pNewData = DoRealloc(n, x.mpBegin, x.mpEnd);
                DoDestroyValues(mpBegin, mpEnd);
                DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));
                mpBegin = pNewData;
                mpCapacity = mpBegin + n;
            }
            else if(n > size_type(mpEnd - mpBegin)) // If size < n <= capacity ...
            {
                eastl::copy(x.mpBegin, x.mpBegin + (mpEnd - mpBegin), mpBegin);
                eastl::uninitialized_copy_ptr(x.mpBegin + (mpEnd - mpBegin), x.mpEnd, mpEnd);
            }
            else // else n <= size
            {
                iterator const position = eastl::copy(x.mpBegin, x.mpEnd, mpBegin);
                DoDestroyValues(position, mpEnd);
            }
            mpEnd = mpBegin + n;
        }
        return *this;
    }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::assign(size_type n, const value_type& value)
+ {
+ DoAssignValues(n, value);
+ }
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void vector<T, Allocator>::assign(InputIterator first, InputIterator last)
+ {
+ // It turns out that the C++ std::vector<int, int> specifies a two argument
+ // version of assign that takes (int size, int value). These are not iterators,
+ // so we need to do a template compiler trick to do the right thing.
+ DoAssign(first, last, is_integral<InputIterator>());
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::begin()
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_iterator
+ vector<T, Allocator>::begin() const
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::iterator
+ vector<T, Allocator>::end()
+ {
+ return mpEnd;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_iterator
+ vector<T, Allocator>::end() const
+ {
+ return mpEnd;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::rbegin()
+ {
+ return reverse_iterator(mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reverse_iterator
+ vector<T, Allocator>::rbegin() const
+ {
+ return const_reverse_iterator(mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::reverse_iterator
+ vector<T, Allocator>::rend()
+ {
+ return reverse_iterator(mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_reverse_iterator
+ vector<T, Allocator>::rend() const
+ {
+ return const_reverse_iterator(mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ bool vector<T, Allocator>::empty() const
+ {
+ return (mpBegin == mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::size_type
+ vector<T, Allocator>::size() const
+ {
+ return (size_type)(mpEnd - mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::size_type
+ vector<T, Allocator>::capacity() const
+ {
+ return (size_type)(mpCapacity - mpBegin);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::resize(size_type n, const value_type& value)
+ {
+ if(n > (size_type)(mpEnd - mpBegin)) // We expect that more often than not, resizes will be upsizes.
+ insert(mpEnd, n - ((size_type)(mpEnd - mpBegin)), value);
+ else
+ erase(mpBegin + n, mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::resize(size_type n)
+ {
+ // Alternative implementation:
+ // resize(n, value_type());
+
+ if(n > (size_type)(mpEnd - mpBegin)) // We expect that more often than not, resizes will be upsizes.
+ insert(mpEnd, n - ((size_type)(mpEnd - mpBegin)), value_type());
+ else
+ erase(mpBegin + n, mpEnd);
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::reserve(size_type n)
+ {
+ // If the user wants to reduce the reserved memory, there is the set_capacity function.
+ if(n > size_type(mpCapacity - mpBegin)) // If n > capacity ...
+ {
+ // To consider: fold this reserve implementation with the set_capacity
+ // implementation below. But we need to be careful to not call resize
+ // in the implementation, as that would require the user to have a
+ // default constructor, which we are trying to avoid.
+ pointer const pNewData = DoRealloc(n, mpBegin, mpEnd);
+ DoDestroyValues(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));
+
+ const ptrdiff_t nPrevSize = mpEnd - mpBegin;
+ mpBegin = pNewData;
+ mpEnd = pNewData + nPrevSize;
+ mpCapacity = mpBegin + n;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ void vector<T, Allocator>::set_capacity(size_type n)
+ {
+ if((n == npos) || (n <= (size_type)(mpEnd - mpBegin))) // If new capacity <= size...
+ {
+ if(n < (size_type)(mpEnd - mpBegin))
+ resize(n);
+
+ this_type temp(*this); // This is the simplest way to accomplish this,
+ swap(temp); // and it is as efficient as any other.
+ }
+ else // Else new capacity > size.
+ {
+ pointer const pNewData = DoRealloc(n, mpBegin, mpEnd);
+ DoDestroyValues(mpBegin, mpEnd);
+ DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));
+
+ const ptrdiff_t nPrevSize = mpEnd - mpBegin;
+ mpBegin = pNewData;
+ mpEnd = pNewData + nPrevSize;
+ mpCapacity = mpBegin + n;
+ }
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::pointer
+ vector<T, Allocator>::data()
+ {
+ return mpBegin;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline typename vector<T, Allocator>::const_pointer
+ vector<T, Allocator>::data() const
+ {
+ return mpBegin;
+ }
+
+
    /// operator[]
    /// Unchecked element access in release builds; asserts on out-of-range
    /// indexes when assertions are enabled.
    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::reference
    vector<T, Allocator>::operator[](size_type n)
    {
#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED // We allow the user to use a reference to v[0] of an empty container.
        if(EASTL_UNLIKELY((n != 0) && (n >= (static_cast<size_type>(mpEnd - mpBegin)))))
            EASTL_FAIL_MSG("vector::operator[] -- out of range");
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
            EASTL_FAIL_MSG("vector::operator[] -- out of range");
#endif

        return *(mpBegin + n);
    }


    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::const_reference
    vector<T, Allocator>::operator[](size_type n) const
    {
#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED // We allow the user to use a reference to v[0] of an empty container.
        if(EASTL_UNLIKELY((n != 0) && (n >= (static_cast<size_type>(mpEnd - mpBegin)))))
            EASTL_FAIL_MSG("vector::operator[] -- out of range");
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
            EASTL_FAIL_MSG("vector::operator[] -- out of range");
#endif

        return *(mpBegin + n);
    }
+
+
    /// at
    /// Range-checked element access: throws std::out_of_range when exceptions
    /// are enabled, otherwise asserts (when assertions are enabled).
    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::reference
    vector<T, Allocator>::at(size_type n)
    {
#if EASTL_EXCEPTIONS_ENABLED
        if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
            throw std::out_of_range("vector::at -- out of range");
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
            EASTL_FAIL_MSG("vector::at -- out of range");
#endif

        return *(mpBegin + n);
    }


    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::const_reference
    vector<T, Allocator>::at(size_type n) const
    {
#if EASTL_EXCEPTIONS_ENABLED
        if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
            throw std::out_of_range("vector::at -- out of range");
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(n >= (static_cast<size_type>(mpEnd - mpBegin))))
            EASTL_FAIL_MSG("vector::at -- out of range");
#endif

        return *(mpBegin + n);
    }
+
+
    /// front
    /// Reference to the first element. Calling this on an empty vector is
    /// undefined; an assert fires when assertions are enabled (unless the
    /// empty-reference mode explicitly permits it).
    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::reference
    vector<T, Allocator>::front()
    {
#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We don't allow the user to reference an empty container.
            EASTL_FAIL_MSG("vector::front -- empty vector");
#endif

        return *mpBegin;
    }


    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::const_reference
    vector<T, Allocator>::front() const
    {
#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We don't allow the user to reference an empty container.
            EASTL_FAIL_MSG("vector::front -- empty vector");
#endif

        return *mpBegin;
    }


    /// back
    /// Reference to the last element; same emptiness caveats as front().
    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::reference
    vector<T, Allocator>::back()
    {
#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We don't allow the user to reference an empty container.
            EASTL_FAIL_MSG("vector::back -- empty vector");
#endif

        return *(mpEnd - 1);
    }


    template <typename T, typename Allocator>
    inline typename vector<T, Allocator>::const_reference
    vector<T, Allocator>::back() const
    {
#if EASTL_EMPTY_REFERENCE_ASSERT_ENABLED
        // We allow the user to reference an empty container.
#elif EASTL_ASSERT_ENABLED
        if(EASTL_UNLIKELY(mpEnd <= mpBegin)) // We don't allow the user to reference an empty container.
            EASTL_FAIL_MSG("vector::back -- empty vector");
#endif

        return *(mpEnd - 1);
    }
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::push_back(const value_type& value)
+ {
+ if(mpEnd < mpCapacity)
+ ::new(mpEnd++) value_type(value);
+ else
+ DoInsertValue(mpEnd, value);
+ }
+
+
	template <typename T, typename Allocator>
	inline typename vector<T, Allocator>::reference
	vector<T, Allocator>::push_back()
	{
		// Appends a default-constructed element and returns a reference to it.
		// This is an EASTL extension; it avoids a copy when capacity suffices.
		if(mpEnd < mpCapacity)
			::new(mpEnd++) value_type();
		else // Note that in this case we create a temporary, which is less desirable.
			DoInsertValue(mpEnd, value_type());

		return *(mpEnd - 1); // Same as return back();
	}
+
+
	template <typename T, typename Allocator>
	inline void* vector<T, Allocator>::push_back_uninitialized()
	{
		// Extends the vector by one element WITHOUT constructing it. The caller
		// must construct a value_type at the returned address (e.g. with
		// placement new) before the element is otherwise used or destroyed.
		if(mpEnd == mpCapacity)
		{
			const size_type newSize = (size_type)(mpEnd - mpBegin) + 1;
			reserve(newSize); // NOTE(review): requests exactly size+1; if reserve allocates exactly the requested amount, repeated calls degrade to O(n^2) -- confirm reserve's growth policy.
		}

		return mpEnd++;
	}
+
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::pop_back()
+ {
+#if EASTL_ASSERT_ENABLED
+ if(EASTL_UNLIKELY(mpEnd <= mpBegin))
+ EASTL_FAIL_MSG("vector::pop_back -- empty vector");
+#endif
+
+ --mpEnd;
+ mpEnd->~value_type();
+ }
+
+ template <typename T, typename Allocator>
+ inline void vector<T, Allocator>::replace_with_last(size_t idx)
+ {
+#if EASTL_ASSERT_ENABLED
+ if(EASTL_LIKELY( idx >= size() ) )
+ EASTL_FAIL_MSG("vector::replace_with_last -- index out of range");
+#endif
+ this->operator[]((uint32_t)idx) = this->back();
+ this->pop_back();
+ }
+
+
	template <typename T, typename Allocator>
	inline typename vector<T, Allocator>::iterator
	vector<T, Allocator>::insert(iterator position, const value_type& value)
	{
		// Inserts a copy of value before position; returns an iterator to the
		// newly inserted element.
	#if EASTL_ASSERT_ENABLED
		if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
			EASTL_FAIL_MSG("vector::insert -- invalid position");
	#endif

		const ptrdiff_t n = position - mpBegin; // Save this because we might reallocate.

		// Only a push-back with spare capacity can construct directly in place;
		// every other case (interior insert, or full capacity) goes through
		// DoInsertValue, which shifts elements and/or reallocates.
		if((mpEnd == mpCapacity) || (position != mpEnd))
			DoInsertValue(position, value);
		else
			::new(mpEnd++) value_type(value);

		return mpBegin + n;
	}
+
+
	// Inserts n copies of value before position.
	template <typename T, typename Allocator>
	inline void vector<T, Allocator>::insert(iterator position, size_type n, const value_type& value)
	{
		DoInsertValues(position, n, value);
	}


	// Inserts the range [first, last) before position. The is_integral dispatch
	// disambiguates insert(pos, count, value) calls with two identical integer
	// arguments from genuine iterator-pair calls.
	template <typename T, typename Allocator>
	template <typename InputIterator>
	inline void vector<T, Allocator>::insert(iterator position, InputIterator first, InputIterator last)
	{
		DoInsert(position, first, last, is_integral<InputIterator>());
	}
+
+
	template <typename T, typename Allocator>
	inline typename vector<T, Allocator>::iterator
	vector<T, Allocator>::erase(iterator position)
	{
		// Removes the element at position. Shifts [position+1, mpEnd) left by
		// one via copy assignment, then destroys the now-dead trailing slot.
		// Returns an iterator to the element that followed the erased one.
	#if EASTL_ASSERT_ENABLED
		if(EASTL_UNLIKELY((position < mpBegin) || (position >= mpEnd)))
			EASTL_FAIL_MSG("vector::erase -- invalid position");
	#endif

		if((position + 1) < mpEnd) // No shift needed when erasing the last element.
			eastl::copy(position + 1, mpEnd, position);
		--mpEnd;
		mpEnd->~value_type();
		return position;
	}
+
+
	template <typename T, typename Allocator>
	inline typename vector<T, Allocator>::iterator
	vector<T, Allocator>::erase(iterator first, iterator last)
	{
		// Removes the range [first, last): copies the tail [last, mpEnd) down
		// onto first, destroys the leftover tail slots, and shrinks the size.
		// Returns 'first', which now refers to the element after the erased run.
	#if EASTL_ASSERT_ENABLED
		if(EASTL_UNLIKELY((first < mpBegin) || (first > mpEnd) || (last < mpBegin) || (last > mpEnd) || (last < first)))
			EASTL_FAIL_MSG("vector::erase -- invalid position");
	#endif

		//#if 0
		// Reference implementation, known to be correct:
		iterator const position = eastl::copy(last, mpEnd, first);
		DoDestroyValues(position, mpEnd);
		mpEnd -= (last - first);
		//#else
		// To do: Test this.
		// // Implementation that has an optimization for memcpy which eastl::copy cannot do (the best it can do is memmove).
		// iterator position;
		// T* const pEnd = mpEnd - (last - first);
		//
		// if((pEnd <= last) && eastl::has_trivial_assign<value_type>::value) // If doing a non-overlapping copy and the data is memcpy-able
		// {
		//     const size_t size = (size_t)((uintptr_t)mpEnd-(uintptr_t)last);
		//     position = (T*)((uintptr_t)memcpy(first, last, size) + size);
		// }
		// else
		//     position = eastl::copy(last, mpEnd, first);
		//
		// DoDestroyValues(position, mpEnd);
		// mpEnd = pEnd;
		//#endif

		return first;
	}
+
+
	// Erase via reverse_iterator: a reverse_iterator's base() points one element
	// PAST the element it refers to, so we pre-increment before taking base().
	template <typename T, typename Allocator>
	inline typename vector<T, Allocator>::reverse_iterator
	vector<T, Allocator>::erase(reverse_iterator position)
	{
		return reverse_iterator(erase((++position).base()));
	}


	// Range erase via reverse_iterators; note the forward-order range is
	// [last.base()-1, first.base()-1), hence the swapped, pre-incremented pair.
	template <typename T, typename Allocator>
	inline typename vector<T, Allocator>::reverse_iterator
	vector<T, Allocator>::erase(reverse_iterator first, reverse_iterator last)
	{
		// Version which erases in order from first to last.
		// difference_type i(first.base() - last.base());
		// while(i--)
		//     first = erase(first);
		// return first;

		// Version which erases in order from last to first, but is slightly more efficient:
		return reverse_iterator(erase((++last).base(), (++first).base()));
	}
+
+
	// Destroys all elements and sets the size to zero. Capacity (and thus the
	// allocation) is retained.
	template <typename T, typename Allocator>
	inline void vector<T, Allocator>::clear()
	{
		DoDestroyValues(mpBegin, mpEnd);
		mpEnd = mpBegin;
	}

	// EASTL extension: sets the size to zero WITHOUT running destructors.
	// Only safe when the elements have trivial destructors or their cleanup
	// is otherwise guaranteed not to matter.
	template <typename T, typename Allocator>
	inline void vector<T, Allocator>::clear_unsafe()
	{
		mpEnd = mpBegin;
	}
+
+
	template <typename T, typename Allocator>
	inline void vector<T, Allocator>::reset()
	{
		// The reset function is a special extension function which unilaterally
		// resets the container to an empty state without freeing the memory of
		// the contained objects. This is useful for very quickly tearing down a
		// container built into scratch memory.
		// Note: no destructors run and no deallocation happens here; the caller
		// owns the original memory's lifetime.
		mpBegin = mpEnd = mpCapacity = NULL;
	}
+
+
	template <typename T, typename Allocator>
	inline void vector<T, Allocator>::swap(this_type& x)
	{
		// O(1) pointer swap when both vectors use an equivalent allocator;
		// otherwise falls back to an O(n) three-way copy via operator=.
		if(mAllocator == x.mAllocator) // If allocators are equivalent...
		{
			// We leave mAllocator as-is.
			eastl::swap(mpBegin, x.mpBegin);
			eastl::swap(mpEnd, x.mpEnd);
			eastl::swap(mpCapacity, x.mpCapacity);
		}
		else // else swap the contents.
		{
			const this_type temp(*this); // Can't call eastl::swap because that would
			*this = x;                   // itself call this member swap function.
			x = temp;
		}
	}
+
+
	// Allocates storage for n elements and copy-constructs [first, last) into
	// it; returns the new buffer. The caller takes ownership of the allocation.
	// NOTE(review): if a copy constructor throws here, the allocation leaks --
	// confirm against the file's exception policy (other paths use try/catch).
	template <typename T, typename Allocator>
	template <typename ForwardIterator>
	inline typename vector<T, Allocator>::pointer
	vector<T, Allocator>::DoRealloc(size_type n, ForwardIterator first, ForwardIterator last)
	{
		T* const p = DoAllocate(n);
		eastl::uninitialized_copy_ptr(first, last, p);
		return p;
	}
+
+
	// DoInit overload chosen when the two-argument constructor was called with
	// integers, meaning "n copies of value". Allocates exactly n (size == capacity).
	template <typename T, typename Allocator>
	template <typename Integer>
	inline void vector<T, Allocator>::DoInit(Integer n, Integer value, true_type)
	{
		mpBegin    = DoAllocate((size_type)n);
		mpCapacity = mpBegin + n;
		mpEnd      = mpCapacity;
		eastl::uninitialized_fill_n_ptr<value_type, Integer>(mpBegin, n, value);
	}


	// DoInit overload for a genuine iterator pair; dispatches on the iterator
	// category so forward-or-better iterators can pre-size the allocation.
	template <typename T, typename Allocator>
	template <typename InputIterator>
	inline void vector<T, Allocator>::DoInit(InputIterator first, InputIterator last, false_type)
	{
		typedef typename eastl::iterator_traits<InputIterator>:: iterator_category IC;
		DoInitFromIterator(first, last, IC());
	}
+
+
+ template <typename T, typename Allocator>
+ template <typename InputIterator>
+ inline void vector<T, Allocator>::DoInitFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
+ {
+ for(; first < last; ++first) // InputIterators by definition actually only allow you to iterate through them once.
+ push_back(*first); // Thus the standard *requires* that we do this (inefficient) implementation.
+ } // Luckily, InputIterators are in practice almost never used, so this code will likely never get executed.
+
+
	// Construction from forward-or-better iterators: the range length is known,
	// so allocate exactly n once (size == capacity) and copy-construct in place.
	template <typename T, typename Allocator>
	template <typename ForwardIterator>
	inline void vector<T, Allocator>::DoInitFromIterator(ForwardIterator first, ForwardIterator last, EASTL_ITC_NS::forward_iterator_tag)
	{
		const size_type n = (size_type)eastl::distance(first, last);
		mpBegin    = DoAllocate(n);
		mpCapacity = mpBegin + n;
		mpEnd      = mpCapacity;
		eastl::uninitialized_copy_ptr(first, last, mpBegin);
	}
+
+
	// Invokes the destructor of every element in [first, last). Does not
	// deallocate; callers manage the storage separately.
	template <typename T, typename Allocator>
	inline void vector<T, Allocator>::DoDestroyValues(pointer first, pointer last)
	{
		for(; first < last; ++first) // In theory, this could be an external function that works on an iterator.
			first->~value_type();
	}
+
+
	// DoAssign overload chosen when assign() was called with two integers,
	// meaning "n copies of value".
	template <typename T, typename Allocator>
	template <typename Integer>
	inline void vector<T, Allocator>::DoAssign(Integer n, Integer value, true_type)
	{
		DoAssignValues(static_cast<size_type>(n), static_cast<value_type>(value));
	}


	// DoAssign overload for a genuine iterator pair; dispatches on iterator
	// category so random-access ranges can be measured up front.
	template <typename T, typename Allocator>
	template <typename InputIterator>
	inline void vector<T, Allocator>::DoAssign(InputIterator first, InputIterator last, false_type)
	{
		typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
		DoAssignFromIterator(first, last, IC());
	}
+
+
	// Replaces the contents with n copies of value. Three cases:
	//   n > capacity : build a fresh vector and swap it in (reallocates);
	//   n > size     : overwrite existing elements, construct the remainder;
	//   n <= size    : overwrite the first n, erase the surplus tail.
	template <typename T, typename Allocator>
	void vector<T, Allocator>::DoAssignValues(size_type n, const value_type& value)
	{
		if(n > size_type(mpCapacity - mpBegin)) // If n > capacity ...
		{
			this_type temp(n, value, mAllocator); // We have little choice but to reallocate with new memory.
			swap(temp);
		}
		else if(n > size_type(mpEnd - mpBegin)) // If n > size ...
		{
			eastl::fill(mpBegin, mpEnd, value);
			eastl::uninitialized_fill_n_ptr(mpEnd, n - size_type(mpEnd - mpBegin), value);
			mpEnd += n - size_type(mpEnd - mpBegin);
		}
		else // else 0 <= n <= size
		{
			eastl::fill_n(mpBegin, n, value);
			erase(mpBegin + n, mpEnd);
		}
	}
+
+
	// Assign from pure input iterators: the range length is unknown, so
	// overwrite existing elements in lockstep, then either erase the surplus
	// (source exhausted first) or append the remainder (vector exhausted first).
	template <typename T, typename Allocator>
	template <typename InputIterator>
	void vector<T, Allocator>::DoAssignFromIterator(InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
	{
		iterator position(mpBegin);

		while((position != mpEnd) && (first != last))
		{
			*position = *first;
			++first;
			++position;
		}
		if(first == last)
			erase(position, mpEnd);
		else
			insert(mpEnd, first, last);
	}
+
+
	// Assign from random-access iterators: the range length n is measured once,
	// then one of three strategies is chosen:
	//   n > capacity : reallocate, copy into fresh storage, free the old;
	//   n <= size    : overwrite, then destroy the surplus tail;
	//   otherwise    : overwrite the existing elements, copy-construct the rest.
	template <typename T, typename Allocator>
	template <typename RandomAccessIterator>
	void vector<T, Allocator>::DoAssignFromIterator(RandomAccessIterator first, RandomAccessIterator last, EASTL_ITC_NS::random_access_iterator_tag)
	{
		const size_type n = (size_type)eastl::distance(first, last);

		if(n > size_type(mpCapacity - mpBegin)) // If n > capacity ...
		{
			pointer const pNewData = DoRealloc(n, first, last);
			DoDestroyValues(mpBegin, mpEnd);
			DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));

			mpBegin    = pNewData;
			mpEnd      = mpBegin + n;
			mpCapacity = mpEnd;
		}
		else if(n <= size_type(mpEnd - mpBegin)) // If n <= size ...
		{
			pointer const pNewEnd = eastl::copy(first, last, mpBegin); // Since we are copying to mpBegin, we don't have to worry about needing copy_backward or a memmove-like copy (as opposed to memcpy-like copy).
			DoDestroyValues(pNewEnd, mpEnd);
			mpEnd = pNewEnd;
		}
		else // else size < n <= capacity
		{
			RandomAccessIterator position = first + (mpEnd - mpBegin);
			eastl::copy(first, position, mpBegin); // Since we are copying to mpBegin, we don't have to worry about needing copy_backward or a memmove-like copy (as opposed to memcpy-like copy).
			mpEnd = eastl::uninitialized_copy_ptr(position, last, mpEnd);
		}
	}
+
+
	// DoInsert overload chosen when insert() was called with two integers,
	// meaning "insert n copies of value".
	template <typename T, typename Allocator>
	template <typename Integer>
	inline void vector<T, Allocator>::DoInsert(iterator position, Integer n, Integer value, true_type)
	{
		DoInsertValues(position, static_cast<size_type>(n), static_cast<value_type>(value));
	}


	// DoInsert overload for a genuine iterator pair; dispatches on iterator
	// category so bidirectional-or-better ranges can be measured up front.
	template <typename T, typename Allocator>
	template <typename InputIterator>
	inline void vector<T, Allocator>::DoInsert(iterator position, InputIterator first, InputIterator last, false_type)
	{
		typedef typename eastl::iterator_traits<InputIterator>::iterator_category IC;
		DoInsertFromIterator(position, first, last, IC());
	}
+
+
	// Range-insert from pure input iterators: each element is inserted one at
	// a time. insert() returns an iterator to the inserted element, and the
	// for-loop's ++position then steps past it so source order is preserved;
	// re-assigning position also survives any reallocation insert() performs.
	template <typename T, typename Allocator>
	template <typename InputIterator>
	inline void vector<T, Allocator>::DoInsertFromIterator(iterator position, InputIterator first, InputIterator last, EASTL_ITC_NS::input_iterator_tag)
	{
		for(; first != last; ++first, ++position)
			position = insert(position, *first);
	}
+
+
	// Range-insert from bidirectional-or-better iterators. The range length n
	// is measured once. If n fits in spare capacity the existing tail is moved
	// up in place (two sub-cases depending on whether the tail is longer than
	// n); otherwise a larger buffer is allocated and everything is rebuilt
	// there, with rollback on exception when exceptions are enabled.
	template <typename T, typename Allocator>
	template <typename BidirectionalIterator>
	void vector<T, Allocator>::DoInsertFromIterator(iterator position, BidirectionalIterator first, BidirectionalIterator last, EASTL_ITC_NS::bidirectional_iterator_tag)
	{
	#if EASTL_ASSERT_ENABLED
		if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
			EASTL_FAIL_MSG("vector::insert -- invalid position");
	#endif

		if(first != last)
		{
			const size_type n = (size_type)eastl::distance(first, last);

			if(n <= size_type(mpCapacity - mpEnd)) // If n fits within the existing capacity...
			{
				const size_type nExtra = static_cast<size_type>(mpEnd - position); // Number of existing elements after the insertion point.
				const pointer pEnd = mpEnd;

				if(n < nExtra) // The moved tail is longer than the inserted range.
				{
					// Construct the last n tail elements into raw memory, shift the
					// rest of the tail up by n, then overwrite the gap with the range.
					eastl::uninitialized_copy_ptr(mpEnd - n, mpEnd, mpEnd);
					mpEnd += n;
					eastl::copy_backward(position, pEnd - n, pEnd); // We need copy_backward because of potential overlap issues.
					eastl::copy(first, last, position);
				}
				else // The inserted range reaches past the old end.
				{
					// Construct the range's overflow portion and the old tail into raw
					// memory past the old end, then overwrite [position, pEnd) with the
					// first nExtra source elements.
					BidirectionalIterator fiTemp = first;
					eastl::advance(fiTemp, nExtra);
					eastl::uninitialized_copy_ptr(fiTemp, last, mpEnd);
					mpEnd += n - nExtra;
					eastl::uninitialized_copy_ptr(position, pEnd, mpEnd);
					mpEnd += nExtra;
					eastl::copy_backward(first, fiTemp, position + nExtra);
				}
			}
			else // else we need to expand our capacity.
			{
				// Grow to at least nPrevSize + n (GetNewCapacity may suggest more).
				const size_type nPrevSize = size_type(mpEnd - mpBegin);
				const size_type nGrowSize = GetNewCapacity(nPrevSize);
				const size_type nNewSize = nGrowSize > (nPrevSize + n) ? nGrowSize : (nPrevSize + n);
				pointer const pNewData = DoAllocate(nNewSize);

	#if EASTL_EXCEPTIONS_ENABLED
				pointer pNewEnd = pNewData;
				try
				{
					// Rebuild as [prefix][inserted range][suffix] in the new buffer.
					pNewEnd = eastl::uninitialized_copy_ptr(mpBegin, position, pNewData);
					pNewEnd = eastl::uninitialized_copy_ptr(first, last, pNewEnd);
					pNewEnd = eastl::uninitialized_copy_ptr(position, mpEnd, pNewEnd);
				}
				catch(...)
				{
					// Roll back: destroy whatever was constructed and free the new buffer.
					DoDestroyValues(pNewData, pNewEnd);
					DoFree(pNewData, nNewSize);
					throw;
				}
	#else
				pointer pNewEnd = eastl::uninitialized_copy_ptr(mpBegin, position, pNewData);
				pNewEnd         = eastl::uninitialized_copy_ptr(first, last, pNewEnd);
				pNewEnd         = eastl::uninitialized_copy_ptr(position, mpEnd, pNewEnd);
	#endif

				DoDestroyValues(mpBegin, mpEnd);
				DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));

				mpBegin    = pNewData;
				mpEnd      = pNewEnd;
				mpCapacity = pNewData + nNewSize;
			}
		}
	}
+
+
	// Inserts n copies of value before position. Mirrors the structure of the
	// bidirectional-range insert above: in-place tail shuffle when n fits the
	// spare capacity, full rebuild into a larger buffer otherwise.
	template <typename T, typename Allocator>
	void vector<T, Allocator>::DoInsertValues(iterator position, size_type n, const value_type& value)
	{
	#if EASTL_ASSERT_ENABLED
		if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
			EASTL_FAIL_MSG("vector::insert -- invalid position");
	#endif

		if(n <= size_type(mpCapacity - mpEnd)) // If n is <= capacity...
		{
			if(n > 0) // To do: See if there is a way we can eliminate this 'if' statement.
			{
				// To consider: Make this algorithm work more like DoInsertValue whereby a pointer to value is used.
				// The local copy guards against 'value' aliasing an element that
				// the shuffle below moves or overwrites.
				const value_type temp  = value;
				const size_type nExtra = static_cast<size_type>(mpEnd - position); // Existing elements after the insertion point.
				const pointer pEnd     = mpEnd;

				if(n < nExtra) // The moved tail is longer than the fill run.
				{
					eastl::uninitialized_copy_ptr(mpEnd - n, mpEnd, mpEnd);
					mpEnd += n;
					eastl::copy_backward(position, pEnd - n, pEnd); // We need copy_backward because of potential overlap issues.
					eastl::fill(position, position + n, temp);
				}
				else // The fill run reaches past the old end.
				{
					eastl::uninitialized_fill_n_ptr(mpEnd, n - nExtra, temp);
					mpEnd += n - nExtra;
					eastl::uninitialized_copy_ptr(position, pEnd, mpEnd);
					mpEnd += nExtra;
					eastl::fill(position, pEnd, temp);
				}
			}
		}
		else // else n > capacity
		{
			// Grow to at least nPrevSize + n (GetNewCapacity may suggest more).
			const size_type nPrevSize = size_type(mpEnd - mpBegin);
			const size_type nGrowSize = GetNewCapacity(nPrevSize);
			const size_type nNewSize = nGrowSize > (nPrevSize + n) ? nGrowSize : (nPrevSize + n);
			pointer const pNewData = DoAllocate(nNewSize);

	#if EASTL_EXCEPTIONS_ENABLED
			pointer pNewEnd = pNewData;
			try
			{
				// Rebuild as [prefix][n copies of value][suffix] in the new buffer.
				pNewEnd = eastl::uninitialized_copy_ptr(mpBegin, position, pNewData);
				eastl::uninitialized_fill_n_ptr(pNewEnd, n, value);
				pNewEnd = eastl::uninitialized_copy_ptr(position, mpEnd, pNewEnd + n);
			}
			catch(...)
			{
				// Roll back: destroy whatever was constructed and free the new buffer.
				DoDestroyValues(pNewData, pNewEnd);
				DoFree(pNewData, nNewSize);
				throw;
			}
	#else
			pointer pNewEnd = eastl::uninitialized_copy_ptr(mpBegin, position, pNewData);
			eastl::uninitialized_fill_n_ptr(pNewEnd, n, value);
			pNewEnd = eastl::uninitialized_copy_ptr(position, mpEnd, pNewEnd + n);
	#endif

			DoDestroyValues(mpBegin, mpEnd);
			DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));

			mpBegin    = pNewData;
			mpEnd      = pNewEnd;
			mpCapacity = pNewData + nNewSize;
		}
	}
+
+
	// Inserts a single copy of value before position, handling both the
	// in-place case and the reallocation case.
	template <typename T, typename Allocator>
	void vector<T, Allocator>::DoInsertValue(iterator position, const value_type& value)
	{
	#if EASTL_ASSERT_ENABLED
		if(EASTL_UNLIKELY((position < mpBegin) || (position > mpEnd)))
			EASTL_FAIL_MSG("vector::insert -- invalid position");
	#endif

		if(mpEnd != mpCapacity) // If size < capacity ...
		{
			// EASTL_ASSERT(position < mpEnd); // We don't call this function unless position is less than end, and the code directly below relies on this.
			// We need to take into account the possibility that value may come from within the vector itself.
			const T* pValue = &value;
			if((pValue >= position) && (pValue < mpEnd)) // If value comes from within the range to be moved...
				++pValue;                                // ...its contents will live one slot higher after the shift below.
			::new(mpEnd) value_type(*(mpEnd - 1));       // Construct the new last element from the old last element...
			eastl::copy_backward(position, mpEnd - 1, mpEnd); // We need copy_backward because of potential overlap issues.
			*position = *pValue;                         // ...then drop the (possibly adjusted) value into the freed slot.
			++mpEnd;
		}
		else // else (size == capacity)
		{
			// No room: rebuild as [prefix][value][suffix] in a larger buffer,
			// rolling back on exception when exceptions are enabled.
			const size_type nPrevSize = size_type(mpEnd - mpBegin);
			const size_type nNewSize  = GetNewCapacity(nPrevSize);
			pointer const pNewData    = DoAllocate(nNewSize);

	#if EASTL_EXCEPTIONS_ENABLED
			pointer pNewEnd = pNewData;
			try
			{
				pNewEnd = eastl::uninitialized_copy_ptr(mpBegin, position, pNewData);
				::new(pNewEnd) value_type(value);
				pNewEnd = eastl::uninitialized_copy_ptr(position, mpEnd, ++pNewEnd);
			}
			catch(...)
			{
				DoDestroyValues(pNewData, pNewEnd);
				DoFree(pNewData, nNewSize);
				throw;
			}
	#else
			pointer pNewEnd = eastl::uninitialized_copy_ptr(mpBegin, position, pNewData);
			::new(pNewEnd) value_type(value);
			pNewEnd = eastl::uninitialized_copy_ptr(position, mpEnd, ++pNewEnd);
	#endif

			DoDestroyValues(mpBegin, mpEnd);
			DoFree(mpBegin, (size_type)(mpCapacity - mpBegin));

			mpBegin    = pNewData;
			mpEnd      = pNewEnd;
			mpCapacity = pNewData + nNewSize;
		}
	}
+
+
+ template <typename T, typename Allocator>
+ inline bool vector<T, Allocator>::validate() const
+ {
+ if(mpEnd < mpBegin)
+ return false;
+ if(mpCapacity < mpEnd)
+ return false;
+ return true;
+ }
+
+
+ template <typename T, typename Allocator>
+ inline int vector<T, Allocator>::validate_iterator(const_iterator i) const
+ {
+ if(i >= mpBegin)
+ {
+ if(i < mpEnd)
+ return (isf_valid | isf_current | isf_can_dereference);
+
+ if(i <= mpEnd)
+ return (isf_valid | isf_current);
+ }
+
+ return isf_none;
+ }
+
+
+
+ ///////////////////////////////////////////////////////////////////////
+ // global operators
+ ///////////////////////////////////////////////////////////////////////
+
	// Element-wise equality: sizes must match and all elements compare equal.
	template <typename T, typename Allocator>
	inline bool operator==(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
	{
		return ((a.size() == b.size()) && equal(a.begin(), a.end(), b.begin()));
	}


	// Negation of equality, written directly to short-circuit on size.
	template <typename T, typename Allocator>
	inline bool operator!=(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
	{
		return ((a.size() != b.size()) || !equal(a.begin(), a.end(), b.begin()));
	}


	// Lexicographical ordering; the remaining operators are derived from it.
	template <typename T, typename Allocator>
	inline bool operator<(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
	{
		return lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
	}


	template <typename T, typename Allocator>
	inline bool operator>(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
	{
		return b < a;
	}


	template <typename T, typename Allocator>
	inline bool operator<=(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
	{
		return !(b < a);
	}


	template <typename T, typename Allocator>
	inline bool operator>=(const vector<T, Allocator>& a, const vector<T, Allocator>& b)
	{
		return !(a < b);
	}


	// Free swap so that eastl::swap / ADL finds the O(1) member swap.
	template <typename T, typename Allocator>
	inline void swap(vector<T, Allocator>& a, vector<T, Allocator>& b)
	{
		a.swap(b);
	}
+
+
+} // namespace eastl
+
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+
+#endif // Header include guard
+
+
+
+
+
+
+
+
+
+
diff --git a/UnknownVersion/include/EASTL/vector_map.h b/UnknownVersion/include/EASTL/vector_map.h
new file mode 100644
index 0000000..c20518c
--- /dev/null
+++ b/UnknownVersion/include/EASTL/vector_map.h
@@ -0,0 +1,248 @@
+#ifndef EASTL_VECTOR_MAP_H
+#define EASTL_VECTOR_MAP_H
+
+#include <EASTL/internal/config.h>
+
+#include <EASTL/algorithm.h>
+#include <EASTL/allocator.h>
+#include <EASTL/functional.h>
+#include <EASTL/iterator.h>
+#include <EASTL/map.h>
+#include <EASTL/sort.h>
+#include <EASTL/utility.h>
+#include <EASTL/vector.h>
+
+
+namespace eastl {
+
+namespace detail {
+
+template<class Pair, class Compare>
+struct compare_impl {
+ typedef Pair pair_type;
+ typedef typename pair_type::first_type first_argument_type;
+
+ compare_impl() {}
+ compare_impl(Compare const& src) : m_cmp(src) {}
+
+ bool operator()(first_argument_type const& lhs, first_argument_type const& rhs) const
+ { return Compare()(lhs, rhs); }
+ bool operator()(pair_type const& lhs, pair_type const& rhs) const
+ { return operator()(lhs.first, rhs.first); }
+ bool operator()(pair_type const& lhs, first_argument_type const& rhs) const
+ { return operator()(lhs.first, rhs); }
+ bool operator()(first_argument_type const& lhs, pair_type const& rhs) const
+ { return operator()(lhs, rhs.first); }
+
+ operator Compare() const { return m_cmp; }
+ void swap(compare_impl& rhs) {
+ using ::eastl::swap;
+ swap(m_cmp, rhs.m_cmp);
+ }
+ private:
+ Compare m_cmp;
+}; // struct compare_impl
+
+} // namespace detail
+
+template<class K, class V, class C = ::eastl::less<K>, class A = EASTLAllocatorType>
+class vector_map {
+ public:
+ typedef K key_type;
+ typedef V mapped_type;
+ typedef ::eastl::pair<key_type, mapped_type> value_type;
+ typedef C key_compare;
+ typedef A allocator_type;
+
+ typedef ::eastl::vector<value_type, allocator_type> base_type;
+ private:
+ typedef detail::compare_impl<key_type, key_compare> compare_impl_type;
+
+ base_type m_base;
+ detail::compare_impl<typename base_type::value_type, key_compare> m_cmp;
+ public:
+ typedef typename base_type::iterator iterator;
+ typedef typename base_type::const_iterator const_iterator;
+ typedef typename base_type::reverse_iterator reverse_iterator;
+ typedef typename base_type::const_reverse_iterator const_reverse_iterator;
+
+ typedef typename base_type::size_type size_type;
+ typedef typename base_type::difference_type difference_type;
+
+ typedef typename base_type::reference reference;
+ typedef typename base_type::const_reference const_reference;
+ typedef typename base_type::pointer pointer;
+ typedef typename base_type::const_pointer const_pointer;;
+
+ static const size_type kMaxSize = base_type::kMaxSize;
+
+ class value_compare {
+ friend class vector_map;
+ key_compare const m_cmp;
+ protected:
+ value_compare(key_compare pred) : m_cmp(pred) {}
+ public:
+ bool operator()(value_type const& lhs, value_type const& rhs) const
+ { return m_cmp(lhs.first, rhs.first); }
+ }; // struct value_compare
+
+ explicit vector_map(key_compare const& cmp = key_compare(), allocator_type const& alloc = EASTL_VECTOR_DEFAULT_ALLOCATOR)
+ : m_base(alloc), m_cmp(cmp) {}
+ template<class InputIterator>
+ vector_map(InputIterator first, InputIterator last,
+ key_compare const& cmp = key_compare(),
+ allocator_type const& alloc = EASTL_VECTOR_DEFAULT_ALLOCATOR)
+ : m_base(alloc), m_cmp(cmp)
+ {
+ ::eastl::map<key_type, mapped_type, key_compare, allocator_type> const tmp(first, last);
+ m_base.reserve(tmp.size());
+ ::eastl::copy(tmp.begin(), tmp.end(), ::eastl::back_inserter(m_base));
+ }
+
+ vector_map& operator =(vector_map const& rhs) {
+ vector_map(rhs).swap(*this);
+ return *this;
+ }
+
+ iterator begin() { return m_base.begin(); }
+ iterator end() { return m_base.end(); }
+ reverse_iterator rbegin() { return m_base.rbegin(); }
+ reverse_iterator rend() { return m_base.rend(); }
+
+ const_iterator begin() const { return m_base.begin(); }
+ const_iterator end() const { return m_base.end(); }
+ const_reverse_iterator rbegin() const { return m_base.rbegin(); }
+ const_reverse_iterator rend() const { return m_base.rend(); }
+
+ void clear() { m_base.clear(); }
+
+ bool empty() const { return m_base.empty(); }
+ size_type size() const { return m_base.size(); }
+ size_type max_size() { return base_type::kMaxSize; }
+
+ mapped_type& operator[](key_type const& key) {
+ return insert(value_type(key, mapped_type())).first->second;
+ }
+
+ ::eastl::pair<iterator, bool> insert(value_type const& val) {
+ iterator const i(lower_bound(val.first));
+
+ return (i == end() || m_cmp(val.first, i->first))
+ ? ::eastl::make_pair(m_base.insert(i, val), true)
+ : ::eastl::make_pair(i, false)
+ ;
+ }
+ iterator insert(iterator const pos, value_type const& val) {
+ return
+ ((pos == begin() || m_cmp(*(pos-1), val)) &&
+ (pos == end() || m_cmp(val, *pos)))
+ ? m_base.insert(pos, val)
+ : insert(val).first
+ ;
+ }
+ template<class InputIterator>
+ void insert(InputIterator first, InputIterator const last)
+ { for(; first != last; ++first) insert(*first); }
+
+ void erase(iterator const pos) { m_base.erase(pos); }
+ void erase(iterator const first, iterator const last)
+ { m_base.erase(first, last); }
+ size_type erase(key_type const& k) {
+ iterator const i(find(k));
+ if(i == end()) return 0;
+ else { erase(i); return 1; }
+ }
+
+ void swap(vector_map& other) {
+ m_base.swap(other.m_base);
+ m_cmp.swap(other.m_cmp);
+ }
+
+ allocator_type& get_allocator() { return m_base.get_allocator(); }
+ void set_allocator(allocator_type const& alloc) { m_base.set_allocator(alloc); }
+
+ key_compare key_comp() const { return static_cast<key_compare>(m_cmp); }
+ value_compare value_comp() const { return value_compare(m_cmp); }
+
+ iterator find(key_type const& k) {
+ iterator const i(lower_bound(k));
+ return (i != end() && m_cmp(k, i->first))? end() : i;
+ }
+ const_iterator find(key_type const& k) const {
+ const_iterator const i(lower_bound(k));
+ return (i != end() && m_cmp(k, i->first))? end() : i;
+ }
+ size_type count(key_type const& k) const { return find(k) != end()? 1 : 0; }
+ iterator lower_bound(key_type const& k) {
+ return ::eastl::lower_bound(begin(), end(), k, m_cmp);
+ }
+ const_iterator lower_bound(key_type const& k) const {
+ return ::eastl::lower_bound(begin(), end(), k, m_cmp);
+ }
+ iterator upper_bound(key_type const& k) {
+ return ::eastl::upper_bound(begin(), end(), k, m_cmp);
+ }
+ const_iterator upper_bound(key_type const& k) const {
+ return ::eastl::upper_bound(begin(), end(), k, m_cmp);
+ }
+
+ ::eastl::pair<iterator, iterator> equal_range(key_type const& k) {
+ return ::eastl::equal_range(begin(), end(), k, m_cmp);
+ }
+ ::eastl::pair<const_iterator, const_iterator> equal_range(key_type const& k) const {
+ return ::eastl::equal_range(begin(), end(), k, m_cmp);
+ }
+
+ template<class Key, class T, class Compare, class Allocator>
+ friend bool operator==(vector_map<Key, T, Compare, Allocator> const& lhs,
+ vector_map<Key, T, Compare, Allocator> const& rhs);
+ template<class Key, class T, class Compare, class Allocator>
+ friend bool operator!=(vector_map<Key, T, Compare, Allocator> const& lhs,
+ vector_map<Key, T, Compare, Allocator> const& rhs);
+ template<class Key, class T, class Compare, class Allocator>
+ friend bool operator<(vector_map<Key, T, Compare, Allocator> const& lhs,
+ vector_map<Key, T, Compare, Allocator> const& rhs);
+ template<class Key, class T, class Compare, class Allocator>
+ friend bool operator>(vector_map<Key, T, Compare, Allocator> const& lhs,
+ vector_map<Key, T, Compare, Allocator> const& rhs);
+ template<class Key, class T, class Compare, class Allocator>
+ friend bool operator>=(vector_map<Key, T, Compare, Allocator> const& lhs,
+ vector_map<Key, T, Compare, Allocator> const& rhs);
+ template<class Key, class T, class Compare, class Allocator>
+ friend bool operator<=(vector_map<Key, T, Compare, Allocator> const& lhs,
+ vector_map<Key, T, Compare, Allocator> const& rhs);
+}; // class vector_map
+
// Relational operators delegate to the underlying sorted vector, giving
// element-wise equality and lexicographical ordering over (key, value) pairs.
template<class Key, class T, class Compare, class Allocator>
inline bool operator==(vector_map<Key, T, Compare, Allocator> const& lhs,
					   vector_map<Key, T, Compare, Allocator> const& rhs)
{ return lhs.m_base == rhs.m_base; }
template<class Key, class T, class Compare, class Allocator>
inline bool operator!=(vector_map<Key, T, Compare, Allocator> const& lhs,
					   vector_map<Key, T, Compare, Allocator> const& rhs)
{ return lhs.m_base != rhs.m_base; }
template<class Key, class T, class Compare, class Allocator>
inline bool operator<(vector_map<Key, T, Compare, Allocator> const& lhs,
					  vector_map<Key, T, Compare, Allocator> const& rhs)
{ return lhs.m_base < rhs.m_base; }
template<class Key, class T, class Compare, class Allocator>
inline bool operator>(vector_map<Key, T, Compare, Allocator> const& lhs,
					  vector_map<Key, T, Compare, Allocator> const& rhs)
{ return lhs.m_base > rhs.m_base; }
template<class Key, class T, class Compare, class Allocator>
inline bool operator>=(vector_map<Key, T, Compare, Allocator> const& lhs,
					   vector_map<Key, T, Compare, Allocator> const& rhs)
{ return lhs.m_base >= rhs.m_base; }
template<class Key, class T, class Compare, class Allocator>
inline bool operator<=(vector_map<Key, T, Compare, Allocator> const& lhs,
					   vector_map<Key, T, Compare, Allocator> const& rhs)
{ return lhs.m_base <= rhs.m_base; }
+
+template<class Key, class T, class Compare, class Allocator>
+void swap(vector_map<Key, T, Compare, Allocator> const& lhs,
+ vector_map<Key, T, Compare, Allocator> const& rhs)
+{ return lhs.swap(rhs); }
+
+} // namespace eastl
+
+#endif // EASTL_VECTOR_MAP_H