Diffstat (limited to 'chromium/v8/src/base')
-rw-r--r--  chromium/v8/src/base/DEPS                                     |   4
-rw-r--r--  chromium/v8/src/base/atomicops.h                              | 161
-rw-r--r--  chromium/v8/src/base/atomicops_internals_arm64_gcc.h          | 316
-rw-r--r--  chromium/v8/src/base/atomicops_internals_arm_gcc.h            | 301
-rw-r--r--  chromium/v8/src/base/atomicops_internals_atomicword_compat.h  |  99
-rw-r--r--  chromium/v8/src/base/atomicops_internals_mac.h                | 204
-rw-r--r--  chromium/v8/src/base/atomicops_internals_mips_gcc.h           | 159
-rw-r--r--  chromium/v8/src/base/atomicops_internals_tsan.h               | 379
-rw-r--r--  chromium/v8/src/base/atomicops_internals_x86_gcc.cc           | 111
-rw-r--r--  chromium/v8/src/base/atomicops_internals_x86_gcc.h            | 272
-rw-r--r--  chromium/v8/src/base/atomicops_internals_x86_msvc.h           | 202
-rw-r--r--  chromium/v8/src/base/build_config.h                           | 120
-rw-r--r--  chromium/v8/src/base/lazy-instance.h                          | 237
-rw-r--r--  chromium/v8/src/base/macros.h                                 | 120
-rw-r--r--  chromium/v8/src/base/once.cc                                  |  53
-rw-r--r--  chromium/v8/src/base/once.h                                   | 100
-rw-r--r--  chromium/v8/src/base/safe_conversions.h                       |  67
-rw-r--r--  chromium/v8/src/base/safe_conversions_impl.h                  | 220
-rw-r--r--  chromium/v8/src/base/safe_math.h                              | 276
-rw-r--r--  chromium/v8/src/base/safe_math_impl.h                         | 531
-rw-r--r--  chromium/v8/src/base/win32-headers.h                          |  79
21 files changed, 4011 insertions, 0 deletions
diff --git a/chromium/v8/src/base/DEPS b/chromium/v8/src/base/DEPS
new file mode 100644
index 00000000000..65480302438
--- /dev/null
+++ b/chromium/v8/src/base/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "-src",
+ "+src/base",
+]
diff --git a/chromium/v8/src/base/atomicops.h b/chromium/v8/src/base/atomicops.h
new file mode 100644
index 00000000000..b26fc4c98b9
--- /dev/null
+++ b/chromium/v8/src/base/atomicops.h
@@ -0,0 +1,161 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The routines exported by this module are subtle. If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain. If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative. You should assume only properties explicitly guaranteed by the
+// specifications in this file. You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break. If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines. The NoBarrier
+// versions are provided when no barriers are needed:
+// NoBarrier_Store()
+// NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef V8_BASE_ATOMICOPS_H_
+#define V8_BASE_ATOMICOPS_H_
+
+#include "include/v8.h"
+#include "src/base/build_config.h"
+
+#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace v8 {
+namespace base {
+
+typedef char Atomic8;
+typedef int32_t Atomic32;
+#ifdef V8_HOST_ARCH_64_BIT
+// We need to be able to go between Atomic64 and AtomicWord implicitly. This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__ILP32__)
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions. "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef V8_HOST_ARCH_64_BIT
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif // V8_HOST_ARCH_64_BIT
+
+} } // namespace v8::base
+
+// Include our platform specific implementation.
+#if defined(THREAD_SANITIZER)
+#include "src/base/atomicops_internals_tsan.h"
+#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "src/base/atomicops_internals_x86_msvc.h"
+#elif defined(__APPLE__)
+#include "src/base/atomicops_internals_mac.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
+#include "src/base/atomicops_internals_arm64_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
+#include "src/base/atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "src/base/atomicops_internals_x86_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
+#include "src/base/atomicops_internals_mips_gcc.h"
+#else
+#error "Atomic operations are not supported on your platform"
+#endif
+
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(__APPLE__) || defined(__OpenBSD__)
+#include "src/base/atomicops_internals_atomicword_compat.h"
+#endif
+
+#endif // V8_BASE_ATOMICOPS_H_
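The comment block at the top of atomicops.h above explains the intended usage (use the Load/Store routines rather than direct assignment, and pair Release with Acquire operations) but gives no worked example. The following is a minimal usage sketch, not one of the files added by this commit; it assumes only the declarations shown above, and the Producer/TryConsume names and payload variables are illustrative.

// Sketch: publish a plain value from one thread to another using the
// Release_Store / Acquire_Load pair declared in atomicops.h above.
#include "src/base/atomicops.h"

namespace {

int g_payload = 0;                      // ordinary data written before the flag
v8::base::Atomic32 g_ready = 0;         // flag accessed only via atomic routines

void Producer() {
  g_payload = 42;                        // plain write ...
  v8::base::Release_Store(&g_ready, 1);  // ... made visible by the release store
}

bool TryConsume(int* out) {
  // The acquire load pairs with the release store: if it observes 1, the
  // producer's write to g_payload is guaranteed to be visible here.
  if (v8::base::Acquire_Load(&g_ready) == 0) return false;
  *out = g_payload;
  return true;
}

}  // namespace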
diff --git a/chromium/v8/src/base/atomicops_internals_arm64_gcc.h b/chromium/v8/src/base/atomicops_internals_arm64_gcc.h
new file mode 100644
index 00000000000..b01783e6a7e
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,316 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+namespace v8 {
+namespace base {
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
+}
+
+// The NoBarrier versions of the operations include "memory" in the clobber
+// list. This is not required for direct usage of the NoBarrier versions of the
+// operations. However, it is required for correctness when they are used as
+// part of the Acquire or Release versions, to ensure that nothing from outside
+// the call is reordered between the operation and the memory barrier. This
+// does not change the generated code, so it has little or no impact on the
+// NoBarrier operations.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"IJr" (old_value),
+ [new_value]"r" (new_value)
+ : "cc", "memory"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"IJr" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+
+ MemoryBarrier();
+ result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+
+ return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+
+ MemoryBarrier();
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %w[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %w[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"IJr" (old_value),
+ [new_value]"r" (new_value)
+ : "cc", "memory"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"IJr" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+
+ MemoryBarrier();
+ result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+
+ return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+
+ MemoryBarrier();
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %x[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %x[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_arm_gcc.h b/chromium/v8/src/base/atomicops_internals_arm_gcc.h
new file mode 100644
index 00000000000..069b1ffa883
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_arm_gcc.h
@@ -0,0 +1,301 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+#if defined(__QNXNTO__)
+#include <sys/cpuinline.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP; there is no memory barrier instruction at
+// all on this architecture, or when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+// writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+// barrier (though writing to the co-processor will still work).
+// However, on single core devices (e.g. Nexus One, or Nexus S),
+// this instruction will take up to 200 ns, which is huge, even though
+// it's completely un-needed on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+// single or multi-core. However, the kernel provides a useful helper
+// function at a fixed memory address (0xffff0fa0), which will always
+// perform a memory barrier in the most efficient way. I.e. on single
+// core devices, this is an empty function that exits immediately.
+// On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+// are needed for correct execution. Always call the kernel helper, even
+// when targeting ARMv5TE.
+//
+
+inline void MemoryBarrier() {
+#if defined(__linux__) || defined(__ANDROID__)
+ // Note: This is a function call, which is also an implicit compiler barrier.
+ typedef void (*KernelMemoryBarrierFunc)();
+ ((KernelMemoryBarrierFunc)0xffff0fa0)();
+#elif defined(__QNXNTO__)
+ __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
+
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__)
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ int reloop;
+ do {
+ // The following is equivalent to:
+ //
+ // prev_value = LDREX(ptr)
+ // reloop = 0
+ // if (prev_value != old_value)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " mov %1, #0\n"
+ " cmp %0, %4\n"
+#ifdef __thumb2__
+ " it eq\n"
+#endif
+ " strexeq %1, %5, [%3]\n"
+ : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(old_value), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
+ return prev_value;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 value;
+ int reloop;
+ do {
+ // Equivalent to:
+ //
+ // value = LDREX(ptr)
+ // value += increment
+ // reloop = STREX(ptr, value)
+ //
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " add %0, %0, %4\n"
+ " strex %1, %0, [%3]\n"
+ : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(increment)
+ : "cc", "memory");
+ } while (reloop);
+ return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ // TODO(digit): Investigate if it's possible to implement this with
+ // a single MemoryBarrier() operation between the LDREX and STREX.
+ // See http://crbug.com/246514
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ int reloop;
+ do {
+ // old_value = LDREX(ptr)
+ // reloop = STREX(ptr, new_value)
+ __asm__ __volatile__(" ldrex %0, [%3]\n"
+ " strex %1, %4, [%3]\n"
+ : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+ : "r"(ptr), "r"(new_value)
+ : "cc", "memory");
+ } while (reloop != 0);
+ return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always performs a full memory barrier; there is no
+// need to add calls to MemoryBarrier() before or after it. It also
+// returns 0 on success, and 1 on failure.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,
+ Atomic32 new_value,
+ volatile Atomic32* ptr) {
+ typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+ return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
+}
+
+} // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value)
+ return prev_value;
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ for (;;) {
+ // Atomically exchange the old value with an incremented one.
+ Atomic32 old_value = *ptr;
+ Atomic32 new_value = old_value + increment;
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
+ // The exchange took place as expected.
+ return new_value;
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ for (;;) {
+ prev_value = *ptr;
+ if (prev_value != old_value) {
+ // Always ensure acquire semantics.
+ MemoryBarrier();
+ return prev_value;
+ }
+ if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+ return old_value;
+ }
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ // This could be implemented as:
+ // MemoryBarrier();
+ // return NoBarrier_CompareAndSwap();
+ //
+ // But that would use 3 barriers per successful CAS. For better performance,
+ // use Acquire_CompareAndSwap(). Its implementation guarantees that:
+ // - A successful swap uses only 2 barriers (in the kernel helper).
+ // - An early return due to (prev_value != old_value) performs
+ // a memory barrier with no store, which is equivalent to the
+ // generic implementation above.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#else
+# error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed when |ptr| values are 32-bit aligned.
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// Byte accessors.
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_atomicword_compat.h b/chromium/v8/src/base/atomicops_internals_atomicword_compat.h
new file mode 100644
index 00000000000..0530ced2a44
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32_t,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
+
+#if !defined(V8_HOST_ARCH_64_BIT)
+
+namespace v8 {
+namespace base {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return NoBarrier_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return NoBarrier_AtomicExchange(
+ reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return NoBarrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Barrier_AtomicIncrement(
+ reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::base::Acquire_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return v8::base::Release_CompareAndSwap(
+ reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ NoBarrier_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::base::Acquire_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return v8::base::Release_Store(
+ reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return NoBarrier_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return v8::base::Acquire_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return v8::base::Release_Load(
+ reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+} } // namespace v8::base
+
+#endif // !defined(V8_HOST_ARCH_64_BIT)
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
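The overloads above exist because, as the header comment notes, AtomicWord (intptr_t) and Atomic32 can be distinct fundamental types on some LP32 platforms, so an AtomicWord* would not bind to the Atomic32 functions declared in atomicops.h. A small sketch of the kind of caller that relies on these forwarding overloads; RefCount, Retain, and Release are illustrative names, not part of the commit.

// Sketch: a reference count stored as an AtomicWord. On 32-bit targets the
// calls below resolve through the AtomicWord overloads above, which forward to
// the Atomic32 implementations after a reinterpret_cast.
#include "src/base/atomicops.h"

struct RefCount {
  v8::base::AtomicWord count;
};

void Retain(RefCount* rc) {
  v8::base::NoBarrier_AtomicIncrement(&rc->count, 1);
}

bool Release(RefCount* rc) {
  // Barrier variant so that prior writes are ordered before a possible
  // teardown by whoever observes the count reaching zero.
  return v8::base::Barrier_AtomicIncrement(&rc->count, -1) == 0;
}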
diff --git a/chromium/v8/src/base/atomicops_internals_mac.h b/chromium/v8/src/base/atomicops_internals_mac.h
new file mode 100644
index 00000000000..a046872e4d0
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_mac.h
@@ -0,0 +1,204 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+ OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64(old_value, new_value,
+ reinterpret_cast<volatile int64_t*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+ reinterpret_cast<volatile int64_t*>(ptr)));
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64Barrier(increment,
+ reinterpret_cast<volatile int64_t*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64Barrier(
+ old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ // The libkern interface does not distinguish between
+ // Acquire and Release memory barriers; they are equivalent.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif // defined(__LP64__)
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_mips_gcc.h b/chromium/v8/src/base/atomicops_internals_mips_gcc.h
new file mode 100644
index 00000000000..0d3a0e38c13
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_mips_gcc.h
@@ -0,0 +1,159 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+
+namespace v8 {
+namespace base {
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %5\n" // prev = *ptr
+ "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
+ "move %2, %4\n" // tmp = new_value
+ "sc %2, %1\n" // *ptr = tmp (with atomic check)
+ "beqz %2, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ "2:\n"
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+ : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+ : "memory");
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 temp, old;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %1, %2\n" // old = *ptr
+ "move %0, %3\n" // temp = new_value
+ "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory");
+
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp, temp2;
+
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %2\n" // temp = *ptr
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory");
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ MemoryBarrier();
+ Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_tsan.h b/chromium/v8/src/base/atomicops_internals_tsan.h
new file mode 100644
index 00000000000..363668d86d3
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_tsan.h
@@ -0,0 +1,379 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use base/atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
+
+namespace v8 {
+namespace base {
+
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+// This struct is not part of the public API of this module; clients may not
+// use it. (However, it's exported via BASE_EXPORT because clients implicitly
+// do use it at link time by inlining these functions.)
+// Features of this x86. Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct
+ AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+extern "C" {
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
+
+typedef enum {
+ __tsan_memory_order_relaxed,
+ __tsan_memory_order_consume,
+ __tsan_memory_order_acquire,
+ __tsan_memory_order_release,
+ __tsan_memory_order_acq_rel,
+ __tsan_memory_order_seq_cst,
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
+ __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
+ __tsan_memory_order mo);
+
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
+ __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
+ __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+ volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+ volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+ volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+ volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+ volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+} // extern "C"
+
+#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire, __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release, __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline void MemoryBarrier() {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+} // namespace base
+} // namespace v8
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/chromium/v8/src/base/atomicops_internals_x86_gcc.cc b/chromium/v8/src/base/atomicops_internals_x86_gcc.cc
new file mode 100644
index 00000000000..b8ba0c34b56
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_x86_gcc.cc
@@ -0,0 +1,111 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <string.h>
+
+#include "src/base/atomicops.h"
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file. If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction. In PIC compilations, %ebx contains the address
+// of the global offset table. To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+ asm("mov %%ebx, %%edi\n" \
+ "cpuid\n" \
+ "xchg %%edi, %%ebx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined(__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+ asm("mov %%rbx, %%rdi\n" \
+ "cpuid\n" \
+ "xchg %%rdi, %%rbx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
+
+#if defined(cpuid) // initialize the struct only on x86
+
+namespace v8 {
+namespace base {
+
+// Set the flags so that code will run correctly and conservatively: even if
+// we haven't been initialized yet, we're probably single-threaded, and our
+// default values should be pretty safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+ false, // bug can't exist before process spawns multiple threads
+ false, // no SSE2
+};
+
+} } // namespace v8::base
+
+namespace {
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+void AtomicOps_Internalx86CPUFeaturesInit() {
+ using v8::base::AtomicOps_Internalx86CPUFeatures;
+
+ uint32_t eax = 0;
+ uint32_t ebx = 0;
+ uint32_t ecx = 0;
+ uint32_t edx = 0;
+
+ // Get vendor string (issue CPUID with eax = 0)
+ cpuid(eax, ebx, ecx, edx, 0);
+ char vendor[13];
+ memcpy(vendor, &ebx, 4);
+ memcpy(vendor + 4, &edx, 4);
+ memcpy(vendor + 8, &ecx, 4);
+ vendor[12] = 0;
+
+ // get feature flags in ecx/edx, and family/model in eax
+ cpuid(eax, ebx, ecx, edx, 1);
+
+ int family = (eax >> 8) & 0xf; // family and model fields
+ int model = (eax >> 4) & 0xf;
+ if (family == 0xf) { // use extended family and model fields
+ family += (eax >> 20) & 0xff;
+ model += ((eax >> 16) & 0xf) << 4;
+ }
+
+ // Opteron Rev E has a bug in which on very rare occasions a locked
+ // instruction doesn't act as a read-acquire barrier if followed by a
+ // non-locked read-modify-write instruction. Rev F has this bug in
+ // pre-release versions, but not in versions released to customers,
+ // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+ if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
+ family == 15 &&
+ 32 <= model && model <= 63) {
+ AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+ } else {
+ AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+ }
+
+  // edx bit 26 is SSE2, which we use to tell us whether we can use mfence
+ AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+}
+
+class AtomicOpsx86Initializer {
+ public:
+ AtomicOpsx86Initializer() {
+ AtomicOps_Internalx86CPUFeaturesInit();
+ }
+};
+
+
+// A global to get us initialized on startup via static initialization :/
+AtomicOpsx86Initializer g_initer;
+
+} // namespace
+
+#endif // if x86
+
+#endif // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_x86_gcc.h b/chromium/v8/src/base/atomicops_internals_x86_gcc.h
new file mode 100644
index 00000000000..00b64484683
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_x86_gcc.h
@@ -0,0 +1,272 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+namespace v8 {
+namespace base {
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86. Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
+ : "=a" (prev)
+ : "q" (new_value), "m" (*ptr), "0" (old_value)
+ : "memory");
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
+ : "=r" (new_value)
+ : "m" (*ptr), "0" (new_value)
+ : "memory");
+ return new_value; // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp = increment;
+ __asm__ __volatile__("lock; xaddl %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now holds the old value of *ptr
+ return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp = increment;
+ __asm__ __volatile__("lock; xaddl %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now holds the old value of *ptr
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit implementations of memory barrier can be simpler, because
+// "mfence" is guaranteed to exist.
+inline void MemoryBarrier() {
+ __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+#else
+
+inline void MemoryBarrier() {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else { // mfence is faster but not present on PIII
+ Atomic32 x = 0;
+ NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
+ }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ *ptr = value;
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier on PIII
+ }
+}
+#endif
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ATOMICOPS_COMPILER_BARRIER();
+ *ptr = value; // An x86 store acts as a release barrier.
+ // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
+ // See comments in Atomic64 version of Release_Store(), below.
+ ATOMICOPS_COMPILER_BARRIER();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ __asm__ __volatile__("lock; cmpxchgq %1,%2"
+ : "=a" (prev)
+ : "q" (new_value), "m" (*ptr), "0" (old_value)
+ : "memory");
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
+ : "=r" (new_value)
+ : "m" (*ptr), "0" (new_value)
+ : "memory");
+ return new_value; // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp = increment;
+ __asm__ __volatile__("lock; xaddq %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now contains the previous value of *ptr
+ return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp = increment;
+ __asm__ __volatile__("lock; xaddq %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now contains the previous value of *ptr
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ATOMICOPS_COMPILER_BARRIER();
+
+ *ptr = value; // An x86 store acts as a release barrier
+ // for current AMD/Intel chips as of Jan 2008.
+ // See also Acquire_Load(), below.
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //     System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+ //
+ // x86 stores/loads fail to act as barriers for a few instructions (clflush
+ // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+ // not generated by the compiler, and are rare. Users of these instructions
+ // need to know about cache behaviour in any case since all of these involve
+ // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
+ // for current AMD/Intel chips as of Jan 2008.
+ // See also Release_Store(), above.
+ ATOMICOPS_COMPILER_BARRIER();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return x;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
+
+} } // namespace v8::base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/chromium/v8/src/base/atomicops_internals_x86_msvc.h b/chromium/v8/src/base/atomicops_internals_x86_msvc.h
new file mode 100644
index 00000000000..adc40318e92
--- /dev/null
+++ b/chromium/v8/src/base/atomicops_internals_x86_msvc.h
@@ -0,0 +1,202 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include "src/base/macros.h"
+#include "src/base/win32-headers.h"
+
+#if defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// X64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ LONG result = InterlockedCompareExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value),
+ static_cast<LONG>(old_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ LONG result = InterlockedExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return InterlockedExchangeAdd(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+inline void MemoryBarrier() {
+#if defined(V8_HOST_ARCH_64_BIT)
+ // See #undef and note at the top of this file.
+ __faststorefence();
+#else
+ // We use MemoryBarrier from WinNT.h
+ ::MemoryBarrier();
+#endif
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+ // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ PVOID result = InterlockedCompareExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ PVOID result = InterlockedExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return InterlockedExchangeAdd64(
+ reinterpret_cast<volatile LONGLONG*>(ptr),
+ static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //   System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif // defined(_WIN64)
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/chromium/v8/src/base/build_config.h b/chromium/v8/src/base/build_config.h
new file mode 100644
index 00000000000..e412b92dfef
--- /dev/null
+++ b/chromium/v8/src/base/build_config.h
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BUILD_CONFIG_H_
+#define V8_BASE_BUILD_CONFIG_H_
+
+#include "include/v8config.h"
+
+// Processor architecture detection. For more info on what's defined, see:
+// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+// http://www.agner.org/optimize/calling_conventions.pdf
+// or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#if defined(__native_client__)
+// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
+// generates ARM machine code, together with a portable ARM simulator
+// compiled for the host architecture in question.
+//
+// Since Native Client is ILP-32 on all architectures we use
+// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#else
+#define V8_HOST_ARCH_X64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#endif // __native_client__
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__AARCH64EL__)
+#define V8_HOST_ARCH_ARM64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__ARMEL__)
+#define V8_HOST_ARCH_ARM 1
+#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
+#define V8_HOST_ARCH_MIPS 1
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#error "Host architecture was not detected as supported by v8"
+#endif
+
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+# ifndef CAN_USE_VFP3_INSTRUCTIONS
+# define CAN_USE_VFP3_INSTRUCTIONS
+# endif
+#endif
+
+
+// Target architecture detection. This may be set externally. If not, detect
+// in the same way as the host architecture, that is, target the native
+// environment as presented by the compiler.
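+//
+// For example, a simulator build could set the target on the compiler command
+// line (a hypothetical invocation, not mandated by this header); the
+// host/target checks below allow an arm64 target on an x64 host:
+//   g++ -DV8_TARGET_ARCH_ARM64=1 ...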
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
+ !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_TARGET_ARCH_X64 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_TARGET_ARCH_IA32 1
+#elif defined(__AARCH64EL__)
+#define V8_TARGET_ARCH_ARM64 1
+#elif defined(__ARMEL__)
+#define V8_TARGET_ARCH_ARM 1
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
+#define V8_TARGET_ARCH_MIPS 1
+#else
+#error Target architecture was not detected as supported by v8
+#endif
+#endif
+
+// Check for supported combinations of host and target architectures.
+#if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32
+#error Target architecture ia32 is only supported on ia32 host
+#endif
+#if V8_TARGET_ARCH_X64 && !V8_HOST_ARCH_X64
+#error Target architecture x64 is only supported on x64 host
+#endif
+#if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
+#error Target architecture arm is only supported on arm and ia32 host
+#endif
+#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
+#error Target architecture arm64 is only supported on arm64 and x64 host
+#endif
+#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
+#error Target architecture mips is only supported on mips and ia32 host
+#endif
+
+// Determine architecture endianness.
+#if V8_TARGET_ARCH_IA32
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_X64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_ARM64
+#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_MIPS
+#if defined(__MIPSEB__)
+#define V8_TARGET_BIG_ENDIAN 1
+#else
+#define V8_TARGET_LITTLE_ENDIAN 1
+#endif
+#elif V8_TARGET_ARCH_X87
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#error Unknown target architecture endianness
+#endif
+
+#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__)
+#define USING_BSD_ABI
+#endif
+
+#endif // V8_BASE_BUILD_CONFIG_H_
diff --git a/chromium/v8/src/base/lazy-instance.h b/chromium/v8/src/base/lazy-instance.h
new file mode 100644
index 00000000000..a20689a16c4
--- /dev/null
+++ b/chromium/v8/src/base/lazy-instance.h
@@ -0,0 +1,237 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The LazyInstance<Type, Traits> class manages a single instance of Type,
+// which will be lazily created on the first time it's accessed. This class is
+// useful for places you would normally use a function-level static, but you
+// need to have guaranteed thread-safety. The Type constructor will only ever
+// be called once, even if two threads are racing to create the object. Get()
+// and Pointer() will always return the same, completely initialized instance.
+//
+// LazyInstance is completely thread safe, assuming that you create it safely.
+// The class was designed to be POD initialized, so it shouldn't require a
+// static constructor. It really only makes sense to declare a LazyInstance as
+// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
+//
+// LazyInstance is similar to Singleton, except it does not have the singleton
+// property. You can have multiple LazyInstances of the same type, and each
+// will manage a unique instance. It also preallocates the space for Type, so
+// as to avoid allocating the Type instance on the heap. This may help the
+// performance of creating the instance and reduce heap fragmentation. This
+// requires that Type be a complete type so we can determine the size. See
+// notes for advanced users below for more explanations.
+//
+// Example usage:
+// static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
+// void SomeMethod() {
+// my_instance.Get().SomeMethod(); // MyClass::SomeMethod()
+//
+// MyClass* ptr = my_instance.Pointer();
+// ptr->DoDoDo(); // MyClass::DoDoDo
+// }
+//
+// Additionally you can override the way your instance is constructed by
+// providing your own trait:
+// Example usage:
+// struct MyCreateTrait {
+// static void Construct(MyClass* allocated_ptr) {
+// new (allocated_ptr) MyClass(/* extra parameters... */);
+// }
+// };
+// static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
+// LAZY_INSTANCE_INITIALIZER;
+//
+// WARNINGS:
+// - This implementation of LazyInstance IS THREAD-SAFE by default. See
+// SingleThreadInitOnceTrait if you don't care about thread safety.
+// - Lazy initialization comes with a cost. Make sure that you don't use it on
+//   a critical path. Consider adding your initialization code to a function
+// which is explicitly called once.
+//
+// Notes for advanced users:
+// LazyInstance can actually be used in two different ways:
+//
+// - "Static mode" which is the default mode since it is the most efficient
+// (no extra heap allocation). In this mode, the instance is statically
+// allocated (stored in the global data section at compile time).
+// The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
+// must be used to initialize static lazy instances.
+//
+// - "Dynamic mode". In this mode, the instance is dynamically allocated and
+// constructed (using new) by default. This mode is useful if you have to
+// deal with some code already allocating the instance for you (e.g.
+// OS::Mutex() which returns a new private OS-dependent subclass of Mutex).
+// The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
+// dynamic lazy instances.
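+//
+//   Example usage (dynamic mode; a minimal sketch where MyMutex and its
+//   MyMutex::New() factory are hypothetical):
+//     struct CreateMyMutexTrait {
+//       static MyMutex* Create() { return MyMutex::New(); }
+//     };
+//     static LazyDynamicInstance<MyMutex, CreateMyMutexTrait>::type
+//         my_mutex = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+//     // my_mutex.Pointer() lazily calls CreateMyMutexTrait::Create() once
+//     // and then always returns the same MyMutex*.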
+
+#ifndef V8_BASE_LAZY_INSTANCE_H_
+#define V8_BASE_LAZY_INSTANCE_H_
+
+#include "src/base/macros.h"
+#include "src/base/once.h"
+
+namespace v8 {
+namespace base {
+
+#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, { {} } }
+#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
+
+// Default to static mode.
+#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+template <typename T>
+struct LeakyInstanceTrait {
+ static void Destroy(T* /* instance */) {}
+};
+
+
+// Traits that define how an instance is allocated and accessed.
+
+
+template <typename T>
+struct StaticallyAllocatedInstanceTrait {
+ // 16-byte alignment fallback to be on the safe side here.
+ struct V8_ALIGNAS(T, 16) StorageType {
+ char x[sizeof(T)];
+ };
+
+ STATIC_ASSERT(V8_ALIGNOF(StorageType) >= V8_ALIGNOF(T));
+
+ static T* MutableInstance(StorageType* storage) {
+ return reinterpret_cast<T*>(storage);
+ }
+
+ template <typename ConstructTrait>
+ static void InitStorageUsingTrait(StorageType* storage) {
+ ConstructTrait::Construct(MutableInstance(storage));
+ }
+};
+
+
+template <typename T>
+struct DynamicallyAllocatedInstanceTrait {
+ typedef T* StorageType;
+
+ static T* MutableInstance(StorageType* storage) {
+ return *storage;
+ }
+
+ template <typename CreateTrait>
+ static void InitStorageUsingTrait(StorageType* storage) {
+ *storage = CreateTrait::Create();
+ }
+};
+
+
+template <typename T>
+struct DefaultConstructTrait {
+ // Constructs the provided object which was already allocated.
+ static void Construct(T* allocated_ptr) {
+ new(allocated_ptr) T();
+ }
+};
+
+
+template <typename T>
+struct DefaultCreateTrait {
+ static T* Create() {
+ return new T();
+ }
+};
+
+
+struct ThreadSafeInitOnceTrait {
+ template <typename Function, typename Storage>
+ static void Init(OnceType* once, Function function, Storage storage) {
+ CallOnce(once, function, storage);
+ }
+};
+
+
+// Initialization trait for users who don't care about thread-safety.
+struct SingleThreadInitOnceTrait {
+ template <typename Function, typename Storage>
+ static void Init(OnceType* once, Function function, Storage storage) {
+ if (*once == ONCE_STATE_UNINITIALIZED) {
+ function(storage);
+ *once = ONCE_STATE_DONE;
+ }
+ }
+};
+
+
+// TODO(pliard): Handle instance destruction (using global destructors).
+template <typename T, typename AllocationTrait, typename CreateTrait,
+ typename InitOnceTrait, typename DestroyTrait /* not used yet. */>
+struct LazyInstanceImpl {
+ public:
+ typedef typename AllocationTrait::StorageType StorageType;
+
+ private:
+ static void InitInstance(StorageType* storage) {
+ AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
+ }
+
+ void Init() const {
+ InitOnceTrait::Init(
+ &once_,
+ // Casts to void* are needed here to avoid breaking strict aliasing
+ // rules.
+ reinterpret_cast<void(*)(void*)>(&InitInstance), // NOLINT
+ reinterpret_cast<void*>(&storage_));
+ }
+
+ public:
+ T* Pointer() {
+ Init();
+ return AllocationTrait::MutableInstance(&storage_);
+ }
+
+ const T& Get() const {
+ Init();
+ return *AllocationTrait::MutableInstance(&storage_);
+ }
+
+ mutable OnceType once_;
+ // Note that the previous field, OnceType, is an AtomicWord which guarantees
+ // 4-byte alignment of the storage field below. If compiling with GCC (>4.2),
+  // the V8_ALIGNAS attribute above will guarantee correctness for any alignment.
+ mutable StorageType storage_;
+};
+
+
+template <typename T,
+ typename CreateTrait = DefaultConstructTrait<T>,
+ typename InitOnceTrait = ThreadSafeInitOnceTrait,
+ typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyStaticInstance {
+ typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>,
+ CreateTrait, InitOnceTrait, DestroyTrait> type;
+};
+
+
+template <typename T,
+ typename CreateTrait = DefaultConstructTrait<T>,
+ typename InitOnceTrait = ThreadSafeInitOnceTrait,
+ typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyInstance {
+ // A LazyInstance is a LazyStaticInstance.
+ typedef typename LazyStaticInstance<T, CreateTrait, InitOnceTrait,
+ DestroyTrait>::type type;
+};
+
+
+template <typename T,
+ typename CreateTrait = DefaultCreateTrait<T>,
+ typename InitOnceTrait = ThreadSafeInitOnceTrait,
+ typename DestroyTrait = LeakyInstanceTrait<T> >
+struct LazyDynamicInstance {
+ typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>,
+ CreateTrait, InitOnceTrait, DestroyTrait> type;
+};
+
+} } // namespace v8::base
+
+#endif // V8_BASE_LAZY_INSTANCE_H_
diff --git a/chromium/v8/src/base/macros.h b/chromium/v8/src/base/macros.h
new file mode 100644
index 00000000000..736a656ed9f
--- /dev/null
+++ b/chromium/v8/src/base/macros.h
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_MACROS_H_
+#define V8_BASE_MACROS_H_
+
+#include "include/v8stdint.h"
+
+
+// The expression OFFSET_OF(type, field) computes the byte-offset
+// of the specified field relative to the containing type. This
+// corresponds to 'offsetof' (in stddef.h), except that it doesn't
+// use 0 or NULL, which causes a problem with the compiler warnings
+// we have enabled (which is also why 'offsetof' doesn't seem to work).
+// Here we simply use the non-zero value 4, which seems to work.
+#define OFFSET_OF(type, field) \
+ (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
+
+
+// The expression ARRAY_SIZE(a) is a compile-time constant of type
+// size_t which represents the number of elements of the given
+// array. You should only use ARRAY_SIZE on statically allocated
+// arrays.
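+//
+// For example (a minimal sketch):
+//   static const int kPrimes[] = {2, 3, 5, 7};
+//   size_t count = ARRAY_SIZE(kPrimes);  // count == 4.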
+#define ARRAY_SIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+
+// A macro to disallow the evil copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) V8_DELETE; \
+ void operator=(const TypeName&) V8_DELETE
+
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() V8_DELETE; \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+
+// Newly written code should use V8_INLINE and V8_NOINLINE directly.
+#define INLINE(declarator) V8_INLINE declarator
+#define NO_INLINE(declarator) V8_NOINLINE declarator
+
+
+// Newly written code should use V8_WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
+
+
+// Define V8_USE_ADDRESS_SANITIZER macros.
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_USE_ADDRESS_SANITIZER 1
+#endif
+#endif
+
+// Define DISABLE_ASAN macros.
+#ifdef V8_USE_ADDRESS_SANITIZER
+#define DISABLE_ASAN __attribute__((no_sanitize_address))
+#else
+#define DISABLE_ASAN
+#endif
+
+
+#if V8_CC_GNU
+#define V8_IMMEDIATE_CRASH() __builtin_trap()
+#else
+#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
+#endif
+
+
+// Use C++11 static_assert if possible, which gives error
+// messages that are easier to understand on first sight.
+#if V8_HAS_CXX11_STATIC_ASSERT
+#define STATIC_ASSERT(test) static_assert(test, #test)
+#else
+// This is inspired by the static assertion facility in boost. This
+// is pretty magical. If it causes you trouble on a platform you may
+// find a fix in the boost code.
+template <bool> class StaticAssertion;
+template <> class StaticAssertion<true> { };
+// This macro joins two tokens. If one of the tokens is a macro the
+// helper call causes it to be resolved before joining.
+#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
+#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
+// Causes an error during compilation if the condition is not
+// statically known to be true. It is formulated as a typedef so that
+// it can be used wherever a typedef can be used. Beware that this
+// actually causes each use to introduce a new defined type with a
+// name depending on the source line.
+template <int> class StaticAssertionHelper { };
+#define STATIC_ASSERT(test) \
+ typedef \
+ StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
+ SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
+
+#endif
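+
+// With either STATIC_ASSERT implementation above, a compile-time check can be
+// written as, for example:
+//   STATIC_ASSERT(sizeof(int32_t) == 4);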
+
+
+// The USE(x) template is used to silence C++ compiler warnings
+// issued for (yet) unused variables (typically parameters).
+template <typename T>
+inline void USE(T) { }
+
+
+#define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+// The following macro works on both 32 and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456
+// write V8_2PART_UINT64_C(0x12345678,90123456);
+#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+
+#endif // V8_BASE_MACROS_H_
diff --git a/chromium/v8/src/base/once.cc b/chromium/v8/src/base/once.cc
new file mode 100644
index 00000000000..eaabf40d9a5
--- /dev/null
+++ b/chromium/v8/src/base/once.cc
@@ -0,0 +1,53 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/once.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#endif
+
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace base {
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
+ AtomicWord state = Acquire_Load(once);
+ // Fast path. The provided function was already executed.
+ if (state == ONCE_STATE_DONE) {
+ return;
+ }
+
+  // The function execution has not completed yet. The once object can be in one
+ // of the two following states:
+ // - UNINITIALIZED: We are the first thread calling this function.
+ // - EXECUTING_FUNCTION: Another thread is already executing the function.
+ //
+ // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
+ // atomically.
+ state = Acquire_CompareAndSwap(
+ once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
+ if (state == ONCE_STATE_UNINITIALIZED) {
+ // We are the first thread to call this function, so we have to call the
+ // function.
+ init_func(arg);
+ Release_Store(once, ONCE_STATE_DONE);
+ } else {
+ // Another thread has already started executing the function. We need to
+ // wait until it completes the initialization.
+ while (state == ONCE_STATE_EXECUTING_FUNCTION) {
+#ifdef _WIN32
+ ::Sleep(0);
+#else
+ sched_yield();
+#endif
+ state = Acquire_Load(once);
+ }
+ }
+}
+
+} } // namespace v8::base
diff --git a/chromium/v8/src/base/once.h b/chromium/v8/src/base/once.h
new file mode 100644
index 00000000000..a8e8437afaa
--- /dev/null
+++ b/chromium/v8/src/base/once.h
@@ -0,0 +1,100 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// emulates google3/base/once.h
+//
+// This header is intended to be included only by v8's internal code. Users
+// should not use this directly.
+//
+// This is basically a portable version of pthread_once().
+//
+// This header declares:
+// * A type called OnceType.
+// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type
+// OnceType.
+// * A function CallOnce(OnceType* once, void (*init_func)()).
+// This function, when invoked multiple times given the same OnceType object,
+// will invoke init_func on the first call only, and will make sure none of
+// the calls return before that first call to init_func has finished.
+//
+// Additionally, the following features are supported:
+// * A macro V8_ONCE_INIT which is expanded into the expression used to
+//   initialize an OnceType. This is only useful when clients embed an
+//   OnceType into a structure of their own and want to initialize it
+//   statically.
+// * The user can provide a parameter which CallOnce() forwards (as a pointer)
+//   to the user-provided function when it is called. Usage example:
+//     CallOnce(&my_once, &MyFunctionExpectingIntPointerArgument, &my_int);
+// * This implementation guarantees that OnceType is a POD (i.e. no static
+// initializer generated).
+//
+// This implements a way to perform lazy initialization. It's more efficient
+// than using mutexes as no lock is needed if initialization has already
+// happened.
+//
+// Example usage:
+// void Init();
+// V8_DECLARE_ONCE(once_init);
+//
+// // Calls Init() exactly once.
+// void InitOnce() {
+// CallOnce(&once_init, &Init);
+// }
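+//
+// Example usage with an argument (a minimal sketch; InitCache and
+// g_cache_size are hypothetical):
+//   void InitCache(int* size);
+//   V8_DECLARE_ONCE(once_init_cache);
+//   int g_cache_size = 128;
+//
+//   // Calls InitCache(&g_cache_size) exactly once.
+//   void InitCacheOnce() {
+//     CallOnce(&once_init_cache, &InitCache, &g_cache_size);
+//   }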
+//
+// Note that if CallOnce() is called before main() has begun, it must
+// only be called by the thread that will eventually call main() -- that is,
+// the thread that performs dynamic initialization. In general this is a safe
+// assumption since people don't usually construct threads before main() starts,
+// but it is technically not guaranteed. Unfortunately, Win32 provides no way
+// whatsoever to statically-initialize its synchronization primitives, so our
+// only choice is to assume that dynamic initialization is single-threaded.
+
+#ifndef V8_BASE_ONCE_H_
+#define V8_BASE_ONCE_H_
+
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace base {
+
+typedef AtomicWord OnceType;
+
+#define V8_ONCE_INIT 0
+
+#define V8_DECLARE_ONCE(NAME) ::v8::base::OnceType NAME
+
+enum {
+ ONCE_STATE_UNINITIALIZED = 0,
+ ONCE_STATE_EXECUTING_FUNCTION = 1,
+ ONCE_STATE_DONE = 2
+};
+
+typedef void (*NoArgFunction)();
+typedef void (*PointerArgFunction)(void* arg);
+
+template <typename T>
+struct OneArgFunction {
+ typedef void (*type)(T);
+};
+
+void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+
+inline void CallOnce(OnceType* once, NoArgFunction init_func) {
+ if (Acquire_Load(once) != ONCE_STATE_DONE) {
+ CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
+ }
+}
+
+
+template <typename Arg>
+inline void CallOnce(OnceType* once,
+ typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
+ if (Acquire_Load(once) != ONCE_STATE_DONE) {
+ CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
+ static_cast<void*>(arg));
+ }
+}
+
+} } // namespace v8::base
+
+#endif // V8_BASE_ONCE_H_
diff --git a/chromium/v8/src/base/safe_conversions.h b/chromium/v8/src/base/safe_conversions.h
new file mode 100644
index 00000000000..0a1bd696469
--- /dev/null
+++ b/chromium/v8/src/base/safe_conversions.h
@@ -0,0 +1,67 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_H_
+#define V8_BASE_SAFE_CONVERSIONS_H_
+
+#include <limits>
+
+#include "src/base/safe_conversions_impl.h"
+
+namespace v8 {
+namespace base {
+
+// Convenience function that returns true if the supplied value is in range
+// for the destination type.
+template <typename Dst, typename Src>
+inline bool IsValueInRangeForNumericType(Src value) {
+ return internal::DstRangeRelationToSrcRange<Dst>(value) ==
+ internal::RANGE_VALID;
+}
+
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. A NaN source will always trigger a CHECK.
+template <typename Dst, typename Src>
+inline Dst checked_cast(Src value) {
+ CHECK(IsValueInRangeForNumericType<Dst>(value));
+ return static_cast<Dst>(value);
+}
+
+// saturated_cast<> is analogous to static_cast<> for numeric types, except
+// that the specified numeric conversion will saturate rather than overflow or
+// underflow. NaN assignment to an integral will trigger a CHECK condition.
+template <typename Dst, typename Src>
+inline Dst saturated_cast(Src value) {
+ // Optimization for floating point values, which already saturate.
+ if (std::numeric_limits<Dst>::is_iec559)
+ return static_cast<Dst>(value);
+
+ switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
+ case internal::RANGE_VALID:
+ return static_cast<Dst>(value);
+
+ case internal::RANGE_UNDERFLOW:
+ return std::numeric_limits<Dst>::min();
+
+ case internal::RANGE_OVERFLOW:
+ return std::numeric_limits<Dst>::max();
+
+ // Should fail only on attempting to assign NaN to a saturated integer.
+ case internal::RANGE_INVALID:
+ CHECK(false);
+ return std::numeric_limits<Dst>::max();
+ }
+
+ UNREACHABLE();
+ return static_cast<Dst>(value);
+}
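+
+// Example usage (a minimal sketch; the values are chosen to exercise each
+// conversion path):
+//   int8_t a = checked_cast<int8_t>(100);    // In range, so a == 100.
+//   int8_t b = saturated_cast<int8_t>(1000); // Out of range, clamped to 127.
+//   bool ok = IsValueInRangeForNumericType<int8_t>(1000);  // false.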
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_CONVERSIONS_H_
diff --git a/chromium/v8/src/base/safe_conversions_impl.h b/chromium/v8/src/base/safe_conversions_impl.h
new file mode 100644
index 00000000000..2226f17aebd
--- /dev/null
+++ b/chromium/v8/src/base/safe_conversions_impl.h
@@ -0,0 +1,220 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+#define V8_BASE_SAFE_CONVERSIONS_IMPL_H_
+
+#include <limits>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// The std library doesn't provide a binary max_exponent for integers; however,
+// we can compute one by adding one to the number of non-sign bits. This allows
+// for accurate range comparisons between floating point and integer types.
+template <typename NumericType>
+struct MaxExponent {
+ static const int value = std::numeric_limits<NumericType>::is_iec559
+ ? std::numeric_limits<NumericType>::max_exponent
+ : (sizeof(NumericType) * 8 + 1 -
+ std::numeric_limits<NumericType>::is_signed);
+};
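+
+// For example, MaxExponent<int32_t>::value is 32 (31 non-sign bits plus one)
+// while MaxExponent<float>::value is 128, so the int32_t range is contained
+// in the float range (even though not every int32_t is exactly representable
+// as a float).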
+
+enum IntegerRepresentation {
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED
+};
+
+// A range for a given numeric Src type is contained for a given numeric Dst
+// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
+// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// We implement this as template specializations rather than simple static
+// comparisons to ensure type correctness in our comparisons.
+enum NumericRangeRepresentation {
+ NUMERIC_RANGE_NOT_CONTAINED,
+ NUMERIC_RANGE_CONTAINED
+};
+
+// Helper templates to statically determine if our destination type can contain
+// maximum and minimum values represented by the source type.
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign =
+ std::numeric_limits<Src>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED >
+struct StaticDstRangeRelationToSrcRange;
+
+// Same sign: Dst is guaranteed to contain Src only if its range is equal or
+// larger.
+template <typename Dst, typename Src, IntegerRepresentation Sign>
+struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value
+ ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Unsigned to signed: Dst is guaranteed to contain source only if its range is
+// larger.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED> {
+ static const NumericRangeRepresentation value =
+ MaxExponent<Dst>::value > MaxExponent<Src>::value
+ ? NUMERIC_RANGE_CONTAINED
+ : NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+// Signed to unsigned: Dst cannot be statically determined to contain Src.
+template <typename Dst, typename Src>
+struct StaticDstRangeRelationToSrcRange<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED> {
+ static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
+};
+
+enum RangeConstraint {
+ RANGE_VALID = 0x0, // Value can be represented by the destination type.
+  RANGE_UNDERFLOW = 0x1,  // Value would underflow.
+  RANGE_OVERFLOW = 0x2,  // Value would overflow.
+ RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+};
+
+// Helper function for coercing an int back to a RangeConstraint.
+inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
+ // TODO(jochen/jkummerow): Re-enable this when checks.h is available in base.
+ // ASSERT(integer_range_constraint >= RANGE_VALID &&
+ // integer_range_constraint <= RANGE_INVALID);
+ return static_cast<RangeConstraint>(integer_range_constraint);
+}
+
+// This function creates a RangeConstraint from an upper and lower bound
+// check by taking advantage of the fact that only NaN can be out of range in
+// both directions at once.
+inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
+ bool is_in_lower_bound) {
+ return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
+ (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
+}
+
+template <
+ typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+struct DstRangeRelationToSrcRangeImpl;
+
+// The following templates are for ranges that must be verified at runtime. We
+// split them into checks based on signedness to avoid confusing casts and
+// compiler warnings on signed and unsigned comparisons.
+
+// Dst range is statically determined to contain Src: Nothing to check.
+template <typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign,
+ IntegerRepresentation SrcSign>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ DstSign,
+ SrcSign,
+ NUMERIC_RANGE_CONTAINED> {
+ static RangeConstraint Check(Src value) { return RANGE_VALID; }
+};
+
+// Signed to signed narrowing: Both the upper and lower boundaries may be
+// exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return std::numeric_limits<Dst>::is_iec559
+ ? GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
+ value >= -std::numeric_limits<Dst>::max())
+ : GetRangeConstraint(value <= std::numeric_limits<Dst>::max(),
+ value >= std::numeric_limits<Dst>::min());
+ }
+};
+
+// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return GetRangeConstraint(value <= std::numeric_limits<Dst>::max(), true);
+ }
+};
+
+// Unsigned to signed: The upper boundary may be exceeded.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_SIGNED,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return sizeof(Dst) > sizeof(Src)
+ ? RANGE_VALID
+ : GetRangeConstraint(
+ value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
+ true);
+ }
+};
+
+// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
+// and any negative value exceeds the lower boundary.
+template <typename Dst, typename Src>
+struct DstRangeRelationToSrcRangeImpl<Dst,
+ Src,
+ INTEGER_REPRESENTATION_UNSIGNED,
+ INTEGER_REPRESENTATION_SIGNED,
+ NUMERIC_RANGE_NOT_CONTAINED> {
+ static RangeConstraint Check(Src value) {
+ return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
+ ? GetRangeConstraint(true, value >= static_cast<Src>(0))
+ : GetRangeConstraint(
+ value <= static_cast<Src>(std::numeric_limits<Dst>::max()),
+ value >= static_cast<Src>(0));
+ }
+};
+
+template <typename Dst, typename Src>
+inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
+ // Both source and destination must be numeric.
+ STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+ STATIC_ASSERT(std::numeric_limits<Dst>::is_specialized);
+ return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+}
+
+} // namespace internal
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_CONVERSIONS_IMPL_H_
diff --git a/chromium/v8/src/base/safe_math.h b/chromium/v8/src/base/safe_math.h
new file mode 100644
index 00000000000..62a2f723f2b
--- /dev/null
+++ b/chromium/v8/src/base/safe_math.h
@@ -0,0 +1,276 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_MATH_H_
+#define V8_BASE_SAFE_MATH_H_
+
+#include "src/base/safe_math_impl.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+// CheckedNumeric implements all the logic and operators for detecting integer
+// boundary conditions such as overflow, underflow, and invalid conversions.
+// The CheckedNumeric type implicitly converts from floating point and integer
+// data types, and contains overloads for basic arithmetic operations (i.e.: +,
+// -, *, /, %).
+//
+// The following methods convert from CheckedNumeric to standard numeric values:
+//  IsValid() - Returns true if the underlying numeric value is valid (i.e. it
+//     has not wrapped and is not the result of an invalid conversion).
+// ValueOrDie() - Returns the underlying value. If the state is not valid this
+// call will crash on a CHECK.
+// ValueOrDefault() - Returns the current value, or the supplied default if the
+// state is not valid.
+//  ValueFloating() - Returns the underlying floating point value (valid
+//     only for floating point CheckedNumeric types).
+//
+// Bitwise operations are explicitly not supported, because correct
+// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison
+// operations are explicitly not supported because they could result in a crash
+// on a CHECK condition. You should use patterns like the following for these
+// operations:
+// Bitwise operation:
+// CheckedNumeric<int> checked_int = untrusted_input_value;
+// int x = checked_int.ValueOrDefault(0) | kFlagValues;
+// Comparison:
+//    CheckedNumeric<size_t> checked_size = untrusted_input_value;
+//    checked_size = checked_size + HEADER_LENGTH;
+// if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size)
+// Do stuff...
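+//  Overflow detection (a minimal sketch):
+//    CheckedNumeric<int32_t> checked = std::numeric_limits<int32_t>::max();
+//    checked += 1;  // Now out of the int32_t range, so IsValid() is false.
+//    int32_t size = checked.ValueOrDefault(0);  // size == 0.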
+template <typename T>
+class CheckedNumeric {
+ public:
+ typedef T type;
+
+ CheckedNumeric() {}
+
+  // Converting constructor from another CheckedNumeric.
+ template <typename Src>
+ CheckedNumeric(const CheckedNumeric<Src>& rhs)
+ : state_(rhs.ValueUnsafe(), rhs.validity()) {}
+
+ template <typename Src>
+ CheckedNumeric(Src value, RangeConstraint validity)
+ : state_(value, validity) {}
+
+ // This is not an explicit constructor because we implicitly upgrade regular
+ // numerics to CheckedNumerics to make them easier to use.
+ template <typename Src>
+ CheckedNumeric(Src value) // NOLINT
+ : state_(value) {
+ // Argument must be numeric.
+ STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+ }
+
+ // IsValid() is the public API to test if a CheckedNumeric is currently valid.
+ bool IsValid() const { return validity() == RANGE_VALID; }
+
+ // ValueOrDie() The primary accessor for the underlying value. If the current
+ // state is not valid it will CHECK and crash.
+ T ValueOrDie() const {
+ CHECK(IsValid());
+ return state_.value();
+ }
+
+ // ValueOrDefault(T default_value) A convenience method that returns the
+ // current value if the state is valid, and the supplied default_value for
+ // any other state.
+ T ValueOrDefault(T default_value) const {
+ return IsValid() ? state_.value() : default_value;
+ }
+
+ // ValueFloating() - Since floating point values include their validity state,
+ // we provide an easy method for extracting them directly, without a risk of
+ // crashing on a CHECK.
+ T ValueFloating() const {
+ // Argument must be a floating-point value.
+ STATIC_ASSERT(std::numeric_limits<T>::is_iec559);
+ return CheckedNumeric<T>::cast(*this).ValueUnsafe();
+ }
+
+ // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for
+ // tests and to avoid a big matrix of friend operator overloads. But the
+ // values it returns are likely to change in the future.
+ // Returns: current validity state (i.e. valid, overflow, underflow, nan).
+ // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+ // saturation/wrapping so we can expose this state consistently and implement
+ // saturated arithmetic.
+ RangeConstraint validity() const { return state_.validity(); }
+
+ // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
+ // for tests and to avoid a big matrix of friend operator overloads. But the
+ // values it returns are likely to change in the future.
+ // Returns: the raw numeric value, regardless of the current state.
+ // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
+ // saturation/wrapping so we can expose this state consistently and implement
+ // saturated arithmetic.
+ T ValueUnsafe() const { return state_.value(); }
+
+ // Prototypes for the supported arithmetic operator overloads.
+ template <typename Src> CheckedNumeric& operator+=(Src rhs);
+ template <typename Src> CheckedNumeric& operator-=(Src rhs);
+ template <typename Src> CheckedNumeric& operator*=(Src rhs);
+ template <typename Src> CheckedNumeric& operator/=(Src rhs);
+ template <typename Src> CheckedNumeric& operator%=(Src rhs);
+
+ CheckedNumeric operator-() const {
+ RangeConstraint validity;
+ T value = CheckedNeg(state_.value(), &validity);
+ // Negation is always valid for floating point.
+ if (std::numeric_limits<T>::is_iec559)
+ return CheckedNumeric<T>(value);
+
+ validity = GetRangeConstraint(state_.validity() | validity);
+ return CheckedNumeric<T>(value, validity);
+ }
+
+ CheckedNumeric Abs() const {
+ RangeConstraint validity;
+ T value = CheckedAbs(state_.value(), &validity);
+ // Absolute value is always valid for floating point.
+ if (std::numeric_limits<T>::is_iec559)
+ return CheckedNumeric<T>(value);
+
+ validity = GetRangeConstraint(state_.validity() | validity);
+ return CheckedNumeric<T>(value, validity);
+ }
+
+ CheckedNumeric& operator++() {
+ *this += 1;
+ return *this;
+ }
+
+ CheckedNumeric operator++(int) {
+ CheckedNumeric value = *this;
+ *this += 1;
+ return value;
+ }
+
+ CheckedNumeric& operator--() {
+ *this -= 1;
+ return *this;
+ }
+
+ CheckedNumeric operator--(int) {
+ CheckedNumeric value = *this;
+ *this -= 1;
+ return value;
+ }
+
+ // These static methods behave like a convenience cast operator targeting
+ // the desired CheckedNumeric type. As an optimization, a reference is
+ // returned when Src is the same type as T.
+ template <typename Src>
+ static CheckedNumeric<T> cast(
+ Src u,
+ typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+ 0) {
+ return u;
+ }
+
+ template <typename Src>
+ static CheckedNumeric<T> cast(
+ const CheckedNumeric<Src>& u,
+ typename enable_if<!is_same<Src, T>::value, int>::type = 0) {
+ return u;
+ }
+
+ static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
+
+ private:
+ CheckedNumericState<T> state_;
+};
+
+// This is the boilerplate for the standard arithmetic operator overloads. A
+// macro isn't the prettiest solution, but it beats rewriting these five times.
+// Some details worth noting are:
+// * We apply the standard arithmetic promotions.
+// * We skip range checks for floating points.
+// * We skip range checks for destination integers with sufficient range.
+// TODO(jschuh): extract these out into templates.
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \
+ /* Binary arithmetic operator for CheckedNumerics of the same type. */ \
+ template <typename T> \
+ CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP( \
+ const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) { \
+ typedef typename ArithmeticPromotion<T>::type Promotion; \
+ /* Floating point always takes the fast path */ \
+ if (std::numeric_limits<T>::is_iec559) \
+ return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \
+ if (IsIntegerArithmeticSafe<Promotion, T, T>::value) \
+ return CheckedNumeric<Promotion>( \
+ lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
+ GetRangeConstraint(rhs.validity() | lhs.validity())); \
+ RangeConstraint validity = RANGE_VALID; \
+ T result = Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()), \
+ static_cast<Promotion>(rhs.ValueUnsafe()), \
+ &validity); \
+ return CheckedNumeric<Promotion>( \
+ result, \
+ GetRangeConstraint(validity | lhs.validity() | rhs.validity())); \
+ } \
+ /* Assignment arithmetic operator implementation from CheckedNumeric. */ \
+ template <typename T> \
+ template <typename Src> \
+ CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) { \
+ *this = CheckedNumeric<T>::cast(*this) OP CheckedNumeric<Src>::cast(rhs); \
+ return *this; \
+ } \
+ /* Binary arithmetic operator for CheckedNumeric of different type. */ \
+ template <typename T, typename Src> \
+ CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
+ const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) { \
+ typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
+ if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
+ return CheckedNumeric<Promotion>( \
+ lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
+ GetRangeConstraint(rhs.validity() | lhs.validity())); \
+ return CheckedNumeric<Promotion>::cast(lhs) \
+ OP CheckedNumeric<Promotion>::cast(rhs); \
+ } \
+ /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
+ template <typename T, typename Src> \
+ CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
+ const CheckedNumeric<T>& lhs, Src rhs) { \
+ typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
+ if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
+ return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs, \
+ lhs.validity()); \
+ return CheckedNumeric<Promotion>::cast(lhs) \
+ OP CheckedNumeric<Promotion>::cast(rhs); \
+ } \
+ /* Binary arithmetic operator for right numeric and left CheckedNumeric. */ \
+ template <typename T, typename Src> \
+ CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
+ Src lhs, const CheckedNumeric<T>& rhs) { \
+ typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
+ if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
+ return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(), \
+ rhs.validity()); \
+ return CheckedNumeric<Promotion>::cast(lhs) \
+ OP CheckedNumeric<Promotion>::cast(rhs); \
+ }
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= )
+
+#undef BASE_NUMERIC_ARITHMETIC_OPERATORS
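+
+// Illustrative usage sketch (comment only; it relies on the value constructor
+// and converting copy constructor declared earlier in this header, plus the
+// validity() and ValueUnsafe() accessors defined above):
+//
+//   CheckedNumeric<int> a(std::numeric_limits<int>::max());
+//   CheckedNumeric<int> b = a + 1;  // Checked add; b.validity() is
+//                                   // RANGE_OVERFLOW.
+//   CheckedNumeric<int16_t> c(20000);
+//   CheckedNumeric<int> d = c + c;  // int16_t promotes to int, so this yields
+//                                   // RANGE_VALID with ValueUnsafe() == 40000.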
+
+} // namespace internal
+
+using internal::CheckedNumeric;
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_MATH_H_
diff --git a/chromium/v8/src/base/safe_math_impl.h b/chromium/v8/src/base/safe_math_impl.h
new file mode 100644
index 00000000000..055e2a02750
--- /dev/null
+++ b/chromium/v8/src/base/safe_math_impl.h
@@ -0,0 +1,531 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_SAFE_MATH_IMPL_H_
+#define V8_BASE_SAFE_MATH_IMPL_H_
+
+#include <stdint.h>
+
+#include <cmath>
+#include <cstdlib>
+#include <limits>
+
+#include "src/base/macros.h"
+#include "src/base/safe_conversions.h"
+
+namespace v8 {
+namespace base {
+namespace internal {
+
+
+// From Chromium's base/template_util.h:
+
+template<class T, T v>
+struct integral_constant {
+ static const T value = v;
+ typedef T value_type;
+ typedef integral_constant<T, v> type;
+};
+
+template <class T, T v> const T integral_constant<T, v>::value;
+
+typedef integral_constant<bool, true> true_type;
+typedef integral_constant<bool, false> false_type;
+
+template <class T, class U> struct is_same : public false_type {};
+template <class T> struct is_same<T, T> : true_type {};
+
+template<bool B, class T = void>
+struct enable_if {};
+
+template<class T>
+struct enable_if<true, T> { typedef T type; };
+
+// </template_util.h>
+
+
+// Everything from here up to the floating point operations is portable C++,
+// but it may not be fast. This code could be split based on
+// platform/architecture and replaced with potentially faster implementations.
+
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForSizeAndSign;
+template <>
+struct IntegerForSizeAndSign<1, true> {
+ typedef int8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<1, false> {
+ typedef uint8_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, true> {
+ typedef int16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<2, false> {
+ typedef uint16_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, true> {
+ typedef int32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<4, false> {
+ typedef uint32_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, true> {
+ typedef int64_t type;
+};
+template <>
+struct IntegerForSizeAndSign<8, false> {
+ typedef uint64_t type;
+};
+
+// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
+// support 128-bit math, then the ArithmeticPromotion template below will need
+// to be updated (or more likely replaced with a decltype expression).
+
+template <typename Integer>
+struct UnsignedIntegerForSize {
+ typedef typename enable_if<
+ std::numeric_limits<Integer>::is_integer,
+ typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type;
+};
+
+template <typename Integer>
+struct SignedIntegerForSize {
+ typedef typename enable_if<
+ std::numeric_limits<Integer>::is_integer,
+ typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type;
+};
+
+template <typename Integer>
+struct TwiceWiderInteger {
+ typedef typename enable_if<
+ std::numeric_limits<Integer>::is_integer,
+ typename IntegerForSizeAndSign<
+ sizeof(Integer) * 2,
+ std::numeric_limits<Integer>::is_signed>::type>::type type;
+};
+
+template <typename Integer>
+struct PositionOfSignBit {
+ static const typename enable_if<std::numeric_limits<Integer>::is_integer,
+ size_t>::type value = 8 * sizeof(Integer) - 1;
+};
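+
+// A few instantiations of the templates above, for reference (comment only):
+//   UnsignedIntegerForSize<int32_t>::type -> uint32_t
+//   SignedIntegerForSize<uint8_t>::type   -> int8_t
+//   TwiceWiderInteger<int16_t>::type      -> int32_t
+//   PositionOfSignBit<int64_t>::value     -> 63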
+
+// Helper templates for integer manipulations.
+
+template <typename T>
+bool HasSignBit(T x) {
+ // Cast to unsigned since right shift on signed is undefined.
+ return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
+ PositionOfSignBit<T>::value);
+}
+
+// This wrapper undoes the standard integer promotions.
+template <typename T>
+T BinaryComplement(T x) {
+ return ~x;
+}
+
+// Here are the actual portable checked integer math implementations.
+// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
+// way to coalesce things into the CheckedNumericState specializations below.
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedAdd(T x, T y, RangeConstraint* validity) {
+  // Since the value of x + y is undefined on overflow if we have a signed
+  // type, we compute it using the unsigned type of the same size.
+ typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ UnsignedDst ux = static_cast<UnsignedDst>(x);
+ UnsignedDst uy = static_cast<UnsignedDst>(y);
+ UnsignedDst uresult = ux + uy;
+ // Addition is valid if the sign of (x + y) is equal to either that of x or
+ // that of y.
+ if (std::numeric_limits<T>::is_signed) {
+ if (HasSignBit(BinaryComplement((uresult ^ ux) & (uresult ^ uy))))
+ *validity = RANGE_VALID;
+ else // Direction of wrap is inverse of result sign.
+ *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+
+ } else { // Unsigned is either valid or overflow.
+ *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
+ }
+ return static_cast<T>(uresult);
+}
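+
+// Worked example of the sign-bit check above (illustrative comment only), for
+// T = int8_t with x = y = 100:
+//   uresult = 200 (0xC8), so (uresult ^ ux) == (uresult ^ uy) == 0xAC, and
+//   BinaryComplement(0xAC & 0xAC) == 0x53 has a clear sign bit, so the sum is
+//   out of range; uresult's sign bit is set, so *validity is RANGE_OVERFLOW,
+//   matching the fact that 100 + 100 exceeds int8_t's maximum of 127.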
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer, T>::type
+CheckedSub(T x, T y, RangeConstraint* validity) {
+  // Since the value of x - y is undefined on overflow if we have a signed
+  // type, we compute it using the unsigned type of the same size.
+ typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ UnsignedDst ux = static_cast<UnsignedDst>(x);
+ UnsignedDst uy = static_cast<UnsignedDst>(y);
+ UnsignedDst uresult = ux - uy;
+  // Subtraction is valid if either x and y have the same sign, or (x - y) and
+  // x have the same sign.
+ if (std::numeric_limits<T>::is_signed) {
+ if (HasSignBit(BinaryComplement((uresult ^ ux) & (ux ^ uy))))
+ *validity = RANGE_VALID;
+ else // Direction of wrap is inverse of result sign.
+ *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
+
+ } else { // Unsigned is either valid or underflow.
+ *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
+ }
+ return static_cast<T>(uresult);
+}
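+
+// The same trick detects subtraction wrap. For T = int8_t with x = -100 and
+// y = 100 (illustrative comment only): ux = 0x9C, uy = 0x64, uresult = 0x38,
+// and (uresult ^ ux) & (ux ^ uy) == 0xA0 has its sign bit set, so the
+// complement's sign bit is clear and the result is out of range; uresult's
+// sign bit is clear, so *validity is RANGE_UNDERFLOW (-200 is below -128).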
+
+// Integer multiplication is a bit complicated. In the fast case we just
+// promote to a twice wider type and range check the result. In the slow case
+// we need to manually check that the result won't be truncated by checking
+// with division against the appropriate bound.
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && sizeof(T) * 2 <= sizeof(uintmax_t),
+ T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+ typedef typename TwiceWiderInteger<T>::type IntermediateType;
+ IntermediateType tmp =
+ static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
+ *validity = DstRangeRelationToSrcRange<T>(tmp);
+ return static_cast<T>(tmp);
+}
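+
+// Example of the fast path above (comment only): for T = int16_t with
+// x = y = 300, IntermediateType is int32_t, tmp = 90000, and the range check
+// against int16_t reports RANGE_OVERFLOW because 90000 exceeds int16_t's
+// maximum of 32767.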
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<T>::is_signed &&
+ (sizeof(T) * 2 > sizeof(uintmax_t)),
+ T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+  // If either side is zero then the result will be zero, and bailing out here
+  // also keeps the bound checks below from dividing by zero.
+  if (!x || !y) {
+    *validity = RANGE_VALID;
+    return static_cast<T>(0);
+
+ } else if (x > 0) {
+ if (y > 0)
+ *validity =
+ x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW;
+ else
+ *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID
+ : RANGE_UNDERFLOW;
+
+ } else {
+ if (y > 0)
+ *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID
+ : RANGE_UNDERFLOW;
+ else
+ *validity =
+ y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
+ }
+
+ return x * y;
+}
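+
+// Example of the slow path above (comment only): on a platform where
+// uintmax_t is 64 bits wide, T = int64_t takes this path. For
+// x = 3000000000 and y = 4000000000, max() / y is about 2.3e9, so the
+// x <= max() / y test fails and *validity is set to RANGE_OVERFLOW; the
+// returned product should then be ignored.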
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed &&
+ (sizeof(T) * 2 > sizeof(uintmax_t)),
+ T>::type
+CheckedMul(T x, T y, RangeConstraint* validity) {
+ *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
+ ? RANGE_VALID
+ : RANGE_OVERFLOW;
+ return x * y;
+}
+
+// Division just requires a check for an invalid negation on signed min/-1.
+template <typename T>
+T CheckedDiv(
+ T x,
+ T y,
+ RangeConstraint* validity,
+ typename enable_if<std::numeric_limits<T>::is_integer, int>::type = 0) {
+ if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() &&
+ y == static_cast<T>(-1)) {
+ *validity = RANGE_OVERFLOW;
+ return std::numeric_limits<T>::min();
+ }
+
+ *validity = RANGE_VALID;
+ return x / y;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+ *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
+ return x % y;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedMod(T x, T y, RangeConstraint* validity) {
+ *validity = RANGE_VALID;
+ return x % y;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+ *validity =
+ value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+ // The negation of signed min is min, so catch that one.
+ return -value;
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedNeg(T value, RangeConstraint* validity) {
+ // The only legal unsigned negation is zero.
+ *validity = value ? RANGE_UNDERFLOW : RANGE_VALID;
+ return static_cast<T>(
+ -static_cast<typename SignedIntegerForSize<T>::type>(value));
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+ *validity =
+ value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+ return std::abs(value);
+}
+
+template <typename T>
+typename enable_if<
+ std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedAbs(T value, RangeConstraint* validity) {
+  // Absolute value of an unsigned value is just its identity.
+ *validity = RANGE_VALID;
+ return value;
+}
+
+// These are the floating point stubs that the compiler needs to see; they are
+// never actually called, because the operator overloads take the fast path
+// for floating point before reaching them. Only the negation and absolute
+// value overloads below are ever used for floating point.
+#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \
+ template <typename T> \
+ typename enable_if<std::numeric_limits<T>::is_iec559, T>::type \
+ Checked##NAME(T, T, RangeConstraint*) { \
+ UNREACHABLE(); \
+ return 0; \
+ }
+
+BASE_FLOAT_ARITHMETIC_STUBS(Add)
+BASE_FLOAT_ARITHMETIC_STUBS(Sub)
+BASE_FLOAT_ARITHMETIC_STUBS(Mul)
+BASE_FLOAT_ARITHMETIC_STUBS(Div)
+BASE_FLOAT_ARITHMETIC_STUBS(Mod)
+
+#undef BASE_FLOAT_ARITHMETIC_STUBS
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
+ T value,
+ RangeConstraint*) {
+ return -value;
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
+ T value,
+ RangeConstraint*) {
+ return std::abs(value);
+}
+
+// Floats carry around their validity state with them, but integers do not. So,
+// we wrap the underlying value in a specialization in order to hide that detail
+// and expose an interface via accessors.
+enum NumericRepresentation {
+ NUMERIC_INTEGER,
+ NUMERIC_FLOATING,
+ NUMERIC_UNKNOWN
+};
+
+template <typename NumericType>
+struct GetNumericRepresentation {
+ static const NumericRepresentation value =
+ std::numeric_limits<NumericType>::is_integer
+ ? NUMERIC_INTEGER
+ : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING
+ : NUMERIC_UNKNOWN);
+};
+
+template <typename T, NumericRepresentation type =
+ GetNumericRepresentation<T>::value>
+class CheckedNumericState {};
+
+// Integrals require quite a bit of additional housekeeping to manage state.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_INTEGER> {
+ private:
+ T value_;
+ RangeConstraint validity_;
+
+ public:
+ template <typename Src, NumericRepresentation type>
+ friend class CheckedNumericState;
+
+ CheckedNumericState() : value_(0), validity_(RANGE_VALID) {}
+
+ template <typename Src>
+ CheckedNumericState(Src value, RangeConstraint validity)
+ : value_(value),
+ validity_(GetRangeConstraint(validity |
+ DstRangeRelationToSrcRange<T>(value))) {
+ // Argument must be numeric.
+ STATIC_ASSERT(std::numeric_limits<Src>::is_specialized);
+ }
+
+ // Copy constructor.
+ template <typename Src>
+ CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : value_(static_cast<T>(rhs.value())),
+ validity_(GetRangeConstraint(
+ rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {}
+
+ template <typename Src>
+ explicit CheckedNumericState(
+ Src value,
+ typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+ 0)
+ : value_(static_cast<T>(value)),
+ validity_(DstRangeRelationToSrcRange<T>(value)) {}
+
+ RangeConstraint validity() const { return validity_; }
+ T value() const { return value_; }
+};
+
+// Floating points maintain their own validity, but need translation wrappers.
+template <typename T>
+class CheckedNumericState<T, NUMERIC_FLOATING> {
+ private:
+ T value_;
+
+ public:
+ template <typename Src, NumericRepresentation type>
+ friend class CheckedNumericState;
+
+ CheckedNumericState() : value_(0.0) {}
+
+ template <typename Src>
+ CheckedNumericState(
+ Src value,
+ RangeConstraint validity,
+ typename enable_if<std::numeric_limits<Src>::is_integer, int>::type = 0) {
+ switch (DstRangeRelationToSrcRange<T>(value)) {
+ case RANGE_VALID:
+ value_ = static_cast<T>(value);
+ break;
+
+ case RANGE_UNDERFLOW:
+ value_ = -std::numeric_limits<T>::infinity();
+ break;
+
+ case RANGE_OVERFLOW:
+ value_ = std::numeric_limits<T>::infinity();
+ break;
+
+ case RANGE_INVALID:
+ value_ = std::numeric_limits<T>::quiet_NaN();
+ break;
+ }
+ }
+
+ template <typename Src>
+ explicit CheckedNumericState(
+ Src value,
+ typename enable_if<std::numeric_limits<Src>::is_specialized, int>::type =
+ 0)
+ : value_(static_cast<T>(value)) {}
+
+ // Copy constructor.
+ template <typename Src>
+ CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : value_(static_cast<T>(rhs.value())) {}
+
+ RangeConstraint validity() const {
+ return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(),
+ value_ >= -std::numeric_limits<T>::max());
+ }
+ T value() const { return value_; }
+};
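+
+// Illustrative note (assuming GetRangeConstraint() maps a failed upper-bound
+// check to RANGE_OVERFLOW and a failed lower-bound check to RANGE_UNDERFLOW,
+// as the integer paths above do): an out-of-range integer source is stored as
+// +/-infinity and an invalid one as NaN, so validity() recovers the original
+// constraint -- +infinity reports RANGE_OVERFLOW, -infinity RANGE_UNDERFLOW,
+// and NaN fails both comparisons and reports RANGE_INVALID.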
+
+// For integers less than 128-bit and floats 32-bit or larger, we can distil
+// C/C++ arithmetic promotions down to two simple rules:
+// 1. The type with the larger maximum exponent always takes precedence.
+// 2. The resulting type must be promoted to at least an int.
+// The following template specializations implement that promotion logic.
+enum ArithmeticPromotionCategory {
+ LEFT_PROMOTION,
+ RIGHT_PROMOTION,
+ DEFAULT_PROMOTION
+};
+
+template <typename Lhs,
+ typename Rhs = Lhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+ ? (MaxExponent<Lhs>::value > MaxExponent<int>::value
+ ? LEFT_PROMOTION
+ : DEFAULT_PROMOTION)
+ : (MaxExponent<Rhs>::value > MaxExponent<int>::value
+ ? RIGHT_PROMOTION
+ : DEFAULT_PROMOTION) >
+struct ArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ typedef Lhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ typedef Rhs type;
+};
+
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<Lhs, Rhs, DEFAULT_PROMOTION> {
+ typedef int type;
+};
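+
+// A few illustrative instantiations (comment only):
+//   ArithmeticPromotion<int16_t, int16_t>::type -> int     (DEFAULT_PROMOTION)
+//   ArithmeticPromotion<int64_t, int32_t>::type -> int64_t (LEFT_PROMOTION)
+//   ArithmeticPromotion<float, int32_t>::type   -> float   (an IEC 559 float's
+//     maximum exponent exceeds that of any 32-bit integer).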
+
+// We can statically check whether operations on the provided types can wrap,
+// and skip the checked operations when they're not needed. For an integer,
+// that means the destination type preserves the sign and is at least twice
+// the width of each source type.
+template <typename T, typename Lhs, typename Rhs>
+struct IsIntegerArithmeticSafe {
+ static const bool value = !std::numeric_limits<T>::is_iec559 &&
+ StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED &&
+ sizeof(T) >= (2 * sizeof(Lhs)) &&
+                            StaticDstRangeRelationToSrcRange<T, Rhs>::value ==
+ NUMERIC_RANGE_CONTAINED &&
+ sizeof(T) >= (2 * sizeof(Rhs));
+};
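+
+// For example (comment only): IsIntegerArithmeticSafe<int, int16_t,
+// int16_t>::value is true, since any sum or product of two int16_t values is
+// representable in int, so the operators above can add or multiply directly
+// in the promoted type. IsIntegerArithmeticSafe<int, int32_t, int32_t>::value
+// is false, because int is not twice as wide as int32_t, so those operations
+// fall back to the Checked* routines.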
+
+} // namespace internal
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_SAFE_MATH_IMPL_H_
diff --git a/chromium/v8/src/base/win32-headers.h b/chromium/v8/src/base/win32-headers.h
new file mode 100644
index 00000000000..5432de165c3
--- /dev/null
+++ b/chromium/v8/src/base/win32-headers.h
@@ -0,0 +1,79 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_WIN32_HEADERS_H_
+#define V8_BASE_WIN32_HEADERS_H_
+
+#ifndef WIN32_LEAN_AND_MEAN
+// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#ifndef NOKERNEL
+#define NOKERNEL
+#endif
+#ifndef NOUSER
+#define NOUSER
+#endif
+#ifndef NOSERVICE
+#define NOSERVICE
+#endif
+#ifndef NOSOUND
+#define NOSOUND
+#endif
+#ifndef NOMCX
+#define NOMCX
+#endif
+// Require Windows XP or higher (this is required for the RtlCaptureContext
+// function to be present).
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif
+
+#include <windows.h>
+
+#include <signal.h> // For raise().
+#include <time.h> // For LocalOffset() implementation.
+#include <mmsystem.h> // For timeGetTime().
+#ifdef __MINGW32__
+// Require Windows XP or higher when compiling with MinGW. This is for MinGW
+// header files to expose getaddrinfo.
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif // __MINGW32__
+#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#include <dbghelp.h>  // For SymLoadModule64 et al.
+#include <errno.h> // For STRUNCATE
+#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#include <limits.h>  // For INT_MAX et al.
+#include <tlhelp32.h>  // For Module32First et al.
+
+// These additional WIN32 includes have to be right here as the #undef's below
+// make it impossible to have them elsewhere.
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#ifndef __MINGW32__
+#include <wspiapi.h>
+#endif // __MINGW32__
+#include <process.h> // For _beginthreadex().
+#include <stdlib.h>
+
+#undef VOID
+#undef DELETE
+#undef IN
+#undef THIS
+#undef CONST
+#undef NAN
+#undef UNKNOWN
+#undef NONE
+#undef ANY
+#undef IGNORE
+#undef STRICT
+#undef GetObject
+#undef CreateSemaphore
+#undef Yield
+
+#endif // V8_BASE_WIN32_HEADERS_H_