aboutsummaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/3rdparty/double-conversion/README6
-rw-r--r--src/3rdparty/double-conversion/bignum-dtoa.cc640
-rw-r--r--src/3rdparty/double-conversion/bignum-dtoa.h84
-rw-r--r--src/3rdparty/double-conversion/bignum.cc764
-rw-r--r--src/3rdparty/double-conversion/bignum.h145
-rw-r--r--src/3rdparty/double-conversion/cached-powers.cc175
-rw-r--r--src/3rdparty/double-conversion/cached-powers.h64
-rw-r--r--src/3rdparty/double-conversion/diy-fp.cc57
-rw-r--r--src/3rdparty/double-conversion/diy-fp.h118
-rw-r--r--src/3rdparty/double-conversion/double-conversion.cc889
-rw-r--r--src/3rdparty/double-conversion/double-conversion.h536
-rw-r--r--src/3rdparty/double-conversion/double-conversion.pri4
-rw-r--r--src/3rdparty/double-conversion/fast-dtoa.cc664
-rw-r--r--src/3rdparty/double-conversion/fast-dtoa.h88
-rw-r--r--src/3rdparty/double-conversion/fixed-dtoa.cc402
-rw-r--r--src/3rdparty/double-conversion/fixed-dtoa.h56
-rw-r--r--src/3rdparty/double-conversion/ieee.h398
-rw-r--r--src/3rdparty/double-conversion/strtod.cc554
-rw-r--r--src/3rdparty/double-conversion/strtod.h45
-rw-r--r--src/3rdparty/double-conversion/utils.h313
-rw-r--r--src/3rdparty/masm/WeakRandom.h52
-rw-r--r--src/3rdparty/masm/assembler/ARMAssembler.cpp444
-rw-r--r--src/3rdparty/masm/assembler/ARMAssembler.h1129
-rw-r--r--src/3rdparty/masm/assembler/ARMv7Assembler.cpp36
-rw-r--r--src/3rdparty/masm/assembler/ARMv7Assembler.h2790
-rw-r--r--src/3rdparty/masm/assembler/AbstractMacroAssembler.h842
-rw-r--r--src/3rdparty/masm/assembler/AssemblerBuffer.h181
-rw-r--r--src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h342
-rw-r--r--src/3rdparty/masm/assembler/CodeLocation.h218
-rw-r--r--src/3rdparty/masm/assembler/LinkBuffer.cpp230
-rw-r--r--src/3rdparty/masm/assembler/LinkBuffer.h297
-rw-r--r--src/3rdparty/masm/assembler/MIPSAssembler.h1107
-rw-r--r--src/3rdparty/masm/assembler/MacroAssembler.h1465
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerARM.cpp99
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerARM.h1386
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerARMv7.h1914
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h406
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerMIPS.h2751
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp52
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerSH4.h2293
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerX86.h314
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerX86Common.h1541
-rw-r--r--src/3rdparty/masm/assembler/MacroAssemblerX86_64.h643
-rw-r--r--src/3rdparty/masm/assembler/RepatchBuffer.h181
-rw-r--r--src/3rdparty/masm/assembler/SH4Assembler.h2152
-rw-r--r--src/3rdparty/masm/assembler/X86Assembler.h2540
-rw-r--r--src/3rdparty/masm/config.h56
-rw-r--r--src/3rdparty/masm/create_regex_tables121
-rw-r--r--src/3rdparty/masm/disassembler/Disassembler.cpp43
-rw-r--r--src/3rdparty/masm/disassembler/Disassembler.h52
-rw-r--r--src/3rdparty/masm/disassembler/UDis86Disassembler.cpp63
-rw-r--r--src/3rdparty/masm/disassembler/udis86/differences.txt24
-rw-r--r--src/3rdparty/masm/disassembler/udis86/itab.py360
-rw-r--r--src/3rdparty/masm/disassembler/udis86/optable.xml8959
-rw-r--r--src/3rdparty/masm/disassembler/udis86/ud_opcode.py235
-rw-r--r--src/3rdparty/masm/disassembler/udis86/ud_optable.py103
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86.c182
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86.h33
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_decode.c1141
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_decode.h258
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_extern.h88
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_input.c262
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_input.h67
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c33
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c252
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c278
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_syn.c86
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_syn.h47
-rw-r--r--src/3rdparty/masm/disassembler/udis86/udis86_types.h238
-rw-r--r--src/3rdparty/masm/jit/JITCompilationEffort.h39
-rw-r--r--src/3rdparty/masm/masm-defs.pri28
-rw-r--r--src/3rdparty/masm/masm.pri86
-rw-r--r--src/3rdparty/masm/runtime/MatchResult.h71
-rw-r--r--src/3rdparty/masm/stubs/ExecutableAllocator.h120
-rw-r--r--src/3rdparty/masm/stubs/JSGlobalData.h65
-rw-r--r--src/3rdparty/masm/stubs/LLIntData.h0
-rw-r--r--src/3rdparty/masm/stubs/Options.h53
-rw-r--r--src/3rdparty/masm/stubs/WTFStubs.cpp131
-rw-r--r--src/3rdparty/masm/stubs/WTFStubs.h50
-rw-r--r--src/3rdparty/masm/stubs/wtf/FastAllocBase.h48
-rw-r--r--src/3rdparty/masm/stubs/wtf/FastMalloc.h46
-rw-r--r--src/3rdparty/masm/stubs/wtf/Noncopyable.h48
-rw-r--r--src/3rdparty/masm/stubs/wtf/OwnPtr.h46
-rw-r--r--src/3rdparty/masm/stubs/wtf/PassOwnPtr.h120
-rw-r--r--src/3rdparty/masm/stubs/wtf/PassRefPtr.h101
-rw-r--r--src/3rdparty/masm/stubs/wtf/RefCounted.h70
-rw-r--r--src/3rdparty/masm/stubs/wtf/RefPtr.h93
-rw-r--r--src/3rdparty/masm/stubs/wtf/TypeTraits.h58
-rw-r--r--src/3rdparty/masm/stubs/wtf/UnusedParam.h48
-rw-r--r--src/3rdparty/masm/stubs/wtf/Vector.h104
-rw-r--r--src/3rdparty/masm/stubs/wtf/text/CString.h44
-rw-r--r--src/3rdparty/masm/stubs/wtf/text/WTFString.h75
-rw-r--r--src/3rdparty/masm/stubs/wtf/unicode/Unicode.h59
-rw-r--r--src/3rdparty/masm/wtf/ASCIICType.h181
-rw-r--r--src/3rdparty/masm/wtf/Assertions.h428
-rw-r--r--src/3rdparty/masm/wtf/Atomics.h227
-rw-r--r--src/3rdparty/masm/wtf/BumpPointerAllocator.h252
-rw-r--r--src/3rdparty/masm/wtf/CheckedArithmetic.h721
-rw-r--r--src/3rdparty/masm/wtf/Compiler.h302
-rw-r--r--src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h45
-rw-r--r--src/3rdparty/masm/wtf/DataLog.h128
-rw-r--r--src/3rdparty/masm/wtf/DynamicAnnotations.h96
-rw-r--r--src/3rdparty/masm/wtf/EnumClass.h134
-rw-r--r--src/3rdparty/masm/wtf/FeatureDefines.h874
-rw-r--r--src/3rdparty/masm/wtf/FilePrintStream.cpp64
-rw-r--r--src/3rdparty/masm/wtf/FilePrintStream.h62
-rw-r--r--src/3rdparty/masm/wtf/Locker.h48
-rw-r--r--src/3rdparty/masm/wtf/MathExtras.h459
-rw-r--r--src/3rdparty/masm/wtf/NotFound.h37
-rw-r--r--src/3rdparty/masm/wtf/NullPtr.h56
-rw-r--r--src/3rdparty/masm/wtf/OSAllocator.h115
-rw-r--r--src/3rdparty/masm/wtf/OSAllocatorPosix.cpp193
-rw-r--r--src/3rdparty/masm/wtf/OSAllocatorWin.cpp84
-rw-r--r--src/3rdparty/masm/wtf/PageAllocation.h120
-rw-r--r--src/3rdparty/masm/wtf/PageAllocationAligned.cpp85
-rw-r--r--src/3rdparty/masm/wtf/PageAllocationAligned.h70
-rw-r--r--src/3rdparty/masm/wtf/PageBlock.cpp78
-rw-r--r--src/3rdparty/masm/wtf/PageBlock.h88
-rw-r--r--src/3rdparty/masm/wtf/PageReservation.h149
-rw-r--r--src/3rdparty/masm/wtf/Platform.h1019
-rw-r--r--src/3rdparty/masm/wtf/PossiblyNull.h59
-rw-r--r--src/3rdparty/masm/wtf/PrintStream.cpp114
-rw-r--r--src/3rdparty/masm/wtf/PrintStream.h300
-rw-r--r--src/3rdparty/masm/wtf/RawPointer.h58
-rw-r--r--src/3rdparty/masm/wtf/StdLibExtras.h282
-rw-r--r--src/3rdparty/masm/wtf/VMTags.h75
-rw-r--r--src/3rdparty/masm/yarr/Yarr.h69
-rw-r--r--src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp463
-rw-r--r--src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h138
-rw-r--r--src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js219
-rw-r--r--src/3rdparty/masm/yarr/YarrInterpreter.cpp1959
-rw-r--r--src/3rdparty/masm/yarr/YarrInterpreter.h380
-rw-r--r--src/3rdparty/masm/yarr/YarrJIT.cpp2702
-rw-r--r--src/3rdparty/masm/yarr/YarrJIT.h141
-rw-r--r--src/3rdparty/masm/yarr/YarrParser.h880
-rw-r--r--src/3rdparty/masm/yarr/YarrPattern.cpp880
-rw-r--r--src/3rdparty/masm/yarr/YarrPattern.h401
-rw-r--r--src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp59
-rw-r--r--src/3rdparty/masm/yarr/YarrSyntaxChecker.h38
-rw-r--r--src/3rdparty/masm/yarr/yarr.pri12
-rw-r--r--src/qml/qml/v4vm/debugging.cpp308
-rw-r--r--src/qml/qml/v4vm/debugging.h157
-rw-r--r--src/qml/qml/v4vm/llvm_installation.pri23
-rw-r--r--src/qml/qml/v4vm/llvm_runtime.cpp513
-rw-r--r--src/qml/qml/v4vm/moth/moth.pri13
-rw-r--r--src/qml/qml/v4vm/moth/qv4instr_moth.cpp15
-rw-r--r--src/qml/qml/v4vm/moth/qv4instr_moth_p.h527
-rw-r--r--src/qml/qml/v4vm/moth/qv4isel_moth.cpp812
-rw-r--r--src/qml/qml/v4vm/moth/qv4isel_moth_p.h169
-rw-r--r--src/qml/qml/v4vm/moth/qv4vme_moth.cpp532
-rw-r--r--src/qml/qml/v4vm/moth/qv4vme_moth_p.h35
-rw-r--r--src/qml/qml/v4vm/qcalculatehash_p.h73
-rw-r--r--src/qml/qml/v4vm/qv4_llvm_p.h65
-rw-r--r--src/qml/qml/v4vm/qv4alloca_p.h54
-rw-r--r--src/qml/qml/v4vm/qv4argumentsobject.cpp176
-rw-r--r--src/qml/qml/v4vm/qv4argumentsobject.h99
-rw-r--r--src/qml/qml/v4vm/qv4arrayobject.cpp859
-rw-r--r--src/qml/qml/v4vm/qv4arrayobject.h103
-rw-r--r--src/qml/qml/v4vm/qv4booleanobject.cpp97
-rw-r--r--src/qml/qml/v4vm/qv4booleanobject.h79
-rw-r--r--src/qml/qml/v4vm/qv4codegen.cpp3256
-rw-r--r--src/qml/qml/v4vm/qv4codegen_p.h444
-rw-r--r--src/qml/qml/v4vm/qv4context.cpp576
-rw-r--r--src/qml/qml/v4vm/qv4context.h194
-rw-r--r--src/qml/qml/v4vm/qv4dateobject.cpp1316
-rw-r--r--src/qml/qml/v4vm/qv4dateobject.h132
-rw-r--r--src/qml/qml/v4vm/qv4engine.cpp551
-rw-r--r--src/qml/qml/v4vm/qv4engine.h271
-rw-r--r--src/qml/qml/v4vm/qv4errorobject.cpp238
-rw-r--r--src/qml/qml/v4vm/qv4errorobject.h235
-rw-r--r--src/qml/qml/v4vm/qv4executableallocator.cpp208
-rw-r--r--src/qml/qml/v4vm/qv4executableallocator.h121
-rw-r--r--src/qml/qml/v4vm/qv4functionobject.cpp522
-rw-r--r--src/qml/qml/v4vm/qv4functionobject.h243
-rw-r--r--src/qml/qml/v4vm/qv4global.h169
-rw-r--r--src/qml/qml/v4vm/qv4globalobject.cpp731
-rw-r--r--src/qml/qml/v4vm/qv4globalobject.h93
-rw-r--r--src/qml/qml/v4vm/qv4identifier.h111
-rw-r--r--src/qml/qml/v4vm/qv4internalclass.cpp188
-rw-r--r--src/qml/qml/v4vm/qv4internalclass.h91
-rw-r--r--src/qml/qml/v4vm/qv4isel_llvm.cpp1375
-rw-r--r--src/qml/qml/v4vm/qv4isel_llvm_p.h177
-rw-r--r--src/qml/qml/v4vm/qv4isel_masm.cpp1252
-rw-r--r--src/qml/qml/v4vm/qv4isel_masm_p.h894
-rw-r--r--src/qml/qml/v4vm/qv4isel_p.cpp398
-rw-r--r--src/qml/qml/v4vm/qv4isel_p.h158
-rw-r--r--src/qml/qml/v4vm/qv4isel_util_p.h77
-rw-r--r--src/qml/qml/v4vm/qv4jsir.cpp948
-rw-r--r--src/qml/qml/v4vm/qv4jsir_p.h821
-rw-r--r--src/qml/qml/v4vm/qv4jsonobject.cpp936
-rw-r--r--src/qml/qml/v4vm/qv4jsonobject.h65
-rw-r--r--src/qml/qml/v4vm/qv4lookup.cpp332
-rw-r--r--src/qml/qml/v4vm/qv4lookup.h144
-rw-r--r--src/qml/qml/v4vm/qv4managed.cpp187
-rw-r--r--src/qml/qml/v4vm/qv4managed.h247
-rw-r--r--src/qml/qml/v4vm/qv4math.h120
-rw-r--r--src/qml/qml/v4vm/qv4mathobject.cpp311
-rw-r--r--src/qml/qml/v4vm/qv4mathobject.h80
-rw-r--r--src/qml/qml/v4vm/qv4mm.cpp493
-rw-r--r--src/qml/qml/v4vm/qv4mm.h155
-rw-r--r--src/qml/qml/v4vm/qv4numberobject.cpp237
-rw-r--r--src/qml/qml/v4vm/qv4numberobject.h83
-rw-r--r--src/qml/qml/v4vm/qv4object.cpp1177
-rw-r--r--src/qml/qml/v4vm/qv4object.h417
-rw-r--r--src/qml/qml/v4vm/qv4objectiterator.cpp185
-rw-r--r--src/qml/qml/v4vm/qv4objectiterator.h84
-rw-r--r--src/qml/qml/v4vm/qv4objectproto.cpp565
-rw-r--r--src/qml/qml/v4vm/qv4objectproto.h104
-rw-r--r--src/qml/qml/v4vm/qv4property.h152
-rw-r--r--src/qml/qml/v4vm/qv4regexp.cpp167
-rw-r--r--src/qml/qml/v4vm/qv4regexp.h149
-rw-r--r--src/qml/qml/v4vm/qv4regexpobject.cpp256
-rw-r--r--src/qml/qml/v4vm/qv4regexpobject.h108
-rw-r--r--src/qml/qml/v4vm/qv4runtime.cpp1319
-rw-r--r--src/qml/qml/v4vm/qv4runtime.h745
-rw-r--r--src/qml/qml/v4vm/qv4sparsearray.cpp464
-rw-r--r--src/qml/qml/v4vm/qv4sparsearray.h369
-rw-r--r--src/qml/qml/v4vm/qv4string.cpp242
-rw-r--r--src/qml/qml/v4vm/qv4string.h136
-rw-r--r--src/qml/qml/v4vm/qv4stringobject.cpp726
-rw-r--r--src/qml/qml/v4vm/qv4stringobject.h108
-rw-r--r--src/qml/qml/v4vm/qv4syntaxchecker.cpp119
-rw-r--r--src/qml/qml/v4vm/qv4syntaxchecker_p.h73
-rw-r--r--src/qml/qml/v4vm/qv4unwindhelper.cpp37
-rw-r--r--src/qml/qml/v4vm/qv4unwindhelper.h27
-rw-r--r--src/qml/qml/v4vm/qv4unwindhelper_p-arm.h176
-rw-r--r--src/qml/qml/v4vm/qv4unwindhelper_p-dw2.h189
-rw-r--r--src/qml/qml/v4vm/qv4util.h74
-rw-r--r--src/qml/qml/v4vm/qv4v8.cpp2141
-rw-r--r--src/qml/qml/v4vm/qv4v8.h2581
-rw-r--r--src/qml/qml/v4vm/qv4value.cpp214
-rw-r--r--src/qml/qml/v4vm/qv4value.h572
-rw-r--r--src/qml/qml/v4vm/v4.pri8
-rw-r--r--src/qml/qml/v4vm/v4.pro177
234 files changed, 99085 insertions, 0 deletions
diff --git a/src/3rdparty/double-conversion/README b/src/3rdparty/double-conversion/README
new file mode 100644
index 0000000000..40ed4a7efd
--- /dev/null
+++ b/src/3rdparty/double-conversion/README
@@ -0,0 +1,6 @@
+This is a copy of the library for binary-decimal and decimal-binary conversion routines for IEEE doubles, taken
+from
+
+ http://code.google.com/p/double-conversion/
+
+commit e5b34421b763f7bf7e4f9081403db417d5a55a36
diff --git a/src/3rdparty/double-conversion/bignum-dtoa.cc b/src/3rdparty/double-conversion/bignum-dtoa.cc
new file mode 100644
index 0000000000..b6c2e85d17
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum-dtoa.cc
@@ -0,0 +1,640 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <math.h>
+
+#include "bignum-dtoa.h"
+
+#include "bignum.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+static int NormalizedExponent(uint64_t significand, int exponent) {
+ ASSERT(significand != 0);
+ while ((significand & Double::kHiddenBit) == 0) {
+ significand = significand << 1;
+ exponent = exponent - 1;
+ }
+ return exponent;
+}
+
+
+// Forward declarations:
+// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
+static int EstimatePower(int exponent);
+// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
+// and denominator.
+static void InitialScaledStartValues(uint64_t significand,
+ int exponent,
+ bool lower_boundary_is_closer,
+ int estimated_power,
+ bool need_boundary_deltas,
+ Bignum* numerator,
+ Bignum* denominator,
+ Bignum* delta_minus,
+ Bignum* delta_plus);
+// Multiplies numerator/denominator so that its values lies in the range 1-10.
+// Returns decimal_point s.t.
+// v = numerator'/denominator' * 10^(decimal_point-1)
+// where numerator' and denominator' are the values of numerator and
+// denominator after the call to this function.
+static void FixupMultiply10(int estimated_power, bool is_even,
+ int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus);
+// Generates digits from the left to the right and stops when the generated
+// digits yield the shortest decimal representation of v.
+static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus,
+ bool is_even,
+ Vector<char> buffer, int* length);
+// Generates 'requested_digits' after the decimal point.
+static void BignumToFixed(int requested_digits, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length);
+// Generates 'count' digits of numerator/denominator.
+// Once 'count' digits have been produced rounds the result depending on the
+// remainder (remainders of exactly .5 round upwards). Might update the
+// decimal_point when rounding up (for example for 0.9999).
+static void GenerateCountedDigits(int count, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length);
+
+
+void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* decimal_point) {
+ ASSERT(v > 0);
+ ASSERT(!Double(v).IsSpecial());
+ uint64_t significand;
+ int exponent;
+ bool lower_boundary_is_closer;
+ if (mode == BIGNUM_DTOA_SHORTEST_SINGLE) {
+ float f = static_cast<float>(v);
+ ASSERT(f == v);
+ significand = Single(f).Significand();
+ exponent = Single(f).Exponent();
+ lower_boundary_is_closer = Single(f).LowerBoundaryIsCloser();
+ } else {
+ significand = Double(v).Significand();
+ exponent = Double(v).Exponent();
+ lower_boundary_is_closer = Double(v).LowerBoundaryIsCloser();
+ }
+ bool need_boundary_deltas =
+ (mode == BIGNUM_DTOA_SHORTEST || mode == BIGNUM_DTOA_SHORTEST_SINGLE);
+
+ bool is_even = (significand & 1) == 0;
+ int normalized_exponent = NormalizedExponent(significand, exponent);
+ // estimated_power might be too low by 1.
+ int estimated_power = EstimatePower(normalized_exponent);
+
+ // Shortcut for Fixed.
+ // The requested digits correspond to the digits after the point. If the
+ // number is much too small, then there is no need in trying to get any
+ // digits.
+ if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) {
+ buffer[0] = '\0';
+ *length = 0;
+ // Set decimal-point to -requested_digits. This is what Gay does.
+ // Note that it should not have any effect anyways since the string is
+ // empty.
+ *decimal_point = -requested_digits;
+ return;
+ }
+
+ Bignum numerator;
+ Bignum denominator;
+ Bignum delta_minus;
+ Bignum delta_plus;
+ // Make sure the bignum can grow large enough. The smallest double equals
+ // 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
+ // The maximum double is 1.7976931348623157e308 which needs fewer than
+ // 308*4 binary digits.
+ ASSERT(Bignum::kMaxSignificantBits >= 324*4);
+ InitialScaledStartValues(significand, exponent, lower_boundary_is_closer,
+ estimated_power, need_boundary_deltas,
+ &numerator, &denominator,
+ &delta_minus, &delta_plus);
+ // We now have v = (numerator / denominator) * 10^estimated_power.
+ FixupMultiply10(estimated_power, is_even, decimal_point,
+ &numerator, &denominator,
+ &delta_minus, &delta_plus);
+ // We now have v = (numerator / denominator) * 10^(decimal_point-1), and
+ // 1 <= (numerator + delta_plus) / denominator < 10
+ switch (mode) {
+ case BIGNUM_DTOA_SHORTEST:
+ case BIGNUM_DTOA_SHORTEST_SINGLE:
+ GenerateShortestDigits(&numerator, &denominator,
+ &delta_minus, &delta_plus,
+ is_even, buffer, length);
+ break;
+ case BIGNUM_DTOA_FIXED:
+ BignumToFixed(requested_digits, decimal_point,
+ &numerator, &denominator,
+ buffer, length);
+ break;
+ case BIGNUM_DTOA_PRECISION:
+ GenerateCountedDigits(requested_digits, decimal_point,
+ &numerator, &denominator,
+ buffer, length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ buffer[*length] = '\0';
+}
+
+
+// The procedure starts generating digits from the left to the right and stops
+// when the generated digits yield the shortest decimal representation of v. A
+// decimal representation of v is a number lying closer to v than to any other
+// double, so it converts to v when read.
+//
+// This is true if d, the decimal representation, is between m- and m+, the
+// upper and lower boundaries. d must be strictly between them if !is_even.
+// m- := (numerator - delta_minus) / denominator
+// m+ := (numerator + delta_plus) / denominator
+//
+// Precondition: 0 <= (numerator+delta_plus) / denominator < 10.
+// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit
+// will be produced. This should be the standard precondition.
+static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus,
+ bool is_even,
+ Vector<char> buffer, int* length) {
+ // Small optimization: if delta_minus and delta_plus are the same just reuse
+ // one of the two bignums.
+ if (Bignum::Equal(*delta_minus, *delta_plus)) {
+ delta_plus = delta_minus;
+ }
+ *length = 0;
+ while (true) {
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
+ // digit = numerator / denominator (integer division).
+ // numerator = numerator % denominator.
+ buffer[(*length)++] = digit + '0';
+
+ // Can we stop already?
+ // If the remainder of the division is less than the distance to the lower
+ // boundary we can stop. In this case we simply round down (discarding the
+ // remainder).
+ // Similarly we test if we can round up (using the upper boundary).
+ bool in_delta_room_minus;
+ bool in_delta_room_plus;
+ if (is_even) {
+ in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus);
+ } else {
+ in_delta_room_minus = Bignum::Less(*numerator, *delta_minus);
+ }
+ if (is_even) {
+ in_delta_room_plus =
+ Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
+ } else {
+ in_delta_room_plus =
+ Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
+ }
+ if (!in_delta_room_minus && !in_delta_room_plus) {
+ // Prepare for next iteration.
+ numerator->Times10();
+ delta_minus->Times10();
+ // We optimized delta_plus to be equal to delta_minus (if they share the
+ // same value). So don't multiply delta_plus if they point to the same
+ // object.
+ if (delta_minus != delta_plus) {
+ delta_plus->Times10();
+ }
+ } else if (in_delta_room_minus && in_delta_room_plus) {
+ // Let's see if 2*numerator < denominator.
+ // If yes, then the next digit would be < 5 and we can round down.
+ int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator);
+ if (compare < 0) {
+ // Remaining digits are less than .5. -> Round down (== do nothing).
+ } else if (compare > 0) {
+ // Remaining digits are more than .5 of denominator. -> Round up.
+ // Note that the last digit could not be a '9' as otherwise the whole
+ // loop would have stopped earlier.
+ // We still have an assert here in case the preconditions were not
+ // satisfied.
+ ASSERT(buffer[(*length) - 1] != '9');
+ buffer[(*length) - 1]++;
+ } else {
+ // Halfway case.
+ // TODO(floitsch): need a way to solve half-way cases.
+ // For now let's round towards even (since this is what Gay seems to
+ // do).
+
+ if ((buffer[(*length) - 1] - '0') % 2 == 0) {
+ // Round down => Do nothing.
+ } else {
+ ASSERT(buffer[(*length) - 1] != '9');
+ buffer[(*length) - 1]++;
+ }
+ }
+ return;
+ } else if (in_delta_room_minus) {
+ // Round down (== do nothing).
+ return;
+ } else { // in_delta_room_plus
+ // Round up.
+ // Note again that the last digit could not be '9' since this would have
+ // stopped the loop earlier.
+ // We still have an ASSERT here, in case the preconditions were not
+ // satisfied.
+ ASSERT(buffer[(*length) -1] != '9');
+ buffer[(*length) - 1]++;
+ return;
+ }
+ }
+}
+
+
+// Let v = numerator / denominator < 10.
+// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
+// from left to right. Once 'count' digits have been produced we decide wether
+// to round up or down. Remainders of exactly .5 round upwards. Numbers such
+// as 9.999999 propagate a carry all the way, and change the
+// exponent (decimal_point), when rounding upwards.
+static void GenerateCountedDigits(int count, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length) {
+ ASSERT(count >= 0);
+ for (int i = 0; i < count - 1; ++i) {
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
+ // digit = numerator / denominator (integer division).
+ // numerator = numerator % denominator.
+ buffer[i] = digit + '0';
+ // Prepare for next iteration.
+ numerator->Times10();
+ }
+ // Generate the last digit.
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
+ digit++;
+ }
+ buffer[count - 1] = digit + '0';
+ // Correct bad digits (in case we had a sequence of '9's). Propagate the
+ // carry until we hat a non-'9' or til we reach the first digit.
+ for (int i = count - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) break;
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+ if (buffer[0] == '0' + 10) {
+ // Propagate a carry past the top place.
+ buffer[0] = '1';
+ (*decimal_point)++;
+ }
+ *length = count;
+}
+
+
+// Generates 'requested_digits' after the decimal point. It might omit
+// trailing '0's. If the input number is too small then no digits at all are
+// generated (ex.: 2 fixed digits for 0.00001).
+//
+// Input verifies: 1 <= (numerator + delta) / denominator < 10.
+static void BignumToFixed(int requested_digits, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length) {
+ // Note that we have to look at more than just the requested_digits, since
+ // a number could be rounded up. Example: v=0.5 with requested_digits=0.
+ // Even though the power of v equals 0 we can't just stop here.
+ if (-(*decimal_point) > requested_digits) {
+ // The number is definitively too small.
+ // Ex: 0.001 with requested_digits == 1.
+ // Set decimal-point to -requested_digits. This is what Gay does.
+ // Note that it should not have any effect anyways since the string is
+ // empty.
+ *decimal_point = -requested_digits;
+ *length = 0;
+ return;
+ } else if (-(*decimal_point) == requested_digits) {
+ // We only need to verify if the number rounds down or up.
+ // Ex: 0.04 and 0.06 with requested_digits == 1.
+ ASSERT(*decimal_point == -requested_digits);
+ // Initially the fraction lies in range (1, 10]. Multiply the denominator
+ // by 10 so that we can compare more easily.
+ denominator->Times10();
+ if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
+ // If the fraction is >= 0.5 then we have to include the rounded
+ // digit.
+ buffer[0] = '1';
+ *length = 1;
+ (*decimal_point)++;
+ } else {
+ // Note that we caught most of similar cases earlier.
+ *length = 0;
+ }
+ return;
+ } else {
+ // The requested digits correspond to the digits after the point.
+ // The variable 'needed_digits' includes the digits before the point.
+ int needed_digits = (*decimal_point) + requested_digits;
+ GenerateCountedDigits(needed_digits, decimal_point,
+ numerator, denominator,
+ buffer, length);
+ }
+}
+
+
+// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
+// v = f * 2^exponent and 2^52 <= f < 2^53.
+// v is hence a normalized double with the given exponent. The output is an
+// approximation for the exponent of the decimal approimation .digits * 10^k.
+//
+// The result might undershoot by 1 in which case 10^k <= v < 10^k+1.
+// Note: this property holds for v's upper boundary m+ too.
+// 10^k <= m+ < 10^k+1.
+// (see explanation below).
+//
+// Examples:
+// EstimatePower(0) => 16
+// EstimatePower(-52) => 0
+//
+// Note: e >= 0 => EstimatedPower(e) > 0. No similar claim can be made for e<0.
+static int EstimatePower(int exponent) {
+ // This function estimates log10 of v where v = f*2^e (with e == exponent).
+ // Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)).
+ // Note that f is bounded by its container size. Let p = 53 (the double's
+ // significand size). Then 2^(p-1) <= f < 2^p.
+ //
+ // Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close
+ // to log2(v) the function is simplified to (e+(len(f)-1)/log2(10)).
+ // The computed number undershoots by less than 0.631 (when we compute log3
+ // and not log10).
+ //
+ // Optimization: since we only need an approximated result this computation
+ // can be performed on 64 bit integers. On x86/x64 architecture the speedup is
+ // not really measurable, though.
+ //
+ // Since we want to avoid overshooting we decrement by 1e10 so that
+ // floating-point imprecisions don't affect us.
+ //
+ // Explanation for v's boundary m+: the computation takes advantage of
+ // the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement
+ // (even for denormals where the delta can be much more important).
+
+ const double k1Log10 = 0.30102999566398114; // 1/lg(10)
+
+ // For doubles len(f) == 53 (don't forget the hidden bit).
+ const int kSignificandSize = Double::kSignificandSize;
+ double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
+ return static_cast<int>(estimate);
+}
+
+
+// See comments for InitialScaledStartValues.
+static void InitialScaledStartValuesPositiveExponent(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // A positive exponent implies a positive power.
+ ASSERT(estimated_power >= 0);
+ // Since the estimated_power is positive we simply multiply the denominator
+ // by 10^estimated_power.
+
+ // numerator = v.
+ numerator->AssignUInt64(significand);
+ numerator->ShiftLeft(exponent);
+ // denominator = 10^estimated_power.
+ denominator->AssignPowerUInt16(10, estimated_power);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ denominator->ShiftLeft(1);
+ numerator->ShiftLeft(1);
+ // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
+ // denominator (of 2) delta_plus equals 2^e.
+ delta_plus->AssignUInt16(1);
+ delta_plus->ShiftLeft(exponent);
+ // Same for delta_minus. The adjustments if f == 2^p-1 are done later.
+ delta_minus->AssignUInt16(1);
+ delta_minus->ShiftLeft(exponent);
+ }
+}
+
+
+// See comments for InitialScaledStartValues
+static void InitialScaledStartValuesNegativeExponentPositivePower(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // v = f * 2^e with e < 0, and with estimated_power >= 0.
+ // This means that e is close to 0 (have a look at how estimated_power is
+ // computed).
+
+ // numerator = significand
+ // since v = significand * 2^exponent this is equivalent to
+ // numerator = v * / 2^-exponent
+ numerator->AssignUInt64(significand);
+ // denominator = 10^estimated_power * 2^-exponent (with exponent < 0)
+ denominator->AssignPowerUInt16(10, estimated_power);
+ denominator->ShiftLeft(-exponent);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ denominator->ShiftLeft(1);
+ numerator->ShiftLeft(1);
+ // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
+ // denominator (of 2) delta_plus equals 2^e.
+ // Given that the denominator already includes v's exponent the distance
+ // to the boundaries is simply 1.
+ delta_plus->AssignUInt16(1);
+ // Same for delta_minus. The adjustments if f == 2^p-1 are done later.
+ delta_minus->AssignUInt16(1);
+ }
+}
+
+
+// See comments for InitialScaledStartValues
+static void InitialScaledStartValuesNegativeExponentNegativePower(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // Instead of multiplying the denominator with 10^estimated_power we
+ // multiply all values (numerator and deltas) by 10^-estimated_power.
+
+ // Use numerator as temporary container for power_ten.
+ Bignum* power_ten = numerator;
+ power_ten->AssignPowerUInt16(10, -estimated_power);
+
+ if (need_boundary_deltas) {
+ // Since power_ten == numerator we must make a copy of 10^estimated_power
+ // before we complete the computation of the numerator.
+ // delta_plus = delta_minus = 10^estimated_power
+ delta_plus->AssignBignum(*power_ten);
+ delta_minus->AssignBignum(*power_ten);
+ }
+
+ // numerator = significand * 2 * 10^-estimated_power
+ // since v = significand * 2^exponent this is equivalent to
+ // numerator = v * 10^-estimated_power * 2 * 2^-exponent.
+ // Remember: numerator has been abused as power_ten. So no need to assign it
+ // to itself.
+ ASSERT(numerator == power_ten);
+ numerator->MultiplyByUInt64(significand);
+
+ // denominator = 2 * 2^-exponent with exponent < 0.
+ denominator->AssignUInt16(1);
+ denominator->ShiftLeft(-exponent);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ numerator->ShiftLeft(1);
+ denominator->ShiftLeft(1);
+ // With this shift the boundaries have their correct value, since
+ // delta_plus = 10^-estimated_power, and
+ // delta_minus = 10^-estimated_power.
+ // These assignments have been done earlier.
+ // The adjustments if f == 2^p-1 (lower boundary is closer) are done later.
+ }
+}
+
+
+// Let v = significand * 2^exponent.
+// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
+// and denominator. The functions GenerateShortestDigits and
+// GenerateCountedDigits will then convert this ratio to its decimal
+// representation d, with the required accuracy.
+// Then d * 10^estimated_power is the representation of v.
+// (Note: the fraction and the estimated_power might get adjusted before
+// generating the decimal representation.)
+//
+// The initial start values consist of:
+// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power.
+// - a scaled (common) denominator.
+// optionally (used by GenerateShortestDigits to decide if it has the shortest
+// decimal converting back to v):
+// - v - m-: the distance to the lower boundary.
+// - m+ - v: the distance to the upper boundary.
+//
+// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator.
+//
+// Let ep == estimated_power, then the returned values will satisfy:
+// v / 10^ep = numerator / denominator.
+// v's boundaries m- and m+:
+// m- / 10^ep == v / 10^ep - delta_minus / denominator
+// m+ / 10^ep == v / 10^ep + delta_plus / denominator
+// Or in other words:
+// m- == v - delta_minus * 10^ep / denominator;
+// m+ == v + delta_plus * 10^ep / denominator;
+//
+// Since 10^(k-1) <= v < 10^k (with k == estimated_power)
+// or 10^k <= v < 10^(k+1)
+// we then have 0.1 <= numerator/denominator < 1
+// or 1 <= numerator/denominator < 10
+//
+// It is then easy to kickstart the digit-generation routine.
+//
+// The boundary-deltas are only filled if the mode equals BIGNUM_DTOA_SHORTEST
+// or BIGNUM_DTOA_SHORTEST_SINGLE.
+
+static void InitialScaledStartValues(uint64_t significand,
+ int exponent,
+ bool lower_boundary_is_closer,
+ int estimated_power,
+ bool need_boundary_deltas,
+ Bignum* numerator,
+ Bignum* denominator,
+ Bignum* delta_minus,
+ Bignum* delta_plus) {
+ if (exponent >= 0) {
+ InitialScaledStartValuesPositiveExponent(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ } else if (estimated_power >= 0) {
+ InitialScaledStartValuesNegativeExponentPositivePower(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ } else {
+ InitialScaledStartValuesNegativeExponentNegativePower(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ }
+
+ if (need_boundary_deltas && lower_boundary_is_closer) {
+ // The lower boundary is closer at half the distance of "normal" numbers.
+ // Increase the common denominator and adapt all but the delta_minus.
+ denominator->ShiftLeft(1); // *2
+ numerator->ShiftLeft(1); // *2
+ delta_plus->ShiftLeft(1); // *2
+ }
+}
+
+
+// This routine multiplies numerator/denominator so that its value lies in the
+// range 1-10. That is after a call to this function we have:
+// 1 <= (numerator + delta_plus) / denominator < 10.
+// Let numerator the input before modification and numerator' the argument
+// after modification, then the output-parameter decimal_point is such that
+// numerator / denominator * 10^estimated_power ==
+// numerator' / denominator' * 10^(decimal_point - 1)
+// In some cases estimated_power was too low, and this is already the case. We
+// then simply adjust the power so that 10^(k-1) <= v < 10^k (with k ==
+// estimated_power) but do not touch the numerator or denominator.
+// Otherwise the routine multiplies the numerator and the deltas by 10.
+static void FixupMultiply10(int estimated_power, bool is_even,
+ int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ bool in_range;
+ if (is_even) {
+ // For IEEE doubles half-way cases (in decimal system numbers ending with 5)
+ // are rounded to the closest floating-point number with even significand.
+ in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
+ } else {
+ in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
+ }
+ if (in_range) {
+ // Since numerator + delta_plus >= denominator we already have
+ // 1 <= numerator/denominator < 10. Simply update the estimated_power.
+ *decimal_point = estimated_power + 1;
+ } else {
+ *decimal_point = estimated_power;
+ numerator->Times10();
+ if (Bignum::Equal(*delta_minus, *delta_plus)) {
+ delta_minus->Times10();
+ delta_plus->AssignBignum(*delta_minus);
+ } else {
+ delta_minus->Times10();
+ delta_plus->Times10();
+ }
+ }
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/bignum-dtoa.h b/src/3rdparty/double-conversion/bignum-dtoa.h
new file mode 100644
index 0000000000..34b961992d
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum-dtoa.h
@@ -0,0 +1,84 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+enum BignumDtoaMode {
+ // Return the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate but
+ // correct) 0.3.
+ BIGNUM_DTOA_SHORTEST,
+ // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats.
+ BIGNUM_DTOA_SHORTEST_SINGLE,
+ // Return a fixed number of digits after the decimal point.
+ // For instance fixed(0.1, 4) becomes 0.1000
+ // If the input number is big, the output will be big.
+ BIGNUM_DTOA_FIXED,
+ // Return a fixed number of digits, no matter what the exponent is.
+ BIGNUM_DTOA_PRECISION
+};
+
+// Converts the given double 'v' to ascii.
+// The result should be interpreted as buffer * 10^(point-length).
+// The buffer will be null-terminated.
+//
+// The input v must be > 0 and different from NaN, and Infinity.
+//
+// The output depends on the given mode:
+// - SHORTEST: produce the least amount of digits for which the internal
+// identity requirement is still satisfied. If the digits are printed
+// (together with the correct exponent) then reading this number will give
+// 'v' again. The buffer will choose the representation that is closest to
+// 'v'. If there are two at the same distance, then the number is rounded up.
+// In this mode the 'requested_digits' parameter is ignored.
+// - FIXED: produces digits necessary to print a given number with
+// 'requested_digits' digits after the decimal point. The produced digits
+// might be too short in which case the caller has to fill the gaps with '0's.
+// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
+// buffer="2", point=0.
+// Note: the length of the returned buffer has no meaning wrt the significance
+// of its digits. That is, just because it contains '0's does not mean that
+// any other digit would not satisfy the internal identity requirement.
+// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+// Even though the length of produced digits usually equals
+// 'requested_digits', the function is allowed to return fewer digits, in
+// which case the caller has to fill the missing digits with '0's.
+// Halfway cases are again rounded up.
+// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
+// and a terminating null-character.
+void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* point);
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
diff --git a/src/3rdparty/double-conversion/bignum.cc b/src/3rdparty/double-conversion/bignum.cc
new file mode 100644
index 0000000000..747491a089
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum.cc
@@ -0,0 +1,764 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "bignum.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+Bignum::Bignum()
+ : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
+ for (int i = 0; i < kBigitCapacity; ++i) {
+ bigits_[i] = 0;
+ }
+}
+
+
+template<typename S>
+static int BitSize(S value) {
+ return 8 * sizeof(value);
+}
+
+// Guaranteed to lie in one Bigit.
+void Bignum::AssignUInt16(uint16_t value) {
+ ASSERT(kBigitSize >= BitSize(value));
+ Zero();
+ if (value == 0) return;
+
+ EnsureCapacity(1);
+ bigits_[0] = value;
+ used_digits_ = 1;
+}
+
+
+void Bignum::AssignUInt64(uint64_t value) {
+ const int kUInt64Size = 64;
+
+ Zero();
+ if (value == 0) return;
+
+ int needed_bigits = kUInt64Size / kBigitSize + 1;
+ EnsureCapacity(needed_bigits);
+ for (int i = 0; i < needed_bigits; ++i) {
+ bigits_[i] = value & kBigitMask;
+ value = value >> kBigitSize;
+ }
+ used_digits_ = needed_bigits;
+ Clamp();
+}
+
+
+void Bignum::AssignBignum(const Bignum& other) {
+ exponent_ = other.exponent_;
+ for (int i = 0; i < other.used_digits_; ++i) {
+ bigits_[i] = other.bigits_[i];
+ }
+ // Clear the excess digits (if there were any).
+ for (int i = other.used_digits_; i < used_digits_; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ = other.used_digits_;
+}
+
+
+static uint64_t ReadUInt64(Vector<const char> buffer,
+ int from,
+ int digits_to_read) {
+ uint64_t result = 0;
+ for (int i = from; i < from + digits_to_read; ++i) {
+ int digit = buffer[i] - '0';
+ ASSERT(0 <= digit && digit <= 9);
+ result = result * 10 + digit;
+ }
+ return result;
+}
+
+
+void Bignum::AssignDecimalString(Vector<const char> value) {
+ // 2^64 = 18446744073709551616 > 10^19
+ const int kMaxUint64DecimalDigits = 19;
+ Zero();
+ int length = value.length();
+ int pos = 0;
+ // Let's just say that each digit needs 4 bits.
+ while (length >= kMaxUint64DecimalDigits) {
+ uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
+ pos += kMaxUint64DecimalDigits;
+ length -= kMaxUint64DecimalDigits;
+ MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
+ AddUInt64(digits);
+ }
+ uint64_t digits = ReadUInt64(value, pos, length);
+ MultiplyByPowerOfTen(length);
+ AddUInt64(digits);
+ Clamp();
+}
+
+
+static int HexCharValue(char c) {
+ if ('0' <= c && c <= '9') return c - '0';
+ if ('a' <= c && c <= 'f') return 10 + c - 'a';
+ if ('A' <= c && c <= 'F') return 10 + c - 'A';
+ UNREACHABLE();
+ return 0; // To make compiler happy.
+}
+
+
+void Bignum::AssignHexString(Vector<const char> value) {
+ Zero();
+ int length = value.length();
+
+ int needed_bigits = length * 4 / kBigitSize + 1;
+ EnsureCapacity(needed_bigits);
+ int string_index = length - 1;
+ for (int i = 0; i < needed_bigits - 1; ++i) {
+ // These bigits are guaranteed to be "full".
+ Chunk current_bigit = 0;
+ for (int j = 0; j < kBigitSize / 4; j++) {
+ current_bigit += HexCharValue(value[string_index--]) << (j * 4);
+ }
+ bigits_[i] = current_bigit;
+ }
+ used_digits_ = needed_bigits - 1;
+
+ Chunk most_significant_bigit = 0; // Could be = 0;
+ for (int j = 0; j <= string_index; ++j) {
+ most_significant_bigit <<= 4;
+ most_significant_bigit += HexCharValue(value[j]);
+ }
+ if (most_significant_bigit != 0) {
+ bigits_[used_digits_] = most_significant_bigit;
+ used_digits_++;
+ }
+ Clamp();
+}
+
+
+void Bignum::AddUInt64(uint64_t operand) {
+ if (operand == 0) return;
+ Bignum other;
+ other.AssignUInt64(operand);
+ AddBignum(other);
+}
+
+
+void Bignum::AddBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+
+ // If this has a greater exponent than other append zero-bigits to this.
+ // After this call exponent_ <= other.exponent_.
+ Align(other);
+
+ // There are two possibilities:
+ // aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
+ // bbbbb 00000000
+ // ----------------
+ // ccccccccccc 0000
+ // or
+ // aaaaaaaaaa 0000
+ // bbbbbbbbb 0000000
+ // -----------------
+ // cccccccccccc 0000
+ // In both cases we might need a carry bigit.
+
+ EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
+ Chunk carry = 0;
+ int bigit_pos = other.exponent_ - exponent_;
+ ASSERT(bigit_pos >= 0);
+ for (int i = 0; i < other.used_digits_; ++i) {
+ Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
+ bigits_[bigit_pos] = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ bigit_pos++;
+ }
+
+ while (carry != 0) {
+ Chunk sum = bigits_[bigit_pos] + carry;
+ bigits_[bigit_pos] = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ bigit_pos++;
+ }
+ used_digits_ = Max(bigit_pos, used_digits_);
+ ASSERT(IsClamped());
+}
+
+
+void Bignum::SubtractBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+ // We require this to be bigger than other.
+ ASSERT(LessEqual(other, *this));
+
+ Align(other);
+
+ int offset = other.exponent_ - exponent_;
+ Chunk borrow = 0;
+ int i;
+ for (i = 0; i < other.used_digits_; ++i) {
+ ASSERT((borrow == 0) || (borrow == 1));
+ Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
+ bigits_[i + offset] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ while (borrow != 0) {
+ Chunk difference = bigits_[i + offset] - borrow;
+ bigits_[i + offset] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ ++i;
+ }
+ Clamp();
+}
+
+
+void Bignum::ShiftLeft(int shift_amount) {
+ if (used_digits_ == 0) return;
+ exponent_ += shift_amount / kBigitSize;
+ int local_shift = shift_amount % kBigitSize;
+ EnsureCapacity(used_digits_ + 1);
+ BigitsShiftLeft(local_shift);
+}
+
+
+void Bignum::MultiplyByUInt32(uint32_t factor) {
+ if (factor == 1) return;
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ if (used_digits_ == 0) return;
+
+ // The product of a bigit with the factor is of size kBigitSize + 32.
+ // Assert that this number + 1 (for the carry) fits into double chunk.
+ ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
+ DoubleChunk carry = 0;
+ for (int i = 0; i < used_digits_; ++i) {
+ DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
+ bigits_[i] = static_cast<Chunk>(product & kBigitMask);
+ carry = (product >> kBigitSize);
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_digits_ + 1);
+ bigits_[used_digits_] = carry & kBigitMask;
+ used_digits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByUInt64(uint64_t factor) {
+ if (factor == 1) return;
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ ASSERT(kBigitSize < 32);
+ uint64_t carry = 0;
+ uint64_t low = factor & 0xFFFFFFFF;
+ uint64_t high = factor >> 32;
+ for (int i = 0; i < used_digits_; ++i) {
+ uint64_t product_low = low * bigits_[i];
+ uint64_t product_high = high * bigits_[i];
+ uint64_t tmp = (carry & kBigitMask) + product_low;
+ bigits_[i] = tmp & kBigitMask;
+ carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
+ (product_high << (32 - kBigitSize));
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_digits_ + 1);
+ bigits_[used_digits_] = carry & kBigitMask;
+ used_digits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByPowerOfTen(int exponent) {
+ const uint64_t kFive27 = UINT64_2PART_C(0x6765c793, fa10079d);
+ const uint16_t kFive1 = 5;
+ const uint16_t kFive2 = kFive1 * 5;
+ const uint16_t kFive3 = kFive2 * 5;
+ const uint16_t kFive4 = kFive3 * 5;
+ const uint16_t kFive5 = kFive4 * 5;
+ const uint16_t kFive6 = kFive5 * 5;
+ const uint32_t kFive7 = kFive6 * 5;
+ const uint32_t kFive8 = kFive7 * 5;
+ const uint32_t kFive9 = kFive8 * 5;
+ const uint32_t kFive10 = kFive9 * 5;
+ const uint32_t kFive11 = kFive10 * 5;
+ const uint32_t kFive12 = kFive11 * 5;
+ const uint32_t kFive13 = kFive12 * 5;
+ const uint32_t kFive1_to_12[] =
+ { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
+ kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
+
+ ASSERT(exponent >= 0);
+ if (exponent == 0) return;
+ if (used_digits_ == 0) return;
+
+ // We shift by exponent at the end just before returning.
+ int remaining_exponent = exponent;
+ while (remaining_exponent >= 27) {
+ MultiplyByUInt64(kFive27);
+ remaining_exponent -= 27;
+ }
+ while (remaining_exponent >= 13) {
+ MultiplyByUInt32(kFive13);
+ remaining_exponent -= 13;
+ }
+ if (remaining_exponent > 0) {
+ MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
+ }
+ ShiftLeft(exponent);
+}
+
+
+void Bignum::Square() {
+ ASSERT(IsClamped());
+ int product_length = 2 * used_digits_;
+ EnsureCapacity(product_length);
+
+ // Comba multiplication: compute each column separately.
+ // Example: r = a2a1a0 * b2b1b0.
+ // r = 1 * a0b0 +
+ // 10 * (a1b0 + a0b1) +
+ // 100 * (a2b0 + a1b1 + a0b2) +
+ // 1000 * (a2b1 + a1b2) +
+ // 10000 * a2b2
+ //
+ // In the worst case we have to accumulate nb-digits products of digit*digit.
+ //
+ // Assert that the additional number of bits in a DoubleChunk are enough to
+ // sum up used_digits of Bigit*Bigit.
+ if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) {
+ UNIMPLEMENTED();
+ }
+ DoubleChunk accumulator = 0;
+ // First shift the digits so we don't overwrite them.
+ int copy_offset = used_digits_;
+ for (int i = 0; i < used_digits_; ++i) {
+ bigits_[copy_offset + i] = bigits_[i];
+ }
+ // We have two loops to avoid some 'if's in the loop.
+ for (int i = 0; i < used_digits_; ++i) {
+ // Process temporary digit i with power i.
+ // The sum of the two indices must be equal to i.
+ int bigit_index1 = i;
+ int bigit_index2 = 0;
+ // Sum all of the sub-products.
+ while (bigit_index1 >= 0) {
+ Chunk chunk1 = bigits_[copy_offset + bigit_index1];
+ Chunk chunk2 = bigits_[copy_offset + bigit_index2];
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ for (int i = used_digits_; i < product_length; ++i) {
+ int bigit_index1 = used_digits_ - 1;
+ int bigit_index2 = i - bigit_index1;
+ // Invariant: sum of both indices is again equal to i.
+ // Inner loop runs 0 times on last iteration, emptying accumulator.
+ while (bigit_index2 < used_digits_) {
+ Chunk chunk1 = bigits_[copy_offset + bigit_index1];
+ Chunk chunk2 = bigits_[copy_offset + bigit_index2];
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ // The overwritten bigits_[i] will never be read in further loop iterations,
+ // because bigit_index1 and bigit_index2 are always greater
+ // than i - used_digits_.
+ bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ // Since the result was guaranteed to lie inside the number the
+ // accumulator must be 0 now.
+ ASSERT(accumulator == 0);
+
+ // Don't forget to update the used_digits and the exponent.
+ used_digits_ = product_length;
+ exponent_ *= 2;
+ Clamp();
+}
+
+
+void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
+ ASSERT(base != 0);
+ ASSERT(power_exponent >= 0);
+ if (power_exponent == 0) {
+ AssignUInt16(1);
+ return;
+ }
+ Zero();
+ int shifts = 0;
+ // We expect base to be in range 2-32, and most often to be 10.
+ // It does not make much sense to implement different algorithms for counting
+ // the bits.
+ while ((base & 1) == 0) {
+ base >>= 1;
+ shifts++;
+ }
+ int bit_size = 0;
+ int tmp_base = base;
+ while (tmp_base != 0) {
+ tmp_base >>= 1;
+ bit_size++;
+ }
+ int final_size = bit_size * power_exponent;
+ // 1 extra bigit for the shifting, and one for rounded final_size.
+ EnsureCapacity(final_size / kBigitSize + 2);
+
+ // Left to Right exponentiation.
+ int mask = 1;
+ while (power_exponent >= mask) mask <<= 1;
+
+ // The mask is now pointing to the bit above the most significant 1-bit of
+ // power_exponent.
+ // Get rid of first 1-bit;
+ mask >>= 2;
+ uint64_t this_value = base;
+
+ bool delayed_multipliciation = false;
+ const uint64_t max_32bits = 0xFFFFFFFF;
+ while (mask != 0 && this_value <= max_32bits) {
+ this_value = this_value * this_value;
+ // Verify that there is enough space in this_value to perform the
+ // multiplication. The first bit_size bits must be 0.
+ if ((power_exponent & mask) != 0) {
+ uint64_t base_bits_mask =
+ ~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
+ bool high_bits_zero = (this_value & base_bits_mask) == 0;
+ if (high_bits_zero) {
+ this_value *= base;
+ } else {
+ delayed_multipliciation = true;
+ }
+ }
+ mask >>= 1;
+ }
+ AssignUInt64(this_value);
+ if (delayed_multipliciation) {
+ MultiplyByUInt32(base);
+ }
+
+ // Now do the same thing as a bignum.
+ while (mask != 0) {
+ Square();
+ if ((power_exponent & mask) != 0) {
+ MultiplyByUInt32(base);
+ }
+ mask >>= 1;
+ }
+
+ // And finally add the saved shifts.
+ ShiftLeft(shifts * power_exponent);
+}
+
+
+// Precondition: this/other < 16bit.
+uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+ ASSERT(other.used_digits_ > 0);
+
+ // Easy case: if we have fewer digits than the divisor then the result is 0.
+ // Note: this handles the case where this == 0, too.
+ if (BigitLength() < other.BigitLength()) {
+ return 0;
+ }
+
+ Align(other);
+
+ uint16_t result = 0;
+
+ // Start by removing multiples of 'other' until both numbers have the same
+ // number of digits.
+ while (BigitLength() > other.BigitLength()) {
+ // This naive approach is extremely inefficient if this divided by other
+ // is big. This function is implemented for doubleToString where
+ // the result should be small (less than 10).
+ ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
+ // Remove the multiples of the first digit.
+ // Example this = 23 and other equals 9. -> Remove 2 multiples.
+ result += bigits_[used_digits_ - 1];
+ SubtractTimes(other, bigits_[used_digits_ - 1]);
+ }
+
+ ASSERT(BigitLength() == other.BigitLength());
+
+ // Both bignums are at the same length now.
+ // Since other has more than 0 digits we know that the access to
+ // bigits_[used_digits_ - 1] is safe.
+ Chunk this_bigit = bigits_[used_digits_ - 1];
+ Chunk other_bigit = other.bigits_[other.used_digits_ - 1];
+
+ if (other.used_digits_ == 1) {
+ // Shortcut for easy (and common) case.
+ int quotient = this_bigit / other_bigit;
+ bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient;
+ result += quotient;
+ Clamp();
+ return result;
+ }
+
+ int division_estimate = this_bigit / (other_bigit + 1);
+ result += division_estimate;
+ SubtractTimes(other, division_estimate);
+
+ if (other_bigit * (division_estimate + 1) > this_bigit) {
+ // No need to even try to subtract. Even if other's remaining digits were 0
+ // another subtraction would be too much.
+ return result;
+ }
+
+ while (LessEqual(other, *this)) {
+ SubtractBignum(other);
+ result++;
+ }
+ return result;
+}
+
+
+template<typename S>
+static int SizeInHexChars(S number) {
+ ASSERT(number > 0);
+ int result = 0;
+ while (number != 0) {
+ number >>= 4;
+ result++;
+ }
+ return result;
+}
+
+
+static char HexCharOfValue(int value) {
+ ASSERT(0 <= value && value <= 16);
+ if (value < 10) return value + '0';
+ return value - 10 + 'A';
+}
+
+
+bool Bignum::ToHexString(char* buffer, int buffer_size) const {
+ ASSERT(IsClamped());
+ // Each bigit must be printable as separate hex-character.
+ ASSERT(kBigitSize % 4 == 0);
+ const int kHexCharsPerBigit = kBigitSize / 4;
+
+ if (used_digits_ == 0) {
+ if (buffer_size < 2) return false;
+ buffer[0] = '0';
+ buffer[1] = '\0';
+ return true;
+ }
+ // We add 1 for the terminating '\0' character.
+ int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
+ SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
+ if (needed_chars > buffer_size) return false;
+ int string_index = needed_chars - 1;
+ buffer[string_index--] = '\0';
+ for (int i = 0; i < exponent_; ++i) {
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = '0';
+ }
+ }
+ for (int i = 0; i < used_digits_ - 1; ++i) {
+ Chunk current_bigit = bigits_[i];
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
+ current_bigit >>= 4;
+ }
+ }
+ // And finally the last bigit.
+ Chunk most_significant_bigit = bigits_[used_digits_ - 1];
+ while (most_significant_bigit != 0) {
+ buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
+ most_significant_bigit >>= 4;
+ }
+ return true;
+}
+
+
+Bignum::Chunk Bignum::BigitAt(int index) const {
+ if (index >= BigitLength()) return 0;
+ if (index < exponent_) return 0;
+ return bigits_[index - exponent_];
+}
+
+
+int Bignum::Compare(const Bignum& a, const Bignum& b) {
+ ASSERT(a.IsClamped());
+ ASSERT(b.IsClamped());
+ int bigit_length_a = a.BigitLength();
+ int bigit_length_b = b.BigitLength();
+ if (bigit_length_a < bigit_length_b) return -1;
+ if (bigit_length_a > bigit_length_b) return +1;
+ for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) {
+ Chunk bigit_a = a.BigitAt(i);
+ Chunk bigit_b = b.BigitAt(i);
+ if (bigit_a < bigit_b) return -1;
+ if (bigit_a > bigit_b) return +1;
+ // Otherwise they are equal up to this digit. Try the next digit.
+ }
+ return 0;
+}
+
+
+int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
+ ASSERT(a.IsClamped());
+ ASSERT(b.IsClamped());
+ ASSERT(c.IsClamped());
+ if (a.BigitLength() < b.BigitLength()) {
+ return PlusCompare(b, a, c);
+ }
+ if (a.BigitLength() + 1 < c.BigitLength()) return -1;
+ if (a.BigitLength() > c.BigitLength()) return +1;
+ // The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than
+ // 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one
+ // of 'a'.
+ if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
+ return -1;
+ }
+
+ Chunk borrow = 0;
+ // Starting at min_exponent all digits are == 0. So no need to compare them.
+ int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_);
+ for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
+ Chunk chunk_a = a.BigitAt(i);
+ Chunk chunk_b = b.BigitAt(i);
+ Chunk chunk_c = c.BigitAt(i);
+ Chunk sum = chunk_a + chunk_b;
+ if (sum > chunk_c + borrow) {
+ return +1;
+ } else {
+ borrow = chunk_c + borrow - sum;
+ if (borrow > 1) return -1;
+ borrow <<= kBigitSize;
+ }
+ }
+ if (borrow == 0) return 0;
+ return -1;
+}
+
+
+void Bignum::Clamp() {
+ while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
+ used_digits_--;
+ }
+ if (used_digits_ == 0) {
+ // Zero.
+ exponent_ = 0;
+ }
+}
+
+
+bool Bignum::IsClamped() const {
+ return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
+}
+
+
+void Bignum::Zero() {
+ for (int i = 0; i < used_digits_; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ = 0;
+ exponent_ = 0;
+}
+
+
+void Bignum::Align(const Bignum& other) {
+ if (exponent_ > other.exponent_) {
+ // If "X" represents a "hidden" digit (by the exponent) then we are in the
+ // following case (a == this, b == other):
+ // a: aaaaaaXXXX or a: aaaaaXXX
+ // b: bbbbbbX b: bbbbbbbbXX
+ // We replace some of the hidden digits (X) of a with 0 digits.
+ // a: aaaaaa000X or a: aaaaa0XX
+ int zero_digits = exponent_ - other.exponent_;
+ EnsureCapacity(used_digits_ + zero_digits);
+ for (int i = used_digits_ - 1; i >= 0; --i) {
+ bigits_[i + zero_digits] = bigits_[i];
+ }
+ for (int i = 0; i < zero_digits; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ += zero_digits;
+ exponent_ -= zero_digits;
+ ASSERT(used_digits_ >= 0);
+ ASSERT(exponent_ >= 0);
+ }
+}
+
+
+// Shifts the bigit array left by shift_amount bits. shift_amount must be
+// less than kBigitSize; whole-bigit shifts are handled by the caller
+// (ShiftLeft) through exponent_. ShiftLeft has already ensured capacity for
+// one extra bigit, so the final carry write below is safe.
+void Bignum::BigitsShiftLeft(int shift_amount) {
+ ASSERT(shift_amount < kBigitSize);
+ ASSERT(shift_amount >= 0);
+ Chunk carry = 0;
+ for (int i = 0; i < used_digits_; ++i) {
+ // Bits shifted out of the top of this bigit carry into the next one.
+ Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
+ bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask;
+ carry = new_carry;
+ }
+ if (carry != 0) {
+ // The shift overflowed into a new most-significant bigit.
+ bigits_[used_digits_] = carry;
+ used_digits_++;
+ }
+}
+
+
+// Subtracts 'other' * 'factor' from this bignum in place.
+// Precondition: exponent_ <= other.exponent_ (callers Align() first).
+void Bignum::SubtractTimes(const Bignum& other, int factor) {
+ ASSERT(exponent_ <= other.exponent_);
+ // For small factors repeated subtraction is cheaper than the
+ // multiply-and-borrow loop below.
+ if (factor < 3) {
+ for (int i = 0; i < factor; ++i) {
+ SubtractBignum(other);
+ }
+ return;
+ }
+ Chunk borrow = 0;
+ int exponent_diff = other.exponent_ - exponent_;
+ for (int i = 0; i < other.used_digits_; ++i) {
+ DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
+ DoubleChunk remove = borrow + product;
+ Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
+ bigits_[i + exponent_diff] = difference & kBigitMask;
+ // The sign bit of 'difference' records whether the subtraction wrapped.
+ borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
+ (remove >> kBigitSize));
+ }
+ // Propagate any remaining borrow through the higher bigits.
+ // BUGFIX: the loop index must advance only via the for-header "++i".
+ // The previous code also incremented i inside the body, which skipped
+ // every other bigit and corrupted multi-bigit borrow propagation.
+ for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) {
+ if (borrow == 0) return;
+ Chunk difference = bigits_[i] - borrow;
+ bigits_[i] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ Clamp();
+}
+
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/bignum.h b/src/3rdparty/double-conversion/bignum.h
new file mode 100644
index 0000000000..5ec3544f57
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum.h
@@ -0,0 +1,145 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_BIGNUM_H_
+#define DOUBLE_CONVERSION_BIGNUM_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+class Bignum {
+ public:
+ // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
+ // This bignum can encode much bigger numbers, since it contains an
+ // exponent.
+ static const int kMaxSignificantBits = 3584;
+
+ Bignum();
+ void AssignUInt16(uint16_t value);
+ void AssignUInt64(uint64_t value);
+ void AssignBignum(const Bignum& other);
+
+ void AssignDecimalString(Vector<const char> value);
+ void AssignHexString(Vector<const char> value);
+
+ void AssignPowerUInt16(uint16_t base, int exponent);
+
+ void AddUInt16(uint16_t operand);
+ void AddUInt64(uint64_t operand);
+ void AddBignum(const Bignum& other);
+ // Precondition: this >= other.
+ void SubtractBignum(const Bignum& other);
+
+ void Square();
+ void ShiftLeft(int shift_amount);
+ void MultiplyByUInt32(uint32_t factor);
+ void MultiplyByUInt64(uint64_t factor);
+ void MultiplyByPowerOfTen(int exponent);
+ void Times10() { return MultiplyByUInt32(10); }
+ // Pseudocode:
+ // int result = this / other;
+ // this = this % other;
+ // In the worst case this function is in O(this/other).
+ uint16_t DivideModuloIntBignum(const Bignum& other);
+
+ bool ToHexString(char* buffer, int buffer_size) const;
+
+ // Returns
+ // -1 if a < b,
+ // 0 if a == b, and
+ // +1 if a > b.
+ static int Compare(const Bignum& a, const Bignum& b);
+ static bool Equal(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) == 0;
+ }
+ static bool LessEqual(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) <= 0;
+ }
+ static bool Less(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) < 0;
+ }
+ // Returns Compare(a + b, c);
+ static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
+ // Returns a + b == c
+ static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) == 0;
+ }
+ // Returns a + b <= c
+ static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) <= 0;
+ }
+ // Returns a + b < c
+ static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) < 0;
+ }
+ private:
+ typedef uint32_t Chunk;
+ typedef uint64_t DoubleChunk;
+
+ static const int kChunkSize = sizeof(Chunk) * 8;
+ static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
+ // With bigit size of 28 we loose some bits, but a double still fits easily
+ // into two chunks, and more importantly we can use the Comba multiplication.
+ static const int kBigitSize = 28;
+ static const Chunk kBigitMask = (1 << kBigitSize) - 1;
+ // Every instance allocates kBigitLength chunks on the stack. Bignums cannot
+ // grow. There are no checks if the stack-allocated space is sufficient.
+ static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
+
+ void EnsureCapacity(int size) {
+ if (size > kBigitCapacity) {
+ UNREACHABLE();
+ }
+ }
+ void Align(const Bignum& other);
+ void Clamp();
+ bool IsClamped() const;
+ void Zero();
+ // Requires this to have enough capacity (no tests done).
+ // Updates used_digits_ if necessary.
+ // shift_amount must be < kBigitSize.
+ void BigitsShiftLeft(int shift_amount);
+ // BigitLength includes the "hidden" digits encoded in the exponent.
+ int BigitLength() const { return used_digits_ + exponent_; }
+ Chunk BigitAt(int index) const;
+ void SubtractTimes(const Bignum& other, int factor);
+
+ Chunk bigits_buffer_[kBigitCapacity];
+ // A vector backed by bigits_buffer_. This way accesses to the array are
+ // checked for out-of-bounds errors.
+ Vector<Chunk> bigits_;
+ int used_digits_;
+ // The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize).
+ int exponent_;
+
+ DISALLOW_COPY_AND_ASSIGN(Bignum);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_BIGNUM_H_
diff --git a/src/3rdparty/double-conversion/cached-powers.cc b/src/3rdparty/double-conversion/cached-powers.cc
new file mode 100644
index 0000000000..c676429194
--- /dev/null
+++ b/src/3rdparty/double-conversion/cached-powers.cc
@@ -0,0 +1,175 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+#include <limits.h>
+#include <math.h>
+
+#include "utils.h"
+
+#include "cached-powers.h"
+
+namespace double_conversion {
+
+struct CachedPower {
+ uint64_t significand;
+ int16_t binary_exponent;
+ int16_t decimal_exponent;
+};
+
+static const CachedPower kCachedPowers[] = {
+ {UINT64_2PART_C(0xfa8fd5a0, 081c0288), -1220, -348},
+ {UINT64_2PART_C(0xbaaee17f, a23ebf76), -1193, -340},
+ {UINT64_2PART_C(0x8b16fb20, 3055ac76), -1166, -332},
+ {UINT64_2PART_C(0xcf42894a, 5dce35ea), -1140, -324},
+ {UINT64_2PART_C(0x9a6bb0aa, 55653b2d), -1113, -316},
+ {UINT64_2PART_C(0xe61acf03, 3d1a45df), -1087, -308},
+ {UINT64_2PART_C(0xab70fe17, c79ac6ca), -1060, -300},
+ {UINT64_2PART_C(0xff77b1fc, bebcdc4f), -1034, -292},
+ {UINT64_2PART_C(0xbe5691ef, 416bd60c), -1007, -284},
+ {UINT64_2PART_C(0x8dd01fad, 907ffc3c), -980, -276},
+ {UINT64_2PART_C(0xd3515c28, 31559a83), -954, -268},
+ {UINT64_2PART_C(0x9d71ac8f, ada6c9b5), -927, -260},
+ {UINT64_2PART_C(0xea9c2277, 23ee8bcb), -901, -252},
+ {UINT64_2PART_C(0xaecc4991, 4078536d), -874, -244},
+ {UINT64_2PART_C(0x823c1279, 5db6ce57), -847, -236},
+ {UINT64_2PART_C(0xc2109436, 4dfb5637), -821, -228},
+ {UINT64_2PART_C(0x9096ea6f, 3848984f), -794, -220},
+ {UINT64_2PART_C(0xd77485cb, 25823ac7), -768, -212},
+ {UINT64_2PART_C(0xa086cfcd, 97bf97f4), -741, -204},
+ {UINT64_2PART_C(0xef340a98, 172aace5), -715, -196},
+ {UINT64_2PART_C(0xb23867fb, 2a35b28e), -688, -188},
+ {UINT64_2PART_C(0x84c8d4df, d2c63f3b), -661, -180},
+ {UINT64_2PART_C(0xc5dd4427, 1ad3cdba), -635, -172},
+ {UINT64_2PART_C(0x936b9fce, bb25c996), -608, -164},
+ {UINT64_2PART_C(0xdbac6c24, 7d62a584), -582, -156},
+ {UINT64_2PART_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+ {UINT64_2PART_C(0xf3e2f893, dec3f126), -529, -140},
+ {UINT64_2PART_C(0xb5b5ada8, aaff80b8), -502, -132},
+ {UINT64_2PART_C(0x87625f05, 6c7c4a8b), -475, -124},
+ {UINT64_2PART_C(0xc9bcff60, 34c13053), -449, -116},
+ {UINT64_2PART_C(0x964e858c, 91ba2655), -422, -108},
+ {UINT64_2PART_C(0xdff97724, 70297ebd), -396, -100},
+ {UINT64_2PART_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+ {UINT64_2PART_C(0xf8a95fcf, 88747d94), -343, -84},
+ {UINT64_2PART_C(0xb9447093, 8fa89bcf), -316, -76},
+ {UINT64_2PART_C(0x8a08f0f8, bf0f156b), -289, -68},
+ {UINT64_2PART_C(0xcdb02555, 653131b6), -263, -60},
+ {UINT64_2PART_C(0x993fe2c6, d07b7fac), -236, -52},
+ {UINT64_2PART_C(0xe45c10c4, 2a2b3b06), -210, -44},
+ {UINT64_2PART_C(0xaa242499, 697392d3), -183, -36},
+ {UINT64_2PART_C(0xfd87b5f2, 8300ca0e), -157, -28},
+ {UINT64_2PART_C(0xbce50864, 92111aeb), -130, -20},
+ {UINT64_2PART_C(0x8cbccc09, 6f5088cc), -103, -12},
+ {UINT64_2PART_C(0xd1b71758, e219652c), -77, -4},
+ {UINT64_2PART_C(0x9c400000, 00000000), -50, 4},
+ {UINT64_2PART_C(0xe8d4a510, 00000000), -24, 12},
+ {UINT64_2PART_C(0xad78ebc5, ac620000), 3, 20},
+ {UINT64_2PART_C(0x813f3978, f8940984), 30, 28},
+ {UINT64_2PART_C(0xc097ce7b, c90715b3), 56, 36},
+ {UINT64_2PART_C(0x8f7e32ce, 7bea5c70), 83, 44},
+ {UINT64_2PART_C(0xd5d238a4, abe98068), 109, 52},
+ {UINT64_2PART_C(0x9f4f2726, 179a2245), 136, 60},
+ {UINT64_2PART_C(0xed63a231, d4c4fb27), 162, 68},
+ {UINT64_2PART_C(0xb0de6538, 8cc8ada8), 189, 76},
+ {UINT64_2PART_C(0x83c7088e, 1aab65db), 216, 84},
+ {UINT64_2PART_C(0xc45d1df9, 42711d9a), 242, 92},
+ {UINT64_2PART_C(0x924d692c, a61be758), 269, 100},
+ {UINT64_2PART_C(0xda01ee64, 1a708dea), 295, 108},
+ {UINT64_2PART_C(0xa26da399, 9aef774a), 322, 116},
+ {UINT64_2PART_C(0xf209787b, b47d6b85), 348, 124},
+ {UINT64_2PART_C(0xb454e4a1, 79dd1877), 375, 132},
+ {UINT64_2PART_C(0x865b8692, 5b9bc5c2), 402, 140},
+ {UINT64_2PART_C(0xc83553c5, c8965d3d), 428, 148},
+ {UINT64_2PART_C(0x952ab45c, fa97a0b3), 455, 156},
+ {UINT64_2PART_C(0xde469fbd, 99a05fe3), 481, 164},
+ {UINT64_2PART_C(0xa59bc234, db398c25), 508, 172},
+ {UINT64_2PART_C(0xf6c69a72, a3989f5c), 534, 180},
+ {UINT64_2PART_C(0xb7dcbf53, 54e9bece), 561, 188},
+ {UINT64_2PART_C(0x88fcf317, f22241e2), 588, 196},
+ {UINT64_2PART_C(0xcc20ce9b, d35c78a5), 614, 204},
+ {UINT64_2PART_C(0x98165af3, 7b2153df), 641, 212},
+ {UINT64_2PART_C(0xe2a0b5dc, 971f303a), 667, 220},
+ {UINT64_2PART_C(0xa8d9d153, 5ce3b396), 694, 228},
+ {UINT64_2PART_C(0xfb9b7cd9, a4a7443c), 720, 236},
+ {UINT64_2PART_C(0xbb764c4c, a7a44410), 747, 244},
+ {UINT64_2PART_C(0x8bab8eef, b6409c1a), 774, 252},
+ {UINT64_2PART_C(0xd01fef10, a657842c), 800, 260},
+ {UINT64_2PART_C(0x9b10a4e5, e9913129), 827, 268},
+ {UINT64_2PART_C(0xe7109bfb, a19c0c9d), 853, 276},
+ {UINT64_2PART_C(0xac2820d9, 623bf429), 880, 284},
+ {UINT64_2PART_C(0x80444b5e, 7aa7cf85), 907, 292},
+ {UINT64_2PART_C(0xbf21e440, 03acdd2d), 933, 300},
+ {UINT64_2PART_C(0x8e679c2f, 5e44ff8f), 960, 308},
+ {UINT64_2PART_C(0xd433179d, 9c8cb841), 986, 316},
+ {UINT64_2PART_C(0x9e19db92, b4e31ba9), 1013, 324},
+ {UINT64_2PART_C(0xeb96bf6e, badf77d9), 1039, 332},
+ {UINT64_2PART_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+};
+
+static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
+static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
+static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
+// Difference between the decimal exponents in the table above.
+const int PowersOfTenCache::kDecimalExponentDistance = 8;
+const int PowersOfTenCache::kMinDecimalExponent = -348;
+const int PowersOfTenCache::kMaxDecimalExponent = 340;
+
+void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ int min_exponent,
+ int max_exponent,
+ DiyFp* power,
+ int* decimal_exponent) {
+ int kQ = DiyFp::kSignificandSize;
+ double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
+ int foo = kCachedPowersOffset;
+ int index =
+ (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
+ ASSERT(0 <= index && index < kCachedPowersLength);
+ CachedPower cached_power = kCachedPowers[index];
+ ASSERT(min_exponent <= cached_power.binary_exponent);
+ ASSERT(cached_power.binary_exponent <= max_exponent);
+ *decimal_exponent = cached_power.decimal_exponent;
+ *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
+}
+
+
+void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
+ DiyFp* power,
+ int* found_exponent) {
+ ASSERT(kMinDecimalExponent <= requested_exponent);
+ ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
+ int index =
+ (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
+ CachedPower cached_power = kCachedPowers[index];
+ *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
+ *found_exponent = cached_power.decimal_exponent;
+ ASSERT(*found_exponent <= requested_exponent);
+ ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/cached-powers.h b/src/3rdparty/double-conversion/cached-powers.h
new file mode 100644
index 0000000000..61a50614cf
--- /dev/null
+++ b/src/3rdparty/double-conversion/cached-powers.h
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_
+#define DOUBLE_CONVERSION_CACHED_POWERS_H_
+
+#include "diy-fp.h"
+
+namespace double_conversion {
+
+class PowersOfTenCache {
+ public:
+
+ // Not all powers of ten are cached. The decimal exponent of two neighboring
+ // cached numbers will differ by kDecimalExponentDistance.
+ static const int kDecimalExponentDistance;
+
+ static const int kMinDecimalExponent;
+ static const int kMaxDecimalExponent;
+
+ // Returns a cached power-of-ten with a binary exponent in the range
+ // [min_exponent; max_exponent] (boundaries included).
+ static void GetCachedPowerForBinaryExponentRange(int min_exponent,
+ int max_exponent,
+ DiyFp* power,
+ int* decimal_exponent);
+
+ // Returns a cached power of ten x ~= 10^k such that
+ // k <= decimal_exponent < k + kCachedPowersDecimalDistance.
+ // The given decimal_exponent must satisfy
+ // kMinDecimalExponent <= requested_exponent, and
+ // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
+ static void GetCachedPowerForDecimalExponent(int requested_exponent,
+ DiyFp* power,
+ int* found_exponent);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_
diff --git a/src/3rdparty/double-conversion/diy-fp.cc b/src/3rdparty/double-conversion/diy-fp.cc
new file mode 100644
index 0000000000..ddd1891b16
--- /dev/null
+++ b/src/3rdparty/double-conversion/diy-fp.cc
@@ -0,0 +1,57 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "diy-fp.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+void DiyFp::Multiply(const DiyFp& other) {
+ // Simply "emulates" a 128 bit multiplication.
+ // However: the resulting number only contains 64 bits. The least
+ // significant 64 bits are only used for rounding the most significant 64
+ // bits.
+ const uint64_t kM32 = 0xFFFFFFFFU;
+ uint64_t a = f_ >> 32;
+ uint64_t b = f_ & kM32;
+ uint64_t c = other.f_ >> 32;
+ uint64_t d = other.f_ & kM32;
+ uint64_t ac = a * c;
+ uint64_t bc = b * c;
+ uint64_t ad = a * d;
+ uint64_t bd = b * d;
+ uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
+ // By adding 1U << 31 to tmp we round the final result.
+ // Halfway cases will be round up.
+ tmp += 1U << 31;
+ uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ e_ += other.e_ + 64;
+ f_ = result_f;
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/diy-fp.h b/src/3rdparty/double-conversion/diy-fp.h
new file mode 100644
index 0000000000..9dcf8fbdba
--- /dev/null
+++ b/src/3rdparty/double-conversion/diy-fp.h
@@ -0,0 +1,118 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DIY_FP_H_
+#define DOUBLE_CONVERSION_DIY_FP_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// This "Do It Yourself Floating Point" class implements a floating-point number
+// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
+// have the most significant bit of the significand set.
+// Multiplication and Subtraction do not normalize their results.
+// DiyFp are not designed to contain special doubles (NaN and Infinity).
+class DiyFp {
+ public:
+ static const int kSignificandSize = 64;
+
+ DiyFp() : f_(0), e_(0) {}
+ DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
+
+ // this = this - other.
+ // The exponents of both numbers must be the same and the significand of this
+ // must be bigger than the significand of other.
+ // The result will not be normalized.
+ void Subtract(const DiyFp& other) {
+ ASSERT(e_ == other.e_);
+ ASSERT(f_ >= other.f_);
+ f_ -= other.f_;
+ }
+
+ // Returns a - b.
+ // The exponents of both numbers must be the same and this must be bigger
+ // than other. The result will not be normalized.
+ static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Subtract(b);
+ return result;
+ }
+
+
+ // this = this * other.
+ void Multiply(const DiyFp& other);
+
+ // returns a * b;
+ static DiyFp Times(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Multiply(b);
+ return result;
+ }
+
+ void Normalize() {
+ ASSERT(f_ != 0);
+ uint64_t f = f_;
+ int e = e_;
+
+ // This method is mainly called for normalizing boundaries. In general
+ // boundaries need to be shifted by 10 bits. We thus optimize for this case.
+ const uint64_t k10MSBits = UINT64_2PART_C(0xFFC00000, 00000000);
+ while ((f & k10MSBits) == 0) {
+ f <<= 10;
+ e -= 10;
+ }
+ while ((f & kUint64MSB) == 0) {
+ f <<= 1;
+ e--;
+ }
+ f_ = f;
+ e_ = e;
+ }
+
+ static DiyFp Normalize(const DiyFp& a) {
+ DiyFp result = a;
+ result.Normalize();
+ return result;
+ }
+
+ uint64_t f() const { return f_; }
+ int e() const { return e_; }
+
+ void set_f(uint64_t new_value) { f_ = new_value; }
+ void set_e(int new_value) { e_ = new_value; }
+
+ private:
+ static const uint64_t kUint64MSB = UINT64_2PART_C(0x80000000, 00000000);
+
+ uint64_t f_;
+ int e_;
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DIY_FP_H_
diff --git a/src/3rdparty/double-conversion/double-conversion.cc b/src/3rdparty/double-conversion/double-conversion.cc
new file mode 100644
index 0000000000..a79fe92d22
--- /dev/null
+++ b/src/3rdparty/double-conversion/double-conversion.cc
@@ -0,0 +1,889 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <limits.h>
+#include <math.h>
+
+#include "double-conversion.h"
+
+#include "bignum-dtoa.h"
+#include "fast-dtoa.h"
+#include "fixed-dtoa.h"
+#include "ieee.h"
+#include "strtod.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+const DoubleToStringConverter& DoubleToStringConverter::EcmaScriptConverter() {
+ int flags = UNIQUE_ZERO | EMIT_POSITIVE_EXPONENT_SIGN;
+ static DoubleToStringConverter converter(flags,
+ "Infinity",
+ "NaN",
+ 'e',
+ -6, 21,
+ 6, 0);
+ return converter;
+}
+
+
+bool DoubleToStringConverter::HandleSpecialValues(
+ double value,
+ StringBuilder* result_builder) const {
+ Double double_inspect(value);
+ if (double_inspect.IsInfinite()) {
+ if (infinity_symbol_ == NULL) return false;
+ if (value < 0) {
+ result_builder->AddCharacter('-');
+ }
+ result_builder->AddString(infinity_symbol_);
+ return true;
+ }
+ if (double_inspect.IsNan()) {
+ if (nan_symbol_ == NULL) return false;
+ result_builder->AddString(nan_symbol_);
+ return true;
+ }
+ return false;
+}
+
+
+void DoubleToStringConverter::CreateExponentialRepresentation(
+ const char* decimal_digits,
+ int length,
+ int exponent,
+ StringBuilder* result_builder) const {
+ ASSERT(length != 0);
+ result_builder->AddCharacter(decimal_digits[0]);
+ if (length != 1) {
+ result_builder->AddCharacter('.');
+ result_builder->AddSubstring(&decimal_digits[1], length-1);
+ }
+ result_builder->AddCharacter(exponent_character_);
+ if (exponent < 0) {
+ result_builder->AddCharacter('-');
+ exponent = -exponent;
+ } else {
+ if ((flags_ & EMIT_POSITIVE_EXPONENT_SIGN) != 0) {
+ result_builder->AddCharacter('+');
+ }
+ }
+ if (exponent == 0) {
+ result_builder->AddCharacter('0');
+ return;
+ }
+ ASSERT(exponent < 1e4);
+ const int kMaxExponentLength = 5;
+ char buffer[kMaxExponentLength + 1];
+ buffer[kMaxExponentLength] = '\0';
+ int first_char_pos = kMaxExponentLength;
+ while (exponent > 0) {
+ buffer[--first_char_pos] = '0' + (exponent % 10);
+ exponent /= 10;
+ }
+ result_builder->AddSubstring(&buffer[first_char_pos],
+ kMaxExponentLength - first_char_pos);
+}
+
+
+void DoubleToStringConverter::CreateDecimalRepresentation(
+ const char* decimal_digits,
+ int length,
+ int decimal_point,
+ int digits_after_point,
+ StringBuilder* result_builder) const {
+ // Create a representation that is padded with zeros if needed.
+ if (decimal_point <= 0) {
+ // "0.00000decimal_rep".
+ result_builder->AddCharacter('0');
+ if (digits_after_point > 0) {
+ result_builder->AddCharacter('.');
+ result_builder->AddPadding('0', -decimal_point);
+ ASSERT(length <= digits_after_point - (-decimal_point));
+ result_builder->AddSubstring(decimal_digits, length);
+ int remaining_digits = digits_after_point - (-decimal_point) - length;
+ result_builder->AddPadding('0', remaining_digits);
+ }
+ } else if (decimal_point >= length) {
+ // "decimal_rep0000.00000" or "decimal_rep.0000"
+ result_builder->AddSubstring(decimal_digits, length);
+ result_builder->AddPadding('0', decimal_point - length);
+ if (digits_after_point > 0) {
+ result_builder->AddCharacter('.');
+ result_builder->AddPadding('0', digits_after_point);
+ }
+ } else {
+ // "decima.l_rep000"
+ ASSERT(digits_after_point > 0);
+ result_builder->AddSubstring(decimal_digits, decimal_point);
+ result_builder->AddCharacter('.');
+ ASSERT(length - decimal_point <= digits_after_point);
+ result_builder->AddSubstring(&decimal_digits[decimal_point],
+ length - decimal_point);
+ int remaining_digits = digits_after_point - (length - decimal_point);
+ result_builder->AddPadding('0', remaining_digits);
+ }
+ if (digits_after_point == 0) {
+ if ((flags_ & EMIT_TRAILING_DECIMAL_POINT) != 0) {
+ result_builder->AddCharacter('.');
+ }
+ if ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) {
+ result_builder->AddCharacter('0');
+ }
+ }
+}
+
+
+bool DoubleToStringConverter::ToShortestIeeeNumber(
+ double value,
+ StringBuilder* result_builder,
+ DoubleToStringConverter::DtoaMode mode) const {
+ ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE);
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ int decimal_point;
+ bool sign;
+ const int kDecimalRepCapacity = kBase10MaximalLength + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ DoubleToAscii(value, mode, 0, decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+
+ bool unique_zero = (flags_ & UNIQUE_ZERO) != 0;
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ int exponent = decimal_point - 1;
+ if ((decimal_in_shortest_low_ <= exponent) &&
+ (exponent < decimal_in_shortest_high_)) {
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length,
+ decimal_point,
+ Max(0, decimal_rep_length - decimal_point),
+ result_builder);
+ } else {
+ CreateExponentialRepresentation(decimal_rep, decimal_rep_length, exponent,
+ result_builder);
+ }
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const {
+ ASSERT(kMaxFixedDigitsBeforePoint == 60);
+ const double kFirstNonFixed = 1e60;
+
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (requested_digits > kMaxFixedDigitsAfterPoint) return false;
+ if (value >= kFirstNonFixed || value <= -kFirstNonFixed) return false;
+
+ // Find a sufficiently precise decimal representation of n.
+ int decimal_point;
+ bool sign;
+ // Add space for the '\0' byte.
+ const int kDecimalRepCapacity =
+ kMaxFixedDigitsBeforePoint + kMaxFixedDigitsAfterPoint + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+ DoubleToAscii(value, FIXED, requested_digits,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
+ requested_digits, result_builder);
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToExponential(
+ double value,
+ int requested_digits,
+ StringBuilder* result_builder) const {
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (requested_digits < -1) return false;
+ if (requested_digits > kMaxExponentialDigits) return false;
+
+ int decimal_point;
+ bool sign;
+ // Add space for digit before the decimal point and the '\0' character.
+ const int kDecimalRepCapacity = kMaxExponentialDigits + 2;
+ ASSERT(kDecimalRepCapacity > kBase10MaximalLength);
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ if (requested_digits == -1) {
+ DoubleToAscii(value, SHORTEST, 0,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ } else {
+ DoubleToAscii(value, PRECISION, requested_digits + 1,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ ASSERT(decimal_rep_length <= requested_digits + 1);
+
+ for (int i = decimal_rep_length; i < requested_digits + 1; ++i) {
+ decimal_rep[i] = '0';
+ }
+ decimal_rep_length = requested_digits + 1;
+ }
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ int exponent = decimal_point - 1;
+ CreateExponentialRepresentation(decimal_rep,
+ decimal_rep_length,
+ exponent,
+ result_builder);
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToPrecision(double value,
+ int precision,
+ StringBuilder* result_builder) const {
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (precision < kMinPrecisionDigits || precision > kMaxPrecisionDigits) {
+ return false;
+ }
+
+ // Find a sufficiently precise decimal representation of n.
+ int decimal_point;
+ bool sign;
+ // Add one for the terminating null character.
+ const int kDecimalRepCapacity = kMaxPrecisionDigits + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ DoubleToAscii(value, PRECISION, precision,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ ASSERT(decimal_rep_length <= precision);
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ // The exponent if we print the number as x.xxeyyy. That is with the
+ // decimal point after the first digit.
+ int exponent = decimal_point - 1;
+
+ int extra_zero = ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) ? 1 : 0;
+ if ((-decimal_point + 1 > max_leading_padding_zeroes_in_precision_mode_) ||
+ (decimal_point - precision + extra_zero >
+ max_trailing_padding_zeroes_in_precision_mode_)) {
+ // Fill buffer to contain 'precision' digits.
+ // Usually the buffer is already at the correct length, but 'DoubleToAscii'
+    // is allowed to return fewer characters.
+ for (int i = decimal_rep_length; i < precision; ++i) {
+ decimal_rep[i] = '0';
+ }
+
+ CreateExponentialRepresentation(decimal_rep,
+ precision,
+ exponent,
+ result_builder);
+ } else {
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
+ Max(0, precision - decimal_point),
+ result_builder);
+ }
+ return true;
+}
+
+
+static BignumDtoaMode DtoaToBignumDtoaMode(
+ DoubleToStringConverter::DtoaMode dtoa_mode) {
+ switch (dtoa_mode) {
+ case DoubleToStringConverter::SHORTEST: return BIGNUM_DTOA_SHORTEST;
+ case DoubleToStringConverter::SHORTEST_SINGLE:
+ return BIGNUM_DTOA_SHORTEST_SINGLE;
+ case DoubleToStringConverter::FIXED: return BIGNUM_DTOA_FIXED;
+ case DoubleToStringConverter::PRECISION: return BIGNUM_DTOA_PRECISION;
+ default:
+ UNREACHABLE();
+ return BIGNUM_DTOA_SHORTEST; // To silence compiler.
+ }
+}
+
+
+void DoubleToStringConverter::DoubleToAscii(double v,
+ DtoaMode mode,
+ int requested_digits,
+ char* buffer,
+ int buffer_length,
+ bool* sign,
+ int* length,
+ int* point) {
+ Vector<char> vector(buffer, buffer_length);
+ ASSERT(!Double(v).IsSpecial());
+ ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE || requested_digits >= 0);
+
+ if (Double(v).Sign() < 0) {
+ *sign = true;
+ v = -v;
+ } else {
+ *sign = false;
+ }
+
+ if (mode == PRECISION && requested_digits == 0) {
+ vector[0] = '\0';
+ *length = 0;
+ return;
+ }
+
+ if (v == 0) {
+ vector[0] = '0';
+ vector[1] = '\0';
+ *length = 1;
+ *point = 1;
+ return;
+ }
+
+ bool fast_worked;
+ switch (mode) {
+ case SHORTEST:
+ fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, vector, length, point);
+ break;
+ case SHORTEST_SINGLE:
+ fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST_SINGLE, 0,
+ vector, length, point);
+ break;
+ case FIXED:
+ fast_worked = FastFixedDtoa(v, requested_digits, vector, length, point);
+ break;
+ case PRECISION:
+ fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
+ vector, length, point);
+ break;
+ default:
+ UNREACHABLE();
+ fast_worked = false;
+ }
+ if (fast_worked) return;
+
+ // If the fast dtoa didn't succeed use the slower bignum version.
+ BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode);
+ BignumDtoa(v, bignum_mode, requested_digits, vector, length, point);
+ vector[*length] = '\0';
+}
+
+
+// Consumes the given substring from the iterator.
+// Returns false, if the substring does not match.
+static bool ConsumeSubString(const char** current,
+ const char* end,
+ const char* substring) {
+ ASSERT(**current == *substring);
+ for (substring++; *substring != '\0'; substring++) {
+ ++*current;
+ if (*current == end || **current != *substring) return false;
+ }
+ ++*current;
+ return true;
+}
+
+
+// Maximum number of significant digits in decimal representation.
+// The longest possible double in decimal representation is
+// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
+// (768 digits). If we parse a number whose first digits are equal to a
+// mean of 2 adjacent doubles (that could have up to 769 digits) the result
+// must be rounded to the bigger one unless the tail consists of zeros, so
+// we don't need to preserve all the digits.
+const int kMaxSignificantDigits = 772;
+
+
+// Returns true if a nonspace character is found and false if the end has been reached.
+static inline bool AdvanceToNonspace(const char** current, const char* end) {
+ while (*current != end) {
+ if (**current != ' ') return true;
+ ++*current;
+ }
+ return false;
+}
+
+
+static bool isDigit(int x, int radix) {
+ return (x >= '0' && x <= '9' && x < '0' + radix)
+ || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
+ || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+
+static double SignedZero(bool sign) {
+ return sign ? -0.0 : 0.0;
+}
+
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+template <int radix_log_2>
+static double RadixStringToIeee(const char* current,
+ const char* end,
+ bool sign,
+ bool allow_trailing_junk,
+ double junk_string_value,
+ bool read_as_double,
+ const char** trailing_pointer) {
+ ASSERT(current != end);
+
+ const int kDoubleSize = Double::kSignificandSize;
+ const int kSingleSize = Single::kSignificandSize;
+ const int kSignificandSize = read_as_double? kDoubleSize: kSingleSize;
+
+ // Skip leading 0s.
+ while (*current == '0') {
+ ++current;
+ if (current == end) {
+ *trailing_pointer = end;
+ return SignedZero(sign);
+ }
+ }
+
+ int64_t number = 0;
+ int exponent = 0;
+ const int radix = (1 << radix_log_2);
+
+ do {
+ int digit;
+ if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
+ digit = static_cast<char>(*current) - '0';
+ } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
+ digit = static_cast<char>(*current) - 'a' + 10;
+ } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
+ digit = static_cast<char>(*current) - 'A' + 10;
+ } else {
+ if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
+ break;
+ } else {
+ return junk_string_value;
+ }
+ }
+
+ number = number * radix + digit;
+ int overflow = static_cast<int>(number >> kSignificandSize);
+ if (overflow != 0) {
+ // Overflow occurred. Need to determine which direction to round the
+ // result.
+ int overflow_bits_count = 1;
+ while (overflow > 1) {
+ overflow_bits_count++;
+ overflow >>= 1;
+ }
+
+ int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+ int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+ number >>= overflow_bits_count;
+ exponent = overflow_bits_count;
+
+ bool zero_tail = true;
+ while (true) {
+ ++current;
+ if (current == end || !isDigit(*current, radix)) break;
+ zero_tail = zero_tail && *current == '0';
+ exponent += radix_log_2;
+ }
+
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value;
+ }
+
+ int middle_value = (1 << (overflow_bits_count - 1));
+ if (dropped_bits > middle_value) {
+ number++; // Rounding up.
+ } else if (dropped_bits == middle_value) {
+      // Round to even for consistency with decimals: the half-way case rounds
+      // up if the significant part is odd and down otherwise.
+ if ((number & 1) != 0 || !zero_tail) {
+ number++; // Rounding up.
+ }
+ }
+
+ // Rounding up may cause overflow.
+ if ((number & ((int64_t)1 << kSignificandSize)) != 0) {
+ exponent++;
+ number >>= 1;
+ }
+ break;
+ }
+ ++current;
+ } while (current != end);
+
+ ASSERT(number < ((int64_t)1 << kSignificandSize));
+ ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+ *trailing_pointer = current;
+
+ if (exponent == 0) {
+ if (sign) {
+ if (number == 0) return -0.0;
+ number = -number;
+ }
+ return static_cast<double>(number);
+ }
+
+ ASSERT(number != 0);
+ return Double(DiyFp(number, exponent)).value();
+}
+
+
+double StringToDoubleConverter::StringToIeee(
+ const char* input,
+ int length,
+ int* processed_characters_count,
+ bool read_as_double) {
+ const char* current = input;
+ const char* end = input + length;
+
+ *processed_characters_count = 0;
+
+ const bool allow_trailing_junk = (flags_ & ALLOW_TRAILING_JUNK) != 0;
+ const bool allow_leading_spaces = (flags_ & ALLOW_LEADING_SPACES) != 0;
+ const bool allow_trailing_spaces = (flags_ & ALLOW_TRAILING_SPACES) != 0;
+ const bool allow_spaces_after_sign = (flags_ & ALLOW_SPACES_AFTER_SIGN) != 0;
+
+ // To make sure that iterator dereferencing is valid the following
+ // convention is used:
+ // 1. Each '++current' statement is followed by check for equality to 'end'.
+ // 2. If AdvanceToNonspace returned false then current == end.
+ // 3. If 'current' becomes equal to 'end' the function returns or goes to
+ // 'parsing_done'.
+ // 4. 'current' is not dereferenced after the 'parsing_done' label.
+ // 5. Code before 'parsing_done' may rely on 'current != end'.
+ if (current == end) return empty_string_value_;
+
+ if (allow_leading_spaces || allow_trailing_spaces) {
+ if (!AdvanceToNonspace(&current, end)) {
+ *processed_characters_count = current - input;
+ return empty_string_value_;
+ }
+ if (!allow_leading_spaces && (input != current)) {
+ // No leading spaces allowed, but AdvanceToNonspace moved forward.
+ return junk_string_value_;
+ }
+ }
+
+ // The longest form of simplified number is: "-<significant digits>.1eXXX\0".
+ const int kBufferSize = kMaxSignificantDigits + 10;
+ char buffer[kBufferSize]; // NOLINT: size is known at compile time.
+ int buffer_pos = 0;
+
+ // Exponent will be adjusted if insignificant digits of the integer part
+ // or insignificant leading zeros of the fractional part are dropped.
+ int exponent = 0;
+ int significant_digits = 0;
+ int insignificant_digits = 0;
+ bool nonzero_digit_dropped = false;
+
+ bool sign = false;
+
+ if (*current == '+' || *current == '-') {
+ sign = (*current == '-');
+ ++current;
+ const char* next_non_space = current;
+ // Skip following spaces (if allowed).
+ if (!AdvanceToNonspace(&next_non_space, end)) return junk_string_value_;
+ if (!allow_spaces_after_sign && (current != next_non_space)) {
+ return junk_string_value_;
+ }
+ current = next_non_space;
+ }
+
+ if (infinity_symbol_ != NULL) {
+ if (*current == infinity_symbol_[0]) {
+ if (!ConsumeSubString(&current, end, infinity_symbol_)) {
+ return junk_string_value_;
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+
+ ASSERT(buffer_pos == 0);
+ *processed_characters_count = current - input;
+ return sign ? -Double::Infinity() : Double::Infinity();
+ }
+ }
+
+ if (nan_symbol_ != NULL) {
+ if (*current == nan_symbol_[0]) {
+ if (!ConsumeSubString(&current, end, nan_symbol_)) {
+ return junk_string_value_;
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+
+ ASSERT(buffer_pos == 0);
+ *processed_characters_count = current - input;
+ return sign ? -Double::NaN() : Double::NaN();
+ }
+ }
+
+ bool leading_zero = false;
+ if (*current == '0') {
+ ++current;
+ if (current == end) {
+ *processed_characters_count = current - input;
+ return SignedZero(sign);
+ }
+
+ leading_zero = true;
+
+ // It could be hexadecimal value.
+ if ((flags_ & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
+ ++current;
+ if (current == end || !isDigit(*current, 16)) {
+ return junk_string_value_; // "0x".
+ }
+
+ const char* tail_pointer = NULL;
+ double result = RadixStringToIeee<4>(current,
+ end,
+ sign,
+ allow_trailing_junk,
+ junk_string_value_,
+ read_as_double,
+ &tail_pointer);
+ if (tail_pointer != NULL) {
+ if (allow_trailing_spaces) AdvanceToNonspace(&tail_pointer, end);
+ *processed_characters_count = tail_pointer - input;
+ }
+ return result;
+ }
+
+ // Ignore leading zeros in the integer part.
+ while (*current == '0') {
+ ++current;
+ if (current == end) {
+ *processed_characters_count = current - input;
+ return SignedZero(sign);
+ }
+ }
+ }
+
+ bool octal = leading_zero && (flags_ & ALLOW_OCTALS) != 0;
+
+ // Copy significant digits of the integer part (if any) to the buffer.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ // Will later check if it's an octal in the buffer.
+ } else {
+ insignificant_digits++; // Move the digit into the exponential part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ octal = octal && *current < '8';
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+
+ if (significant_digits == 0) {
+ octal = false;
+ }
+
+ if (*current == '.') {
+ if (octal && !allow_trailing_junk) return junk_string_value_;
+ if (octal) goto parsing_done;
+
+ ++current;
+ if (current == end) {
+ if (significant_digits == 0 && !leading_zero) {
+ return junk_string_value_;
+ } else {
+ goto parsing_done;
+ }
+ }
+
+ if (significant_digits == 0) {
+ // octal = false;
+ // Integer part consists of 0 or is absent. Significant digits start after
+ // leading zeros (if any).
+ while (*current == '0') {
+ ++current;
+ if (current == end) {
+ *processed_characters_count = current - input;
+ return SignedZero(sign);
+ }
+ exponent--; // Move this 0 into the exponent.
+ }
+ }
+
+ // There is a fractional part.
+ // We don't emit a '.', but adjust the exponent instead.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ exponent--;
+ } else {
+ // Ignore insignificant digits in the fractional part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+ }
+
+ if (!leading_zero && exponent == 0 && significant_digits == 0) {
+    // If leading_zero is true then the string contains zeros.
+ // If exponent < 0 then string was [+-]\.0*...
+ // If significant_digits != 0 the string is not equal to 0.
+ // Otherwise there are no digits in the string.
+ return junk_string_value_;
+ }
+
+ // Parse exponential part.
+ if (*current == 'e' || *current == 'E') {
+ if (octal && !allow_trailing_junk) return junk_string_value_;
+ if (octal) goto parsing_done;
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+ char sign = '+';
+ if (*current == '+' || *current == '-') {
+ sign = static_cast<char>(*current);
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+ }
+
+ if (current == end || *current < '0' || *current > '9') {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+
+ const int max_exponent = INT_MAX / 2;
+ ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+ int num = 0;
+ do {
+ // Check overflow.
+ int digit = *current - '0';
+ if (num >= max_exponent / 10
+ && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+ num = max_exponent;
+ } else {
+ num = num * 10 + digit;
+ }
+ ++current;
+ } while (current != end && *current >= '0' && *current <= '9');
+
+ exponent += (sign == '-' ? -num : num);
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+ if (allow_trailing_spaces) {
+ AdvanceToNonspace(&current, end);
+ }
+
+ parsing_done:
+ exponent += insignificant_digits;
+
+ if (octal) {
+ double result;
+ const char* tail_pointer = NULL;
+ result = RadixStringToIeee<3>(buffer,
+ buffer + buffer_pos,
+ sign,
+ allow_trailing_junk,
+ junk_string_value_,
+ read_as_double,
+ &tail_pointer);
+ ASSERT(tail_pointer != NULL);
+ *processed_characters_count = current - input;
+ return result;
+ }
+
+ if (nonzero_digit_dropped) {
+ buffer[buffer_pos++] = '1';
+ exponent--;
+ }
+
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+
+ double converted;
+ if (read_as_double) {
+ converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
+ } else {
+ converted = Strtof(Vector<const char>(buffer, buffer_pos), exponent);
+ }
+ *processed_characters_count = current - input;
+ return sign? -converted: converted;
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/double-conversion.h b/src/3rdparty/double-conversion/double-conversion.h
new file mode 100644
index 0000000000..f98edae75a
--- /dev/null
+++ b/src/3rdparty/double-conversion/double-conversion.h
@@ -0,0 +1,536 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+#define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+class DoubleToStringConverter {
+ public:
+ // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint
+ // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the
+ // function returns false.
+ static const int kMaxFixedDigitsBeforePoint = 60;
+ static const int kMaxFixedDigitsAfterPoint = 60;
+
+ // When calling ToExponential with a requested_digits
+ // parameter > kMaxExponentialDigits then the function returns false.
+ static const int kMaxExponentialDigits = 120;
+
+ // When calling ToPrecision with a requested_digits
+ // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits
+ // then the function returns false.
+ static const int kMinPrecisionDigits = 1;
+ static const int kMaxPrecisionDigits = 120;
+
+ enum Flags {
+ NO_FLAGS = 0,
+ EMIT_POSITIVE_EXPONENT_SIGN = 1,
+ EMIT_TRAILING_DECIMAL_POINT = 2,
+ EMIT_TRAILING_ZERO_AFTER_POINT = 4,
+ UNIQUE_ZERO = 8
+ };
+
+ // Flags should be a bit-or combination of the possible Flags-enum.
+ // - NO_FLAGS: no special flags.
+ // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent
+ // form, emits a '+' for positive exponents. Example: 1.2e+2.
+ // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is
+ // converted into decimal format then a trailing decimal point is appended.
+ // Example: 2345.0 is converted to "2345.".
+ // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point
+ // emits a trailing '0'-character. This flag requires the
+  //   EMIT_TRAILING_DECIMAL_POINT flag.
+ // Example: 2345.0 is converted to "2345.0".
+ // - UNIQUE_ZERO: "-0.0" is converted to "0.0".
+ //
+ // Infinity symbol and nan_symbol provide the string representation for these
+ // special values. If the string is NULL and the special value is encountered
+ // then the conversion functions return false.
+ //
+ // The exponent_character is used in exponential representations. It is
+ // usually 'e' or 'E'.
+ //
+ // When converting to the shortest representation the converter will
+ // represent input numbers in decimal format if they are in the interval
+ // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[
+ // (lower boundary included, greater boundary excluded).
+ // Example: with decimal_in_shortest_low = -6 and
+ // decimal_in_shortest_high = 21:
+ // ToShortest(0.000001) -> "0.000001"
+ // ToShortest(0.0000001) -> "1e-7"
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
+ //
+ // When converting to precision mode the converter may add
+ // max_leading_padding_zeroes before returning the number in exponential
+ // format.
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
+  // Similarly the converter may add up to
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
+ // returning an exponential representation. A zero added by the
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
+ // ToPrecision(230.0, 2) -> "230"
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
+ DoubleToStringConverter(int flags,
+ const char* infinity_symbol,
+ const char* nan_symbol,
+ char exponent_character,
+ int decimal_in_shortest_low,
+ int decimal_in_shortest_high,
+ int max_leading_padding_zeroes_in_precision_mode,
+ int max_trailing_padding_zeroes_in_precision_mode)
+ : flags_(flags),
+ infinity_symbol_(infinity_symbol),
+ nan_symbol_(nan_symbol),
+ exponent_character_(exponent_character),
+ decimal_in_shortest_low_(decimal_in_shortest_low),
+ decimal_in_shortest_high_(decimal_in_shortest_high),
+ max_leading_padding_zeroes_in_precision_mode_(
+ max_leading_padding_zeroes_in_precision_mode),
+ max_trailing_padding_zeroes_in_precision_mode_(
+ max_trailing_padding_zeroes_in_precision_mode) {
+ // When 'trailing zero after the point' is set, then 'trailing point'
+ // must be set too.
+ ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) ||
+ !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0));
+ }
+
+ // Returns a converter following the EcmaScript specification.
+ static const DoubleToStringConverter& EcmaScriptConverter();
+
+ // Computes the shortest string of digits that correctly represent the input
+ // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high
+ // (see constructor) it then either returns a decimal representation, or an
+ // exponential representation.
+ // Example with decimal_in_shortest_low = -6,
+ // decimal_in_shortest_high = 21,
+ // EMIT_POSITIVE_EXPONENT_SIGN activated, and
+  //        EMIT_TRAILING_DECIMAL_POINT deactivated:
+ // ToShortest(0.000001) -> "0.000001"
+ // ToShortest(0.0000001) -> "1e-7"
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
+ //
+ // Note: the conversion may round the output if the returned string
+ // is accurate enough to uniquely identify the input-number.
+ // For example the most precise representation of the double 9e59 equals
+ // "899999999999999918767229449717619953810131273674690656206848", but
+ // the converter will return the shorter (but still correct) "9e59".
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except when the input value is special and no infinity_symbol or
+ // nan_symbol has been given to the constructor.
+ bool ToShortest(double value, StringBuilder* result_builder) const {
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST);
+ }
+
+ // Same as ToShortest, but for single-precision floats.
+ bool ToShortestSingle(float value, StringBuilder* result_builder) const {
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE);
+ }
+
+
+ // Computes a decimal representation with a fixed number of digits after the
+ // decimal point. The last emitted digit is rounded.
+ //
+ // Examples:
+ // ToFixed(3.12, 1) -> "3.1"
+ // ToFixed(3.1415, 3) -> "3.142"
+ // ToFixed(1234.56789, 4) -> "1234.5679"
+ // ToFixed(1.23, 5) -> "1.23000"
+ // ToFixed(0.1, 4) -> "0.1000"
+ // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00"
+ // ToFixed(0.1, 30) -> "0.100000000000000005551115123126"
+ // ToFixed(0.1, 17) -> "0.10000000000000001"
+ //
+ // If requested_digits equals 0, then the tail of the result depends on
+ // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT.
+ // Examples, for requested_digits == 0,
+ // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be
+ // - false and false: then 123.45 -> 123
+ // 0.678 -> 1
+ // - true and false: then 123.45 -> 123.
+ // 0.678 -> 1.
+ // - true and true: then 123.45 -> 123.0
+ // 0.678 -> 1.0
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+ // - 'value' > 10^kMaxFixedDigitsBeforePoint, or
+ // - 'requested_digits' > kMaxFixedDigitsAfterPoint.
+ // The last two conditions imply that the result will never contain more than
+ // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters
+ // (one additional character for the sign, and one for the decimal point).
+ bool ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
+ // Computes a representation in exponential format with requested_digits
+ // after the decimal point. The last emitted digit is rounded.
+ // If requested_digits equals -1, then the shortest exponential representation
+ // is computed.
+ //
+ // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and
+ // exponent_character set to 'e'.
+ // ToExponential(3.12, 1) -> "3.1e0"
+ // ToExponential(5.0, 3) -> "5.000e0"
+ // ToExponential(0.001, 2) -> "1.00e-3"
+ // ToExponential(3.1415, -1) -> "3.1415e0"
+ // ToExponential(3.1415, 4) -> "3.1415e0"
+ // ToExponential(3.1415, 3) -> "3.142e0"
+ // ToExponential(123456789000000, 3) -> "1.235e14"
+ // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30"
+ // ToExponential(1000000000000000019884624838656.0, 32) ->
+ // "1.00000000000000001988462483865600e30"
+ // ToExponential(1234, 0) -> "1e3"
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+ // - 'requested_digits' > kMaxExponentialDigits.
+ // The last condition implies that the result will never contain more than
+ // kMaxExponentialDigits + 8 characters (the sign, the digit before the
+ // decimal point, the decimal point, the exponent character, the
+ // exponent's sign, and at most 3 exponent digits).
+ bool ToExponential(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
+ // Computes 'precision' leading digits of the given 'value' and returns them
+ // either in exponential or decimal format, depending on
+ // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the
+ // constructor).
+ // The last computed digit is rounded.
+ //
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
+  // Similarly the converter may add up to
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
+ // returning an exponential representation. A zero added by the
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
+ // ToPrecision(230.0, 2) -> "230"
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no
+ // EMIT_TRAILING_ZERO_AFTER_POINT:
+ // ToPrecision(123450.0, 6) -> "123450"
+ // ToPrecision(123450.0, 5) -> "123450"
+ // ToPrecision(123450.0, 4) -> "123500"
+ // ToPrecision(123450.0, 3) -> "123000"
+ // ToPrecision(123450.0, 2) -> "1.2e5"
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+  //  - precision < kMinPrecisionDigits
+ // - precision > kMaxPrecisionDigits
+ // The last condition implies that the result will never contain more than
+ // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the
+ // exponent character, the exponent's sign, and at most 3 exponent digits).
+ bool ToPrecision(double value,
+ int precision,
+ StringBuilder* result_builder) const;
+
+ enum DtoaMode {
+ // Produce the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate
+ // but correct) 0.3.
+ SHORTEST,
+ // Same as SHORTEST, but for single-precision floats.
+ SHORTEST_SINGLE,
+ // Produce a fixed number of digits after the decimal point.
+ // For instance fixed(0.1, 4) becomes 0.1000
+ // If the input number is big, the output will be big.
+ FIXED,
+ // Fixed number of digits (independent of the decimal point).
+ PRECISION
+ };
+
+ // The maximal number of digits that are needed to emit a double in base 10.
+ // A higher precision can be achieved by using more digits, but the shortest
+ // accurate representation of any double will never use more digits than
+ // kBase10MaximalLength.
+ // Note that DoubleToAscii null-terminates its input. So the given buffer
+ // should be at least kBase10MaximalLength + 1 characters long.
+ static const int kBase10MaximalLength = 17;
+
+ // Converts the given double 'v' to ascii. 'v' must not be NaN, +Infinity, or
+ // -Infinity. In SHORTEST_SINGLE-mode this restriction also applies to 'v'
+ // after it has been casted to a single-precision float. That is, in this
+ // mode static_cast<float>(v) must not be NaN, +Infinity or -Infinity.
+ //
+ // The result should be interpreted as buffer * 10^(point-length).
+ //
+ // The output depends on the given mode:
+ // - SHORTEST: produce the least amount of digits for which the internal
+ // identity requirement is still satisfied. If the digits are printed
+ // (together with the correct exponent) then reading this number will give
+ // 'v' again. The buffer will choose the representation that is closest to
+  //   'v'. If there are two at the same distance, then the one farther away
+ // from 0 is chosen (halfway cases - ending with 5 - are rounded up).
+ // In this mode the 'requested_digits' parameter is ignored.
+ // - SHORTEST_SINGLE: same as SHORTEST but with single-precision.
+ // - FIXED: produces digits necessary to print a given number with
+ // 'requested_digits' digits after the decimal point. The produced digits
+ // might be too short in which case the caller has to fill the remainder
+ // with '0's.
+ // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+ // Halfway cases are rounded towards +/-Infinity (away from 0). The call
+ // toFixed(0.15, 2) thus returns buffer="2", point=0.
+ // The returned buffer may contain digits that would be truncated from the
+ // shortest representation of the input.
+ // - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+ // Even though the length of produced digits usually equals
+ // 'requested_digits', the function is allowed to return fewer digits, in
+ // which case the caller has to fill the missing digits with '0's.
+ // Halfway cases are again rounded away from 0.
+ // DoubleToAscii expects the given buffer to be big enough to hold all
+ // digits and a terminating null-character. In SHORTEST-mode it expects a
+ // buffer of at least kBase10MaximalLength + 1. In all other modes the
+ // requested_digits parameter and the padding-zeroes limit the size of the
+ // output. Don't forget the decimal point, the exponent character and the
+ // terminating null-character when computing the maximal output size.
+ // The given length is only used in debug mode to ensure the buffer is big
+ // enough.
+ static void DoubleToAscii(double v,
+ DtoaMode mode,
+ int requested_digits,
+ char* buffer,
+ int buffer_length,
+ bool* sign,
+ int* length,
+ int* point);
+
+ private:
+ // Implementation for ToShortest and ToShortestSingle.
+ bool ToShortestIeeeNumber(double value,
+ StringBuilder* result_builder,
+ DtoaMode mode) const;
+
+ // If the value is a special value (NaN or Infinity) constructs the
+ // corresponding string using the configured infinity/nan-symbol.
+ // If either of them is NULL or the value is not special then the
+ // function returns false.
+ bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
+ // Constructs an exponential representation (i.e. 1.234e56).
+ // The given exponent assumes a decimal point after the first decimal digit.
+ void CreateExponentialRepresentation(const char* decimal_digits,
+ int length,
+ int exponent,
+ StringBuilder* result_builder) const;
+ // Creates a decimal representation (i.e 1234.5678).
+ void CreateDecimalRepresentation(const char* decimal_digits,
+ int length,
+ int decimal_point,
+ int digits_after_point,
+ StringBuilder* result_builder) const;
+
+ const int flags_;
+ const char* const infinity_symbol_;
+ const char* const nan_symbol_;
+ const char exponent_character_;
+ const int decimal_in_shortest_low_;
+ const int decimal_in_shortest_high_;
+ const int max_leading_padding_zeroes_in_precision_mode_;
+ const int max_trailing_padding_zeroes_in_precision_mode_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter);
+};
+
+
+// Converts strings (character buffers) to IEEE doubles or singles.
+// Configuration (flags, fallback values, special-value symbols) is fixed at
+// construction time; the converter itself holds no mutable state.
+class StringToDoubleConverter {
+ public:
+  // Enumeration for allowing octals and ignoring junk when converting
+  // strings to numbers.
+  enum Flags {
+    NO_FLAGS = 0,
+    ALLOW_HEX = 1,
+    ALLOW_OCTALS = 2,
+    ALLOW_TRAILING_JUNK = 4,
+    ALLOW_LEADING_SPACES = 8,
+    ALLOW_TRAILING_SPACES = 16,
+    ALLOW_SPACES_AFTER_SIGN = 32
+  };
+
+  // Flags should be a bit-or combination of the possible Flags-enum.
+  //  - NO_FLAGS: no special flags.
+  //  - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers.
+  //      Ex: StringToDouble("0x1234") -> 4660.0
+  //          In StringToDouble("0x1234.56") the characters ".56" are trailing
+  //          junk. The result of the call is hence dependent on
+  //          the ALLOW_TRAILING_JUNK flag and/or the junk value.
+  //      With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK,
+  //      the string will not be parsed as "0" followed by junk.
+  //
+  //  - ALLOW_OCTALS: recognizes the prefix "0" for octals:
+  //      If a sequence of octal digits starts with '0', then the number is
+  //      read as octal integer. Octal numbers may only be integers.
+  //      Ex: StringToDouble("01234") -> 668.0
+  //          StringToDouble("012349") -> 12349.0  // Not a sequence of octal
+  //                                               // digits.
+  //          In StringToDouble("01234.56") the characters ".56" are trailing
+  //          junk. The result of the call is hence dependent on
+  //          the ALLOW_TRAILING_JUNK flag and/or the junk value.
+  //          In StringToDouble("01234e56") the characters "e56" are trailing
+  //          junk, too.
+  //  - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of
+  //      a double literal.
+  //  - ALLOW_LEADING_SPACES: skip over leading spaces.
+  //  - ALLOW_TRAILING_SPACES: ignore trailing spaces.
+  //  - ALLOW_SPACES_AFTER_SIGN: ignore spaces after the sign.
+  //       Ex: StringToDouble("-   123.2") -> -123.2.
+  //           StringToDouble("+   123.2") -> 123.2
+  //
+  // empty_string_value is returned when an empty string is given as input.
+  // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string
+  // containing only spaces is converted to the 'empty_string_value', too.
+  //
+  // junk_string_value is returned when
+  //  a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not
+  //     part of a double-literal) is found.
+  //  b) ALLOW_TRAILING_JUNK is set, but the string does not start with a
+  //     double literal.
+  //
+  // infinity_symbol and nan_symbol are strings that are used to detect
+  // inputs that represent infinity and NaN. They can be null, in which case
+  // they are ignored.
+  // The conversion routine first reads any possible signs. Then it compares the
+  // following character of the input-string with the first character of
+  // the infinity, and nan-symbol. If either matches, the function assumes, that
+  // a match has been found, and expects the following input characters to match
+  // the remaining characters of the special-value symbol.
+  // This means that the following restrictions apply to special-value symbols:
+  //  - they must not start with signs ('+', or '-'),
+  //  - they must not have the same first character.
+  //  - they must not start with digits.
+  //
+  // Examples:
+  //  flags = ALLOW_HEX | ALLOW_TRAILING_JUNK,
+  //  empty_string_value = 0.0,
+  //  junk_string_value = NaN,
+  //  infinity_symbol = "infinity",
+  //  nan_symbol = "nan":
+  //    StringToDouble("0x1234") -> 4660.0.
+  //    StringToDouble("0x1234K") -> 4660.0.
+  //    StringToDouble("") -> 0.0  // empty_string_value.
+  //    StringToDouble(" ") -> NaN  // junk_string_value.
+  //    StringToDouble(" 1") -> NaN  // junk_string_value.
+  //    StringToDouble("0x") -> NaN  // junk_string_value.
+  //    StringToDouble("-123.45") -> -123.45.
+  //    StringToDouble("--123.45") -> NaN  // junk_string_value.
+  //    StringToDouble("123e45") -> 123e45.
+  //    StringToDouble("123E45") -> 123e45.
+  //    StringToDouble("123e+45") -> 123e45.
+  //    StringToDouble("123E-45") -> 123e-45.
+  //    StringToDouble("123e") -> 123.0  // trailing junk ignored.
+  //    StringToDouble("123e-") -> 123.0  // trailing junk ignored.
+  //    StringToDouble("+NaN") -> NaN  // NaN string literal.
+  //    StringToDouble("-infinity") -> -inf.  // infinity literal.
+  //    StringToDouble("Infinity") -> NaN  // junk_string_value.
+  //
+  //  flags = ALLOW_OCTALS | ALLOW_LEADING_SPACES,
+  //  empty_string_value = 0.0,
+  //  junk_string_value = NaN,
+  //  infinity_symbol = NULL,
+  //  nan_symbol = NULL:
+  //    StringToDouble("0x1234") -> NaN  // junk_string_value.
+  //    StringToDouble("01234") -> 668.0.
+  //    StringToDouble("") -> 0.0  // empty_string_value.
+  //    StringToDouble(" ") -> 0.0  // empty_string_value.
+  //    StringToDouble(" 1") -> 1.0
+  //    StringToDouble("0x") -> NaN  // junk_string_value.
+  //    StringToDouble("0123e45") -> NaN  // junk_string_value.
+  //    StringToDouble("01239E45") -> 1239e45.
+  //    StringToDouble("-infinity") -> NaN  // junk_string_value.
+  //    StringToDouble("NaN") -> NaN  // junk_string_value.
+  StringToDoubleConverter(int flags,
+                          double empty_string_value,
+                          double junk_string_value,
+                          const char* infinity_symbol,
+                          const char* nan_symbol)
+      : flags_(flags),
+        empty_string_value_(empty_string_value),
+        junk_string_value_(junk_string_value),
+        infinity_symbol_(infinity_symbol),
+        nan_symbol_(nan_symbol) {
+  }
+
+  // Performs the conversion.
+  // The output parameter 'processed_characters_count' is set to the number
+  // of characters that have been processed to read the number.
+  // Spaces that are processed with ALLOW_{LEADING|TRAILING}_SPACES are included
+  // in the 'processed_characters_count'. Trailing junk is never included.
+  double StringToDouble(const char* buffer,
+                        int length,
+                        int* processed_characters_count) {
+    return StringToIeee(buffer, length, processed_characters_count, true);
+  }
+
+  // Same as StringToDouble but reads a float.
+  // Note that this is not equivalent to static_cast<float>(StringToDouble(...))
+  // due to potential double-rounding.
+  float StringToFloat(const char* buffer,
+                      int length,
+                      int* processed_characters_count) {
+    return static_cast<float>(StringToIeee(buffer, length,
+                                           processed_characters_count, false));
+  }
+
+ private:
+  // Configuration supplied at construction; all members are const and are
+  // never modified afterwards.
+  const int flags_;
+  const double empty_string_value_;
+  const double junk_string_value_;
+  const char* const infinity_symbol_;
+  const char* const nan_symbol_;
+
+  // Shared implementation backing StringToDouble and StringToFloat.
+  // 'read_as_double' selects double (true) or single (false) precision
+  // rounding of the parsed value.
+  double StringToIeee(const char* buffer,
+                      int length,
+                      int* processed_characters_count,
+                      bool read_as_double);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
diff --git a/src/3rdparty/double-conversion/double-conversion.pri b/src/3rdparty/double-conversion/double-conversion.pri
new file mode 100644
index 0000000000..4ad5f9f7a7
--- /dev/null
+++ b/src/3rdparty/double-conversion/double-conversion.pri
@@ -0,0 +1,4 @@
+INCLUDEPATH += $$PWD
+VPATH += $$PWD
+SOURCES += $$PWD/*.cc
+HEADERS += $$PWD/*.h
diff --git a/src/3rdparty/double-conversion/fast-dtoa.cc b/src/3rdparty/double-conversion/fast-dtoa.cc
new file mode 100644
index 0000000000..1a0f823509
--- /dev/null
+++ b/src/3rdparty/double-conversion/fast-dtoa.cc
@@ -0,0 +1,664 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "fast-dtoa.h"
+
+#include "cached-powers.h"
+#include "diy-fp.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// The minimal and maximal target exponent define the range of w's binary
+// exponent, where 'w' is the result of multiplying the input by a cached power
+// of ten.
+//
+// A different range might be chosen on a different platform, to optimize digit
+// generation, but a smaller range requires more powers of ten to be cached.
+// Note: DigitGen and DigitGenCounted below ASSERT exactly this range
+// (-60 <= e <= -32); do not change one without the other.
+static const int kMinimalTargetExponent = -60;
+static const int kMaximalTargetExponent = -32;
+
+
+// Adjusts the last digit of the generated number, and screens out generated
+// solutions that may be inaccurate. A solution may be inaccurate if it is
+// outside the safe interval, or if we cannot prove that it is closer to the
+// input than a neighboring representation of the same length.
+//
+// Input: * buffer containing the digits of too_high / 10^kappa
+// * the buffer's length
+// * distance_too_high_w == (too_high - w).f() * unit
+// * unsafe_interval == (too_high - too_low).f() * unit
+// * rest = (too_high - buffer * 10^kappa).f() * unit
+// * ten_kappa = 10^kappa * unit
+// * unit = the common multiplier
+// Output: returns true if the buffer is guaranteed to contain the closest
+// representable number to the input.
+// Modifies the generated digits in the buffer to approach (round towards) w.
+static bool RoundWeed(Vector<char> buffer,
+                      int length,
+                      uint64_t distance_too_high_w,
+                      uint64_t unsafe_interval,
+                      uint64_t rest,
+                      uint64_t ten_kappa,
+                      uint64_t unit) {
+  // length must be >= 1: the adjustment loop below may decrement the last
+  // generated digit, buffer[length - 1].
+  uint64_t small_distance = distance_too_high_w - unit;
+  uint64_t big_distance = distance_too_high_w + unit;
+  // Let w_low  = too_high - big_distance, and
+  //     w_high = too_high - small_distance.
+  // Note: w_low < w < w_high
+  //
+  // The real w (* unit) must lie somewhere inside the interval
+  // ]w_low; w_high[ (often written as "(w_low; w_high)")
+
+  // Basically the buffer currently contains a number in the unsafe interval
+  // ]too_low; too_high[ with too_low < w < too_high
+  //
+  //  too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+  //                     ^v 1 unit            ^      ^                 ^      ^
+  //  boundary_high ---------------------     .      .                 .      .
+  //                     ^v 1 unit            .      .                 .      .
+  //   - - - - - - - - - - - - - - - - - - -  +  - - + - - - - - -     .      .
+  //                                          .      .         ^       .      .
+  //                                          .  big_distance  .       .      .
+  //                                          .      .         .       .    rest
+  //                              small_distance     .         .       .      .
+  //                                          v      .         .       .      .
+  //  w_high - - - - - - - - - - - - - - - - - -     .         .       .      .
+  //                     ^v 1 unit                   .         .       .      .
+  //  w ----------------------------------------     .         .       .      .
+  //                     ^v 1 unit                   v         .       .      .
+  //  w_low  - - - - - - - - - - - - - - - - - - - - -         .       .      .
+  //                                                           .       .      v
+  //  buffer --------------------------------------------------+-------+--------
+  //                                                           .       .
+  //                                                  safe_interval    .
+  //                                                           v       .
+  //   - - - - - - - - - - - - - - - - - - - - - - - - - - - - -       .
+  //                     ^v 1 unit                                     .
+  //  boundary_low -------------------------                     unsafe_interval
+  //                     ^v 1 unit                                     v
+  //  too_low  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+  //
+  //
+  // Note that the value of buffer could lie anywhere inside the range too_low
+  // to too_high.
+  //
+  // boundary_low, boundary_high and w are approximations of the real boundaries
+  // and v (the input number). They are guaranteed to be precise up to one unit.
+  // In fact the error is guaranteed to be strictly less than one unit.
+  //
+  // Anything that lies outside the unsafe interval is guaranteed not to round
+  // to v when read again.
+  // Anything that lies inside the safe interval is guaranteed to round to v
+  // when read again.
+  // If the number inside the buffer lies inside the unsafe interval but not
+  // inside the safe interval then we simply do not know and bail out (returning
+  // false).
+  //
+  // Similarly we have to take into account the imprecision of 'w' when finding
+  // the closest representation of 'w'. If we have two potential
+  // representations, and one is closer to both w_low and w_high, then we know
+  // it is closer to the actual value v.
+  //
+  // By generating the digits of too_high we got the largest (closest to
+  // too_high) buffer that is still in the unsafe interval. In the case where
+  // w_high < buffer < too_high we try to decrement the buffer.
+  // This way the buffer approaches (rounds towards) w.
+  // There are 3 conditions that stop the decrementation process:
+  //    1) the buffer is already below w_high
+  //    2) decrementing the buffer would make it leave the unsafe interval
+  //    3) decrementing the buffer would yield a number below w_high and farther
+  //       away than the current number. In other words:
+  //              (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
+  // Instead of using the buffer directly we use its distance to too_high.
+  // Conceptually rest ~= too_high - buffer
+  // We need to do the following tests in this order to avoid over- and
+  // underflows.
+  ASSERT(rest <= unsafe_interval);
+  while (rest < small_distance &&  // Negated condition 1
+         unsafe_interval - rest >= ten_kappa &&  // Negated condition 2
+         (rest + ten_kappa < small_distance ||  // buffer{-1} > w_high
+          small_distance - rest >= rest + ten_kappa - small_distance)) {
+    buffer[length - 1]--;
+    rest += ten_kappa;
+  }
+
+  // We have approached w+ as much as possible. We now test if approaching w-
+  // would require changing the buffer. If yes, then we have two possible
+  // representations close to w, but we cannot decide which one is closer.
+  if (rest < big_distance &&
+      unsafe_interval - rest >= ten_kappa &&
+      (rest + ten_kappa < big_distance ||
+       big_distance - rest > rest + ten_kappa - big_distance)) {
+    return false;
+  }
+
+  // Weeding test.
+  //   The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
+  //   Since too_low = too_high - unsafe_interval this is equivalent to
+  //      [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
+  //   Conceptually we have: rest ~= too_high - buffer
+  return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
+}
+
+
+// Rounds the buffer upwards if the result is closer to v by possibly adding
+// 1 to the buffer. If the precision of the calculation is not sufficient to
+// round correctly, return false.
+// The rounding might shift the whole buffer in which case the kappa is
+// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
+//
+// If 2*rest > ten_kappa then the buffer needs to be rounded up.
+// rest can have an error of +/- 1 unit. This function accounts for the
+// imprecision and returns false, if the rounding direction cannot be
+// unambiguously determined.
+//
+// Precondition: rest < ten_kappa.
+static bool RoundWeedCounted(Vector<char> buffer,
+                             int length,
+                             uint64_t rest,
+                             uint64_t ten_kappa,
+                             uint64_t unit,
+                             int* kappa) {
+  ASSERT(rest < ten_kappa);
+  // The following tests are done in a specific order to avoid overflows. They
+  // will work correctly with any uint64 values of rest < ten_kappa and unit.
+  //
+  // If the unit is too big, then we don't know which way to round. For example
+  // a unit of 50 means that the real number lies within rest +/- 50. If
+  // 10^kappa == 40 then there is no way to tell which way to round.
+  if (unit >= ten_kappa) return false;
+  // Even if unit is just half the size of 10^kappa we are already completely
+  // lost. (And after the previous test we know that the expression will not
+  // over/underflow.)
+  if (ten_kappa - unit <= unit) return false;
+  // If 2 * (rest + unit) <= 10^kappa we can safely round down.
+  if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
+    return true;
+  }
+  // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
+  if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
+    // Increment the last digit recursively until we find a non '9' digit.
+    // While the carry propagates a digit may temporarily hold the
+    // out-of-range character '0' + 10; it is normalized in the same pass.
+    buffer[length - 1]++;
+    for (int i = length - 1; i > 0; --i) {
+      if (buffer[i] != '0' + 10) break;
+      buffer[i] = '0';
+      buffer[i - 1]++;
+    }
+    // If the first digit is now '0' + 10 we had a buffer with all '9's. With the
+    // exception of the first digit all digits are now '0'. Simply switch the
+    // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
+    // the power (the kappa) is increased.
+    if (buffer[0] == '0' + 10) {
+      buffer[0] = '1';
+      (*kappa) += 1;
+    }
+    return true;
+  }
+  return false;
+}
+
+// Returns the biggest power of ten that is less than or equal to the given
+// number. We furthermore receive the maximum number of bits 'number' has.
+//
+// Returns power == 10^(exponent_plus_one-1) such that
+// power <= number < power * 10.
+// If number_bits == 0 then 0^(0-1) is returned.
+// The number of bits must be <= 32.
+// Precondition: number < (1 << (number_bits + 1)).
+
+// Inspired by the method for finding an integer log base 10 from here:
+// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+// Lookup table: kSmallPowersOfTen[i] == 10^(i - 1) for i >= 1; index 0 is a
+// sentinel so that the decrement loop in BiggestPowerTen terminates.
+static unsigned int const kSmallPowersOfTen[] =
+    {0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000,
+     1000000000};
+
+static void BiggestPowerTen(uint32_t number,
+                            int number_bits,
+                            uint32_t* power,
+                            int* exponent_plus_one) {
+  ASSERT(number < (1u << (number_bits + 1)));
+  // 1233/4096 is approximately 1/lg(10).
+  int exponent_plus_one_guess = ((number_bits + 1) * 1233 >> 12);
+  // We increment to skip over the first entry in the kSmallPowersOfTen table.
+  // Note: kSmallPowersOfTen[i] == 10^(i-1).
+  exponent_plus_one_guess++;
+  // We don't have any guarantees that 2^number_bits <= number.
+  // TODO(floitsch): can we change the 'while' into an 'if'? We definitely see
+  // number < (2^number_bits - 1), but I haven't encountered
+  // number < (2^number_bits - 2) yet.
+  while (number < kSmallPowersOfTen[exponent_plus_one_guess]) {
+    exponent_plus_one_guess--;
+  }
+  *power = kSmallPowersOfTen[exponent_plus_one_guess];
+  *exponent_plus_one = exponent_plus_one_guess;
+}
+
+// Generates the digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+// Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * low, w and high are correct up to 1 ulp (unit in the last place). That
+// is, their error must be less than a unit of their last digits.
+// * low.e() == w.e() == high.e()
+// * low < w < high, and taking into account their error: low~ <= high~
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but len contains the number of digits.
+// * buffer contains the shortest possible decimal digit-sequence
+// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
+// correct values of low and high (without their error).
+// * if more than one decimal representation gives the minimal number of
+// decimal digits then the one closest to W (where W is the correct value
+// of w) is chosen.
+// Remark: this procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This usually happens rarely (~0.5%).
+//
+// Say, for the sake of example, that
+// w.e() == -48, and w.f() == 0x1234567890abcdef
+// w's value can be computed by w.f() * 2^w.e()
+// We can obtain w's integral digits by simply shifting w.f() by -w.e().
+// -> w's integral part is 0x1234
+// w's fractional part is therefore 0x567890abcdef.
+// Printing w's integral part is easy (simply print 0x1234 in decimal).
+// In order to print its fraction we repeatedly multiply the fraction by 10 and
+// get each digit. Example: the first digit after the point would be computed by
+// (0x567890abcdef * 10) >> 48. -> 3
+// The whole thing becomes slightly more complicated because we want to stop
+// once we have enough digits. That is, once the digits inside the buffer
+// represent 'w' we can stop. Everything inside the interval low - high
+// represents w. However we have to pay attention to low, high and w's
+// imprecision.
+static bool DigitGen(DiyFp low,
+                     DiyFp w,
+                     DiyFp high,
+                     Vector<char> buffer,
+                     int* length,
+                     int* kappa) {
+  ASSERT(low.e() == w.e() && w.e() == high.e());
+  ASSERT(low.f() + 1 <= high.f() - 1);
+  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+  // low, w and high are imprecise, but by less than one ulp (unit in the last
+  // place).
+  // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
+  // the new numbers are outside of the interval we want the final
+  // representation to lie in.
+  // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
+  // numbers that are certain to lie in the interval. We will use this fact
+  // later on.
+  // We will now start by generating the digits within the uncertain
+  // interval. Later we will weed out representations that lie outside the safe
+  // interval and thus _might_ lie outside the correct interval.
+  uint64_t unit = 1;
+  DiyFp too_low = DiyFp(low.f() - unit, low.e());
+  DiyFp too_high = DiyFp(high.f() + unit, high.e());
+  // too_low and too_high are guaranteed to lie outside the interval we want the
+  // generated number in.
+  DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
+  // We now cut the input number into two parts: the integral digits and the
+  // fractionals. We will not write any decimal separator though, but adapt
+  // kappa instead.
+  // Reminder: we are currently computing the digits (stored inside the buffer)
+  // such that:   too_low < buffer * 10^kappa < too_high
+  // We use too_high for the digit_generation and stop as soon as possible.
+  // If we stop early we effectively round down.
+  DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+  // Division by one is a shift.
+  uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
+  // Modulo by one is an and.
+  uint64_t fractionals = too_high.f() & (one.f() - 1);
+  uint32_t divisor;
+  int divisor_exponent_plus_one;
+  BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+                  &divisor, &divisor_exponent_plus_one);
+  *kappa = divisor_exponent_plus_one;
+  *length = 0;
+  // Loop invariant: buffer = too_high / 10^kappa  (integer division)
+  // The invariant holds for the first iteration: kappa has been initialized
+  // with the divisor exponent + 1. And the divisor is the biggest power of ten
+  // that is smaller than integrals.
+  while (*kappa > 0) {
+    int digit = integrals / divisor;
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    integrals %= divisor;
+    (*kappa)--;
+    // Note that kappa now equals the exponent of the divisor and that the
+    // invariant thus holds again.
+    uint64_t rest =
+        (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+    // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
+    // Reminder: unsafe_interval.e() == one.e()
+    if (rest < unsafe_interval.f()) {
+      // Rounding down (by not emitting the remaining digits) yields a number
+      // that lies within the unsafe interval.
+      return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
+                       unsafe_interval.f(), rest,
+                       static_cast<uint64_t>(divisor) << -one.e(), unit);
+    }
+    // Prepare the divisor for the next (smaller) decimal digit.
+    divisor /= 10;
+  }
+
+  // The integrals have been generated. We are at the point of the decimal
+  // separator. In the following loop we simply multiply the remaining digits by
+  // 10 and divide by one. We just need to pay attention to multiply associated
+  // data (like the interval or 'unit'), too.
+  // Note that the multiplication by 10 does not overflow, because w.e >= -60
+  // and thus one.e >= -60.
+  ASSERT(one.e() >= -60);
+  ASSERT(fractionals < one.f());
+  ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+  while (true) {
+    fractionals *= 10;
+    unit *= 10;
+    unsafe_interval.set_f(unsafe_interval.f() * 10);
+    // Integer division by one.
+    int digit = static_cast<int>(fractionals >> -one.e());
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    fractionals &= one.f() - 1;  // Modulo by one.
+    (*kappa)--;
+    if (fractionals < unsafe_interval.f()) {
+      return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
+                       unsafe_interval.f(), fractionals, one.f(), unit);
+    }
+  }
+}
+
+
+
+// Generates (at most) requested_digits digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+// Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * w is correct up to 1 ulp (unit in the last place). That
+// is, its error must be strictly less than a unit of its last digit.
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+//
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but length contains the number of
+// digits.
+// * the representation in buffer is the most precise representation of
+// requested_digits digits.
+// * buffer contains at most requested_digits digits of w. If there are less
+// than requested_digits digits then some trailing '0's have been removed.
+// * kappa is such that
+// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
+//
+// Remark: This procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This usually happens rarely, but the failure-rate
+// increases with higher requested_digits.
+static bool DigitGenCounted(DiyFp w,
+                            int requested_digits,
+                            Vector<char> buffer,
+                            int* length,
+                            int* kappa) {
+  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+  ASSERT(kMinimalTargetExponent >= -60);
+  ASSERT(kMaximalTargetExponent <= -32);
+  // w is assumed to have an error less than 1 unit. Whenever w is scaled we
+  // also scale its error.
+  uint64_t w_error = 1;
+  // We cut the input number into two parts: the integral digits and the
+  // fractional digits. We don't emit any decimal separator, but adapt kappa
+  // instead. Example: instead of writing "1.2" we put "12" into the buffer and
+  // increase kappa by 1.
+  DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+  // Division by one is a shift.
+  uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
+  // Modulo by one is an and.
+  uint64_t fractionals = w.f() & (one.f() - 1);
+  uint32_t divisor;
+  int divisor_exponent_plus_one;
+  BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+                  &divisor, &divisor_exponent_plus_one);
+  *kappa = divisor_exponent_plus_one;
+  *length = 0;
+
+  // Loop invariant: buffer = w / 10^kappa  (integer division)
+  // The invariant holds for the first iteration: kappa has been initialized
+  // with the divisor exponent + 1. And the divisor is the biggest power of ten
+  // that is smaller than 'integrals'.
+  while (*kappa > 0) {
+    int digit = integrals / divisor;
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    requested_digits--;
+    integrals %= divisor;
+    (*kappa)--;
+    // Note that kappa now equals the exponent of the divisor and that the
+    // invariant thus holds again.
+    if (requested_digits == 0) break;
+    divisor /= 10;
+  }
+
+  if (requested_digits == 0) {
+    uint64_t rest =
+        (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+    return RoundWeedCounted(buffer, *length, rest,
+                            static_cast<uint64_t>(divisor) << -one.e(), w_error,
+                            kappa);
+  }
+
+  // The integrals have been generated. We are at the point of the decimal
+  // separator. In the following loop we simply multiply the remaining digits by
+  // 10 and divide by one. We just need to pay attention to multiply associated
+  // data (the 'unit'), too.
+  // Note that the multiplication by 10 does not overflow, because w.e >= -60
+  // and thus one.e >= -60.
+  ASSERT(one.e() >= -60);
+  ASSERT(fractionals < one.f());
+  ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+  // Stop early once the remaining fraction is no longer above the accumulated
+  // error: further digits would not be meaningful.
+  while (requested_digits > 0 && fractionals > w_error) {
+    fractionals *= 10;
+    w_error *= 10;
+    // Integer division by one.
+    int digit = static_cast<int>(fractionals >> -one.e());
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    requested_digits--;
+    fractionals &= one.f() - 1;  // Modulo by one.
+    (*kappa)--;
+  }
+  if (requested_digits != 0) return false;
+  return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
+                          kappa);
+}
+
+
+// Provides a decimal representation of v.
+// Returns true if it succeeds, otherwise the result cannot be trusted.
+// There will be *length digits inside the buffer (not null-terminated).
+// If the function returns true then
+// v == (double) (buffer * 10^decimal_exponent).
+// The digits in the buffer are the shortest representation possible: no
+// 0.09999999999999999 instead of 0.1. The shorter representation will be
+// chosen even if the longer one would be closer to v.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the closest will be
+// computed.
+static bool Grisu3(double v,
+ FastDtoaMode mode,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ // boundary_minus and boundary_plus are the boundaries between v and its
+ // closest floating-point neighbors. Any number strictly between
+ // boundary_minus and boundary_plus will round to v when converted to a
+ // double. Grisu3 will never output representations that lie exactly on a
+ // boundary.
+ DiyFp boundary_minus, boundary_plus;
+ if (mode == FAST_DTOA_SHORTEST) {
+ Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+ } else {
+ ASSERT(mode == FAST_DTOA_SHORTEST_SINGLE);
+ float single_v = static_cast<float>(v);
+ Single(single_v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+ }
+ ASSERT(boundary_plus.e() == w.e());
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ int ten_mk_minimal_binary_exponent =
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ int ten_mk_maximal_binary_exponent =
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ ten_mk_minimal_binary_exponent,
+ ten_mk_maximal_binary_exponent,
+ &ten_mk, &mk);
+ ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+ // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+ // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+ // off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+ ASSERT(scaled_w.e() ==
+ boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
+ // In theory it would be possible to avoid some recomputations by computing
+ // the difference between w and boundary_minus/plus (a power of 2) and to
+ // compute scaled_boundary_minus/plus by subtracting/adding from
+ // scaled_w. However the code becomes much less readable and the speed
+ // enhancements are not terrific.
+ DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
+ DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
+
+ // DigitGen will generate the digits of scaled_w. Therefore we have
+ // v == (double) (scaled_w * 10^-mk).
+ // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
+ // integer then it will be updated. For instance if scaled_w == 1.23 then
+ // the buffer will be filled with "123" and the decimal_exponent will be
+ // decreased by 2.
+ int kappa;
+ bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+
+// The "counted" version of grisu3 (see above) only generates requested_digits
+// number of digits. This version does not generate the shortest representation,
+// and with enough requested digits 0.1 will at some point print as 0.9999999...
+// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
+// therefore the rounding strategy for halfway cases is irrelevant.
+static bool Grisu3Counted(double v,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ int ten_mk_minimal_binary_exponent =
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ int ten_mk_maximal_binary_exponent =
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ ten_mk_minimal_binary_exponent,
+ ten_mk_maximal_binary_exponent,
+ &ten_mk, &mk);
+ ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+ // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+ // too. The variable scaled_w is now off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+
+ // We now have (double) (scaled_w * 10^-mk).
+ // DigitGenCounted will generate the first requested_digits digits of scaled_w
+ // and return together with a kappa such that scaled_w ~= buffer * 10^kappa.
+ // (It will not always be exactly the same since DigitGenCounted only produces
+ // a limited number of digits.)
+ int kappa;
+ bool result = DigitGenCounted(scaled_w, requested_digits,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+
+// See fast-dtoa.h for the full contract. Dispatches to Grisu3 for the
+// shortest-representation modes and to Grisu3Counted for fixed precision,
+// then converts the returned decimal exponent into a decimal-point position.
+bool FastDtoa(double v,
+ FastDtoaMode mode,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point) {
+ ASSERT(v > 0);
+ ASSERT(!Double(v).IsSpecial());
+
+ bool result = false;
+ int decimal_exponent = 0;
+ switch (mode) {
+ case FAST_DTOA_SHORTEST:
+ case FAST_DTOA_SHORTEST_SINGLE:
+ result = Grisu3(v, mode, buffer, length, &decimal_exponent);
+ break;
+ case FAST_DTOA_PRECISION:
+ result = Grisu3Counted(v, requested_digits,
+ buffer, length, &decimal_exponent);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (result) {
+ // decimal_exponent is relative to the last generated digit; the decimal
+ // point is counted from the first digit in the buffer.
+ *decimal_point = *length + decimal_exponent;
+ buffer[*length] = '\0';
+ }
+ return result;
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/fast-dtoa.h b/src/3rdparty/double-conversion/fast-dtoa.h
new file mode 100644
index 0000000000..5f1e8eee5e
--- /dev/null
+++ b/src/3rdparty/double-conversion/fast-dtoa.h
@@ -0,0 +1,88 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_
+#define DOUBLE_CONVERSION_FAST_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// Selects which kind of decimal representation FastDtoa produces.
+enum FastDtoaMode {
+ // Computes the shortest representation of the given input. The returned
+ // result will be the most accurate number of this length. Longer
+ // representations might be more accurate.
+ FAST_DTOA_SHORTEST,
+ // Same as FAST_DTOA_SHORTEST but for single-precision floats.
+ FAST_DTOA_SHORTEST_SINGLE,
+ // Computes a representation where the precision (number of digits) is
+ // given as input. The precision is independent of the decimal point.
+ FAST_DTOA_PRECISION
+};
+
+// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
+// include the terminating '\0' character.
+static const int kFastDtoaMaximalLength = 17;
+// Same for single-precision numbers.
+static const int kFastDtoaMaximalSingleLength = 9;
+
+// Provides a decimal representation of v.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+// * v must be a strictly positive finite double.
+//
+// Returns true if it succeeds, otherwise the result can not be trusted.
+// There will be *length digits inside the buffer followed by a null terminator.
+// If the function returns true and mode equals
+// - FAST_DTOA_SHORTEST, then
+// the parameter requested_digits is ignored.
+// The result satisfies
+// v == (double) (buffer * 10^(point - length)).
+// The digits in the buffer are the shortest representation possible. E.g.
+// if 0.099999999999 and 0.1 represent the same double then "1" is returned
+// with point = 0.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the buffer will contain
+// the one closest to v.
+// - FAST_DTOA_PRECISION, then
+// the buffer contains requested_digits digits.
+// the difference v - (buffer * 10^(point-length)) is closest to zero for
+// all possible representations of requested_digits digits.
+// If there are two values that are equally close, then FastDtoa returns
+// false.
+// For both modes the buffer must be large enough to hold the result.
+bool FastDtoa(double d,
+ FastDtoaMode mode,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point);
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_FAST_DTOA_H_
diff --git a/src/3rdparty/double-conversion/fixed-dtoa.cc b/src/3rdparty/double-conversion/fixed-dtoa.cc
new file mode 100644
index 0000000000..d56b1449b2
--- /dev/null
+++ b/src/3rdparty/double-conversion/fixed-dtoa.cc
@@ -0,0 +1,402 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <math.h>
+
+#include "fixed-dtoa.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// Represents a 128bit type. This class should be replaced by a native type on
+// platforms that support 128bit integers.
+class UInt128 {
+ public:
+ UInt128() : high_bits_(0), low_bits_(0) { }
+ UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
+
+ // Multiplies *this by the given 32-bit multiplicand in place, propagating
+ // the carry through the four 32-bit limbs. The final ASSERT checks that the
+ // product still fits into 128 bits.
+ void Multiply(uint32_t multiplicand) {
+ uint64_t accumulator;
+
+ accumulator = (low_bits_ & kMask32) * multiplicand;
+ uint32_t part = static_cast<uint32_t>(accumulator & kMask32);
+ accumulator >>= 32;
+ accumulator = accumulator + (low_bits_ >> 32) * multiplicand;
+ low_bits_ = (accumulator << 32) + part;
+ accumulator >>= 32;
+ accumulator = accumulator + (high_bits_ & kMask32) * multiplicand;
+ part = static_cast<uint32_t>(accumulator & kMask32);
+ accumulator >>= 32;
+ accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
+ high_bits_ = (accumulator << 32) + part;
+ ASSERT((accumulator >> 32) == 0);
+ }
+
+ // Shifts the value by shift_amount bits: a negative amount shifts left, a
+ // positive amount shifts right.
+ void Shift(int shift_amount) {
+ ASSERT(-64 <= shift_amount && shift_amount <= 64);
+ if (shift_amount == 0) {
+ return;
+ } else if (shift_amount == -64) {
+ high_bits_ = low_bits_;
+ low_bits_ = 0;
+ } else if (shift_amount == 64) {
+ low_bits_ = high_bits_;
+ high_bits_ = 0;
+ } else if (shift_amount <= 0) {
+ high_bits_ <<= -shift_amount;
+ high_bits_ += low_bits_ >> (64 + shift_amount);
+ low_bits_ <<= -shift_amount;
+ } else {
+ low_bits_ >>= shift_amount;
+ low_bits_ += high_bits_ << (64 - shift_amount);
+ high_bits_ >>= shift_amount;
+ }
+ }
+
+ // Modifies *this to *this MOD (2^power).
+ // Returns *this DIV (2^power).
+ int DivModPowerOf2(int power) {
+ if (power >= 64) {
+ int result = static_cast<int>(high_bits_ >> (power - 64));
+ high_bits_ -= static_cast<uint64_t>(result) << (power - 64);
+ return result;
+ } else {
+ uint64_t part_low = low_bits_ >> power;
+ uint64_t part_high = high_bits_ << (64 - power);
+ int result = static_cast<int>(part_low + part_high);
+ high_bits_ = 0;
+ low_bits_ -= part_low << power;
+ return result;
+ }
+ }
+
+ bool IsZero() const {
+ return high_bits_ == 0 && low_bits_ == 0;
+ }
+
+ // Returns the bit at the given position (position 0 is the least
+ // significant bit).
+ int BitAt(int position) {
+ if (position >= 64) {
+ return static_cast<int>(high_bits_ >> (position - 64)) & 1;
+ } else {
+ return static_cast<int>(low_bits_ >> position) & 1;
+ }
+ }
+
+ private:
+ static const uint64_t kMask32 = 0xFFFFFFFF;
+ // Value == (high_bits_ << 64) + low_bits_
+ uint64_t high_bits_;
+ uint64_t low_bits_;
+};
+
+
+static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
+
+
+// Writes exactly requested_length decimal digits of 'number' (padded with
+// leading zeros) at buffer[*length] and advances *length accordingly.
+static void FillDigits32FixedLength(uint32_t number, int requested_length,
+ Vector<char> buffer, int* length) {
+ for (int i = requested_length - 1; i >= 0; --i) {
+ buffer[(*length) + i] = '0' + number % 10;
+ number /= 10;
+ }
+ *length += requested_length;
+}
+
+
+// Writes the decimal digits of 'number' (without leading zeros) at
+// buffer[*length] and advances *length. Writes nothing for number == 0.
+static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
+ int number_length = 0;
+ // We fill the digits in reverse order and exchange them afterwards.
+ while (number != 0) {
+ int digit = number % 10;
+ number /= 10;
+ buffer[(*length) + number_length] = '0' + digit;
+ number_length++;
+ }
+ // Exchange the digits.
+ int i = *length;
+ int j = *length + number_length - 1;
+ while (i < j) {
+ char tmp = buffer[i];
+ buffer[i] = buffer[j];
+ buffer[j] = tmp;
+ i++;
+ j--;
+ }
+ *length += number_length;
+}
+
+
+// Writes exactly 17 decimal digits (3 + 7 + 7, with leading zeros) of
+// 'number'. Note that the requested_length parameter is not used by the body;
+// the caller in this file always passes 17.
+static void FillDigits64FixedLength(uint64_t number, int requested_length,
+ Vector<char> buffer, int* length) {
+ const uint32_t kTen7 = 10000000;
+ // For efficiency cut the number into 3 uint32_t parts, and print those.
+ uint32_t part2 = static_cast<uint32_t>(number % kTen7);
+ number /= kTen7;
+ uint32_t part1 = static_cast<uint32_t>(number % kTen7);
+ uint32_t part0 = static_cast<uint32_t>(number / kTen7);
+
+ FillDigits32FixedLength(part0, 3, buffer, length);
+ FillDigits32FixedLength(part1, 7, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+}
+
+
+// Writes the decimal digits of 'number' without leading zeros, splitting the
+// 64-bit value into 7-digit chunks so the per-digit work stays in 32-bit
+// arithmetic.
+static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
+ const uint32_t kTen7 = 10000000;
+ // For efficiency cut the number into 3 uint32_t parts, and print those.
+ uint32_t part2 = static_cast<uint32_t>(number % kTen7);
+ number /= kTen7;
+ uint32_t part1 = static_cast<uint32_t>(number % kTen7);
+ uint32_t part0 = static_cast<uint32_t>(number / kTen7);
+
+ if (part0 != 0) {
+ FillDigits32(part0, buffer, length);
+ FillDigits32FixedLength(part1, 7, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+ } else if (part1 != 0) {
+ FillDigits32(part1, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+ } else {
+ FillDigits32(part2, buffer, length);
+ }
+}
+
+
+// Increments the decimal number in the buffer by one unit in the last place,
+// carrying into earlier digits as needed.
+static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
+ // An empty buffer represents 0.
+ if (*length == 0) {
+ buffer[0] = '1';
+ *decimal_point = 1;
+ *length = 1;
+ return;
+ }
+ // Round the last digit until we either have a digit that was not '9' or until
+ // we reached the first digit.
+ // A value of '0' + 10 marks a digit that overflowed past '9' and whose
+ // carry must be propagated to the digit before it.
+ buffer[(*length) - 1]++;
+ for (int i = (*length) - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) {
+ return;
+ }
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+ // If the first digit is now '0' + 10, we would need to set it to '0' and add
+ // a '1' in front. However we reach the first digit only if all following
+ // digits had been '9' before rounding up. Now all trailing digits are '0' and
+ // we simply switch the first digit to '1' and update the decimal-point
+ // (indicating that the point is now one digit to the right).
+ if (buffer[0] == '0' + 10) {
+ buffer[0] = '1';
+ (*decimal_point)++;
+ }
+}
+
+
+// The given fractionals number represents a fixed-point number with binary
+// point at bit (-exponent).
+// Preconditions:
+// -128 <= exponent <= 0.
+// 0 <= fractionals * 2^exponent < 1
+// The buffer holds the result.
+// The function will round its result. During the rounding-process digits not
+// generated by this function might be updated, and the decimal-point variable
+// might be updated. If this function generates the digits 99 and the buffer
+// already contained "199" (thus yielding a buffer of "19999") then a
+// rounding-up will change the contents of the buffer to "20000".
+static void FillFractionals(uint64_t fractionals, int exponent,
+ int fractional_count, Vector<char> buffer,
+ int* length, int* decimal_point) {
+ ASSERT(-128 <= exponent && exponent <= 0);
+ // 'fractionals' is a fixed-point number, with binary point at bit
+ // (-exponent). Inside the function the non-converted remainder of fractionals
+ // is a fixed-point number, with binary point at bit 'point'.
+ if (-exponent <= 64) {
+ // One 64 bit number is sufficient.
+ ASSERT(fractionals >> 56 == 0);
+ int point = -exponent;
+ for (int i = 0; i < fractional_count; ++i) {
+ if (fractionals == 0) break;
+ // Instead of multiplying by 10 we multiply by 5 and adjust the point
+ // location. This way the fractionals variable will not overflow.
+ // Invariant at the beginning of the loop: fractionals < 2^point.
+ // Initially we have: point <= 64 and fractionals < 2^56
+ // After each iteration the point is decremented by one.
+ // Note that 5^3 = 125 < 128 = 2^7.
+ // Therefore three iterations of this loop will not overflow fractionals
+ // (even without the subtraction at the end of the loop body). At this
+ // time point will satisfy point <= 61 and therefore fractionals < 2^point
+ // and any further multiplication of fractionals by 5 will not overflow.
+ fractionals *= 5;
+ point--;
+ int digit = static_cast<int>(fractionals >> point);
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ fractionals -= static_cast<uint64_t>(digit) << point;
+ }
+ // If the first bit after the point is set we have to round up.
+ // NOTE(review): the shift by (point - 1) assumes point >= 1, i.e.
+ // exponent < 0. Both callers in this file (see FastFixedDtoa) only reach
+ // here with a negative exponent -- confirm before adding new callers.
+ if (((fractionals >> (point - 1)) & 1) == 1) {
+ RoundUp(buffer, length, decimal_point);
+ }
+ } else { // We need 128 bits.
+ ASSERT(64 < -exponent && -exponent <= 128);
+ UInt128 fractionals128 = UInt128(fractionals, 0);
+ fractionals128.Shift(-exponent - 64);
+ int point = 128;
+ for (int i = 0; i < fractional_count; ++i) {
+ if (fractionals128.IsZero()) break;
+ // As before: instead of multiplying by 10 we multiply by 5 and adjust the
+ // point location.
+ // This multiplication will not overflow for the same reasons as before.
+ fractionals128.Multiply(5);
+ point--;
+ int digit = fractionals128.DivModPowerOf2(point);
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ }
+ // Round up if the first bit after the point is set.
+ if (fractionals128.BitAt(point - 1) == 1) {
+ RoundUp(buffer, length, decimal_point);
+ }
+ }
+}
+
+
+// Removes leading and trailing zeros.
+// If leading zeros are removed then the decimal point position is adjusted.
+// Removes leading and trailing zeros.
+// If leading zeros are removed then the decimal point position is adjusted.
+static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
+ while (*length > 0 && buffer[(*length) - 1] == '0') {
+ (*length)--;
+ }
+ int first_non_zero = 0;
+ while (first_non_zero < *length && buffer[first_non_zero] == '0') {
+ first_non_zero++;
+ }
+ if (first_non_zero != 0) {
+ // Shift the remaining digits to the front of the buffer.
+ for (int i = first_non_zero; i < *length; ++i) {
+ buffer[i - first_non_zero] = buffer[i];
+ }
+ *length -= first_non_zero;
+ *decimal_point -= first_non_zero;
+ }
+}
+
+
+// See fixed-dtoa.h for the full contract.
+bool FastFixedDtoa(double v,
+ int fractional_count,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point) {
+ const uint32_t kMaxUInt32 = 0xFFFFFFFF;
+ uint64_t significand = Double(v).Significand();
+ int exponent = Double(v).Exponent();
+ // v = significand * 2^exponent (with significand a 53bit integer).
+ // If the exponent is larger than 20 (i.e. we may have a 73bit number) then we
+ // don't know how to compute the representation. 2^73 ~= 9.5*10^21.
+ // If necessary this limit could probably be increased, but we don't need
+ // more.
+ if (exponent > 20) return false;
+ if (fractional_count > 20) return false;
+ *length = 0;
+ // At most kDoubleSignificandSize bits of the significand are non-zero.
+ // Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero
+ // bits: 0..11*..0xxx..53*..xx
+ if (exponent + kDoubleSignificandSize > 64) {
+ // The exponent must be > 11.
+ //
+ // We know that v = significand * 2^exponent.
+ // And the exponent > 11.
+ // We simplify the task by dividing v by 10^17.
+ // The quotient delivers the first digits, and the remainder fits into a 64
+ // bit number.
+ // Dividing by 10^17 is equivalent to dividing by 5^17*2^17.
+ const uint64_t kFive17 = UINT64_2PART_C(0xB1, A2BC2EC5); // 5^17
+ uint64_t divisor = kFive17;
+ int divisor_power = 17;
+ uint64_t dividend = significand;
+ uint32_t quotient;
+ uint64_t remainder;
+ // Let v = f * 2^e with f == significand and e == exponent.
+ // Then need q (quotient) and r (remainder) as follows:
+ // v = q * 10^17 + r
+ // f * 2^e = q * 10^17 + r
+ // f * 2^e = q * 5^17 * 2^17 + r
+ // If e > 17 then
+ // f * 2^(e-17) = q * 5^17 + r/2^17
+ // else
+ // f = q * 5^17 * 2^(17-e) + r/2^e
+ if (exponent > divisor_power) {
+ // We only allow exponents of up to 20 and therefore (17 - e) <= 3
+ dividend <<= exponent - divisor_power;
+ quotient = static_cast<uint32_t>(dividend / divisor);
+ remainder = (dividend % divisor) << divisor_power;
+ } else {
+ divisor <<= divisor_power - exponent;
+ quotient = static_cast<uint32_t>(dividend / divisor);
+ remainder = (dividend % divisor) << exponent;
+ }
+ FillDigits32(quotient, buffer, length);
+ FillDigits64FixedLength(remainder, divisor_power, buffer, length);
+ *decimal_point = *length;
+ } else if (exponent >= 0) {
+ // 0 <= exponent <= 11
+ significand <<= exponent;
+ FillDigits64(significand, buffer, length);
+ *decimal_point = *length;
+ } else if (exponent > -kDoubleSignificandSize) {
+ // We have to cut the number.
+ uint64_t integrals = significand >> -exponent;
+ uint64_t fractionals = significand - (integrals << -exponent);
+ if (integrals > kMaxUInt32) {
+ FillDigits64(integrals, buffer, length);
+ } else {
+ FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
+ }
+ *decimal_point = *length;
+ FillFractionals(fractionals, exponent, fractional_count,
+ buffer, length, decimal_point);
+ } else if (exponent < -128) {
+ // This configuration (with at most 20 digits) means that all digits must be
+ // 0.
+ ASSERT(fractional_count <= 20);
+ buffer[0] = '\0';
+ *length = 0;
+ *decimal_point = -fractional_count;
+ } else {
+ *decimal_point = 0;
+ FillFractionals(significand, exponent, fractional_count,
+ buffer, length, decimal_point);
+ }
+ TrimZeros(buffer, length, decimal_point);
+ buffer[*length] = '\0';
+ if ((*length) == 0) {
+ // The string is empty and the decimal_point thus has no importance. Mimic
+ // Gay's dtoa and set it to -fractional_count.
+ *decimal_point = -fractional_count;
+ }
+ return true;
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/fixed-dtoa.h b/src/3rdparty/double-conversion/fixed-dtoa.h
new file mode 100644
index 0000000000..3bdd08e21f
--- /dev/null
+++ b/src/3rdparty/double-conversion/fixed-dtoa.h
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_
+#define DOUBLE_CONVERSION_FIXED_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// Produces digits necessary to print a given number with
+// 'fractional_count' digits after the decimal point.
+// The buffer must be big enough to hold the result plus one terminating null
+// character.
+//
+// The produced digits might be too short in which case the caller has to fill
+// the gaps with '0's.
+// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
+// decimal_point = -2.
+// Halfway cases are rounded towards +/-Infinity (away from 0). The call
+// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
+// The returned buffer may contain digits that would be truncated from the
+// shortest representation of the input.
+//
+// This method only works for some parameters. If it can't handle the input it
+// returns false. The output is null-terminated when the function succeeds.
+bool FastFixedDtoa(double v, int fractional_count,
+ Vector<char> buffer, int* length, int* decimal_point);
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_
diff --git a/src/3rdparty/double-conversion/ieee.h b/src/3rdparty/double-conversion/ieee.h
new file mode 100644
index 0000000000..839dc47d45
--- /dev/null
+++ b/src/3rdparty/double-conversion/ieee.h
@@ -0,0 +1,398 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_H_
+#define DOUBLE_CONVERSION_DOUBLE_H_
+
+#include "diy-fp.h"
+
+namespace double_conversion {
+
+// We assume that doubles and uint64_t have the same endianness.
+static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+static uint32_t float_to_uint32(float f) { return BitCast<uint32_t>(f); }
+static float uint32_to_float(uint32_t d32) { return BitCast<float>(d32); }
+
+// Helper functions for doubles.
+class Double {
+ public:
+ static const uint64_t kSignMask = UINT64_2PART_C(0x80000000, 00000000);
+ static const uint64_t kExponentMask = UINT64_2PART_C(0x7FF00000, 00000000);
+ static const uint64_t kSignificandMask = UINT64_2PART_C(0x000FFFFF, FFFFFFFF);
+ static const uint64_t kHiddenBit = UINT64_2PART_C(0x00100000, 00000000);
+ static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
+ static const int kSignificandSize = 53;
+
+ Double() : d64_(0) {}
+ explicit Double(double d) : d64_(double_to_uint64(d)) {}
+ explicit Double(uint64_t d64) : d64_(d64) {}
+ explicit Double(DiyFp diy_fp)
+ : d64_(DiyFpToUint64(diy_fp)) {}
+
+ // The value encoded by this Double must be greater or equal to +0.0.
+ // It must not be special (infinity, or NaN).
+ DiyFp AsDiyFp() const {
+ ASSERT(Sign() > 0);
+ ASSERT(!IsSpecial());
+ return DiyFp(Significand(), Exponent());
+ }
+
+ // The value encoded by this Double must be strictly greater than 0.
+ DiyFp AsNormalizedDiyFp() const {
+ ASSERT(value() > 0.0);
+ uint64_t f = Significand();
+ int e = Exponent();
+
+ // The current double could be a denormal.
+ while ((f & kHiddenBit) == 0) {
+ f <<= 1;
+ e--;
+ }
+ // Do the final shifts in one go.
+ f <<= DiyFp::kSignificandSize - kSignificandSize;
+ e -= DiyFp::kSignificandSize - kSignificandSize;
+ return DiyFp(f, e);
+ }
+
+ // Returns the double's bit as uint64.
+ uint64_t AsUint64() const {
+ return d64_;
+ }
+
+ // Returns the next greater double. Returns +infinity on input +infinity.
+ double NextDouble() const {
+ if (d64_ == kInfinity) return Double(kInfinity).value();
+ if (Sign() < 0 && Significand() == 0) {
+ // -0.0
+ return 0.0;
+ }
+ if (Sign() < 0) {
+ return Double(d64_ - 1).value();
+ } else {
+ return Double(d64_ + 1).value();
+ }
+ }
+
+ double PreviousDouble() const {
+ if (d64_ == (kInfinity | kSignMask)) return -Double::Infinity();
+ if (Sign() < 0) {
+ return Double(d64_ + 1).value();
+ } else {
+ if (Significand() == 0) return -0.0;
+ return Double(d64_ - 1).value();
+ }
+ }
+
+ int Exponent() const {
+ if (IsDenormal()) return kDenormalExponent;
+
+ uint64_t d64 = AsUint64();
+ int biased_e =
+ static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
+ return biased_e - kExponentBias;
+ }
+
+ uint64_t Significand() const {
+ uint64_t d64 = AsUint64();
+ uint64_t significand = d64 & kSignificandMask;
+ if (!IsDenormal()) {
+ return significand + kHiddenBit;
+ } else {
+ return significand;
+ }
+ }
+
+ // Returns true if the double is a denormal.
+ bool IsDenormal() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == 0;
+ }
+
+ // We consider denormals not to be special.
+ // Hence only Infinity and NaN are special.
+ bool IsSpecial() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == kExponentMask;
+ }
+
+ bool IsNan() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) != 0);
+ }
+
+ bool IsInfinite() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) == 0);
+ }
+
+ int Sign() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kSignMask) == 0? 1: -1;
+ }
+
+ // Precondition: the value encoded by this Double must be greater or equal
+ // than +0.0.
+ DiyFp UpperBoundary() const {
+ ASSERT(Sign() > 0);
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+ }
+
+ // Computes the two boundaries of this.
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+ // exponent as m_plus.
+ // Precondition: the value encoded by this Double must be greater than 0.
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ ASSERT(value() > 0.0);
+ DiyFp v = this->AsDiyFp();
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+ DiyFp m_minus;
+ if (LowerBoundaryIsCloser()) {
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+ } else {
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+ }
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+ m_minus.set_e(m_plus.e());
+ *out_m_plus = m_plus;
+ *out_m_minus = m_minus;
+ }
+
+ bool LowerBoundaryIsCloser() const {
+ // The boundary is closer if the significand is of the form f == 2^p-1 then
+ // the lower boundary is closer.
+ // Think of v = 1000e10 and v- = 9999e9.
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+ // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is
+ // at the same distance as its successor.
+ // Note: denormals have the same exponent as the smallest normals.
+ bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0);
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
+ }
+
+ double value() const { return uint64_to_double(d64_); }
+
+ // Returns the significand size for a given order of magnitude.
+ // If v = f*2^e with 2^p-1 <= f <= 2^p then p+e is v's order of magnitude.
+ // This function returns the number of significant binary digits v will have
+ // once it's encoded into a double. In almost all cases this is equal to
+ // kSignificandSize. The only exceptions are denormals. They start with
+ // leading zeroes and their effective significand-size is hence smaller.
+ static int SignificandSizeForOrderOfMagnitude(int order) {
+ if (order >= (kDenormalExponent + kSignificandSize)) {
+ return kSignificandSize;
+ }
+ if (order <= kDenormalExponent) return 0;
+ return order - kDenormalExponent;
+ }
+
+ static double Infinity() {
+ return Double(kInfinity).value();
+ }
+
+ static double NaN() {
+ return Double(kNaN).value();
+ }
+
+ private:
+ static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
+ static const int kDenormalExponent = -kExponentBias + 1;
+ static const int kMaxExponent = 0x7FF - kExponentBias;
+ static const uint64_t kInfinity = UINT64_2PART_C(0x7FF00000, 00000000);
+ static const uint64_t kNaN = UINT64_2PART_C(0x7FF80000, 00000000);
+
+ const uint64_t d64_;
+
+ static uint64_t DiyFpToUint64(DiyFp diy_fp) {
+ uint64_t significand = diy_fp.f();
+ int exponent = diy_fp.e();
+ while (significand > kHiddenBit + kSignificandMask) {
+ significand >>= 1;
+ exponent++;
+ }
+ if (exponent >= kMaxExponent) {
+ return kInfinity;
+ }
+ if (exponent < kDenormalExponent) {
+ return 0;
+ }
+ while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) {
+ significand <<= 1;
+ exponent--;
+ }
+ uint64_t biased_exponent;
+ if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) {
+ biased_exponent = 0;
+ } else {
+ biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
+ }
+ return (significand & kSignificandMask) |
+ (biased_exponent << kPhysicalSignificandSize);
+ }
+};
+
+class Single {
+ public:
+ static const uint32_t kSignMask = 0x80000000;
+ static const uint32_t kExponentMask = 0x7F800000;
+ static const uint32_t kSignificandMask = 0x007FFFFF;
+ static const uint32_t kHiddenBit = 0x00800000;
+ static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit.
+ static const int kSignificandSize = 24;
+
+ Single() : d32_(0) {}
+ explicit Single(float f) : d32_(float_to_uint32(f)) {}
+ explicit Single(uint32_t d32) : d32_(d32) {}
+
+ // The value encoded by this Single must be greater or equal to +0.0.
+ // It must not be special (infinity, or NaN).
+ DiyFp AsDiyFp() const {
+ ASSERT(Sign() > 0);
+ ASSERT(!IsSpecial());
+ return DiyFp(Significand(), Exponent());
+ }
+
+ // Returns the single's bit as uint64.
+ uint32_t AsUint32() const {
+ return d32_;
+ }
+
+ int Exponent() const {
+ if (IsDenormal()) return kDenormalExponent;
+
+ uint32_t d32 = AsUint32();
+ int biased_e =
+ static_cast<int>((d32 & kExponentMask) >> kPhysicalSignificandSize);
+ return biased_e - kExponentBias;
+ }
+
+ uint32_t Significand() const {
+ uint32_t d32 = AsUint32();
+ uint32_t significand = d32 & kSignificandMask;
+ if (!IsDenormal()) {
+ return significand + kHiddenBit;
+ } else {
+ return significand;
+ }
+ }
+
+ // Returns true if the single is a denormal.
+ bool IsDenormal() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kExponentMask) == 0;
+ }
+
+ // We consider denormals not to be special.
+ // Hence only Infinity and NaN are special.
+ bool IsSpecial() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kExponentMask) == kExponentMask;
+ }
+
+ bool IsNan() const {
+ uint32_t d32 = AsUint32();
+ return ((d32 & kExponentMask) == kExponentMask) &&
+ ((d32 & kSignificandMask) != 0);
+ }
+
+ bool IsInfinite() const {
+ uint32_t d32 = AsUint32();
+ return ((d32 & kExponentMask) == kExponentMask) &&
+ ((d32 & kSignificandMask) == 0);
+ }
+
+ int Sign() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kSignMask) == 0? 1: -1;
+ }
+
+ // Computes the two boundaries of this.
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+ // exponent as m_plus.
+ // Precondition: the value encoded by this Single must be greater than 0.
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ ASSERT(value() > 0.0);
+ DiyFp v = this->AsDiyFp();
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+ DiyFp m_minus;
+ if (LowerBoundaryIsCloser()) {
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+ } else {
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+ }
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+ m_minus.set_e(m_plus.e());
+ *out_m_plus = m_plus;
+ *out_m_minus = m_minus;
+ }
+
+ // Precondition: the value encoded by this Single must be greater or equal
+ // than +0.0.
+ DiyFp UpperBoundary() const {
+ ASSERT(Sign() > 0);
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+ }
+
+ bool LowerBoundaryIsCloser() const {
+ // The boundary is closer if the significand is of the form f == 2^p-1 then
+ // the lower boundary is closer.
+ // Think of v = 1000e10 and v- = 9999e9.
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+ // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is
+ // at the same distance as its successor.
+ // Note: denormals have the same exponent as the smallest normals.
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0);
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
+ }
+
+ float value() const { return uint32_to_float(d32_); }
+
+ static float Infinity() {
+ return Single(kInfinity).value();
+ }
+
+ static float NaN() {
+ return Single(kNaN).value();
+ }
+
+ private:
+ static const int kExponentBias = 0x7F + kPhysicalSignificandSize;
+ static const int kDenormalExponent = -kExponentBias + 1;
+ static const int kMaxExponent = 0xFF - kExponentBias;
+ static const uint32_t kInfinity = 0x7F800000;
+ static const uint32_t kNaN = 0x7FC00000;
+
+ const uint32_t d32_;
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DOUBLE_H_
diff --git a/src/3rdparty/double-conversion/strtod.cc b/src/3rdparty/double-conversion/strtod.cc
new file mode 100644
index 0000000000..9758989f71
--- /dev/null
+++ b/src/3rdparty/double-conversion/strtod.cc
@@ -0,0 +1,554 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+#include <limits.h>
+
+#include "strtod.h"
+#include "bignum.h"
+#include "cached-powers.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// 2^53 = 9007199254740992.
+// Any integer with at most 15 decimal digits will hence fit into a double
+// (which has a 53bit significand) without loss of precision.
+static const int kMaxExactDoubleIntegerDecimalDigits = 15;
+// 2^64 = 18446744073709551616 > 10^19
+static const int kMaxUint64DecimalDigits = 19;
+
+// Max double: 1.7976931348623157 x 10^308
+// Min non-zero double: 4.9406564584124654 x 10^-324
+// Any x >= 10^309 is interpreted as +infinity.
+// Any x <= 10^-324 is interpreted as 0.
+// Note that 2.5e-324 (despite being smaller than the min double) will be read
+// as non-zero (equal to the min non-zero double).
+static const int kMaxDecimalPower = 309;
+static const int kMinDecimalPower = -324;
+
+// 2^64 = 18446744073709551616
+static const uint64_t kMaxUint64 = UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF);
+
+
+static const double exact_powers_of_ten[] = {
+ 1.0, // 10^0
+ 10.0,
+ 100.0,
+ 1000.0,
+ 10000.0,
+ 100000.0,
+ 1000000.0,
+ 10000000.0,
+ 100000000.0,
+ 1000000000.0,
+ 10000000000.0, // 10^10
+ 100000000000.0,
+ 1000000000000.0,
+ 10000000000000.0,
+ 100000000000000.0,
+ 1000000000000000.0,
+ 10000000000000000.0,
+ 100000000000000000.0,
+ 1000000000000000000.0,
+ 10000000000000000000.0,
+ 100000000000000000000.0, // 10^20
+ 1000000000000000000000.0,
+ // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
+ 10000000000000000000000.0
+};
+static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
+
+// Maximum number of significant digits in the decimal representation.
+// In fact the value is 772 (see conversions.cc), but to give us some margin
+// we round up to 780.
+static const int kMaxSignificantDecimalDigits = 780;
+
+static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
+ for (int i = 0; i < buffer.length(); i++) {
+ if (buffer[i] != '0') {
+ return buffer.SubVector(i, buffer.length());
+ }
+ }
+ return Vector<const char>(buffer.start(), 0);
+}
+
+
+static Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
+ for (int i = buffer.length() - 1; i >= 0; --i) {
+ if (buffer[i] != '0') {
+ return buffer.SubVector(0, i + 1);
+ }
+ }
+ return Vector<const char>(buffer.start(), 0);
+}
+
+
+static void CutToMaxSignificantDigits(Vector<const char> buffer,
+ int exponent,
+ char* significant_buffer,
+ int* significant_exponent) {
+ for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
+ significant_buffer[i] = buffer[i];
+ }
+ // The input buffer has been trimmed. Therefore the last digit must be
+ // different from '0'.
+ ASSERT(buffer[buffer.length() - 1] != '0');
+ // Set the last digit to be non-zero. This is sufficient to guarantee
+ // correct rounding.
+ significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
+ *significant_exponent =
+ exponent + (buffer.length() - kMaxSignificantDecimalDigits);
+}
+
+
+// Trims the buffer and cuts it to at most kMaxSignificantDecimalDigits.
+// If possible the input-buffer is reused, but if the buffer needs to be
+// modified (due to cutting), then the input needs to be copied into the
+// buffer_copy_space.
+static void TrimAndCut(Vector<const char> buffer, int exponent,
+ char* buffer_copy_space, int space_size,
+ Vector<const char>* trimmed, int* updated_exponent) {
+ Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
+ Vector<const char> right_trimmed = TrimTrailingZeros(left_trimmed);
+ exponent += left_trimmed.length() - right_trimmed.length();
+ if (right_trimmed.length() > kMaxSignificantDecimalDigits) {
+ ASSERT(space_size >= kMaxSignificantDecimalDigits);
+ CutToMaxSignificantDigits(right_trimmed, exponent,
+ buffer_copy_space, updated_exponent);
+ *trimmed = Vector<const char>(buffer_copy_space,
+ kMaxSignificantDecimalDigits);
+ } else {
+ *trimmed = right_trimmed;
+ *updated_exponent = exponent;
+ }
+}
+
+
+// Reads digits from the buffer and converts them to a uint64.
+// Reads in as many digits as fit into a uint64.
+// When the string starts with "1844674407370955161" no further digit is read.
+// Since 2^64 = 18446744073709551616 it would still be possible read another
+// digit if it was less or equal than 6, but this would complicate the code.
+static uint64_t ReadUint64(Vector<const char> buffer,
+ int* number_of_read_digits) {
+ uint64_t result = 0;
+ int i = 0;
+ while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
+ int digit = buffer[i++] - '0';
+ ASSERT(0 <= digit && digit <= 9);
+ result = 10 * result + digit;
+ }
+ *number_of_read_digits = i;
+ return result;
+}
+
+
+// Reads a DiyFp from the buffer.
+// The returned DiyFp is not necessarily normalized.
+// If remaining_decimals is zero then the returned DiyFp is accurate.
+// Otherwise it has been rounded and has error of at most 1/2 ulp.
+static void ReadDiyFp(Vector<const char> buffer,
+ DiyFp* result,
+ int* remaining_decimals) {
+ int read_digits;
+ uint64_t significand = ReadUint64(buffer, &read_digits);
+ if (buffer.length() == read_digits) {
+ *result = DiyFp(significand, 0);
+ *remaining_decimals = 0;
+ } else {
+ // Round the significand.
+ if (buffer[read_digits] >= '5') {
+ significand++;
+ }
+ // Compute the binary exponent.
+ int exponent = 0;
+ *result = DiyFp(significand, exponent);
+ *remaining_decimals = buffer.length() - read_digits;
+ }
+}
+
+
+static bool DoubleStrtod(Vector<const char> trimmed,
+ int exponent,
+ double* result) {
+#if !defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+ // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
+ // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
+ // result is not accurate.
+ // We know that Windows32 uses 64 bits and is therefore accurate.
+ // Note that the ARM simulator is compiled for 32bits. It therefore exhibits
+ // the same problem.
+ return false;
+#endif
+ if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
+ int read_digits;
+ // The trimmed input fits into a double.
+ // If the 10^exponent (resp. 10^-exponent) fits into a double too then we
+ // can compute the result-double simply by multiplying (resp. dividing) the
+ // two numbers.
+ // This is possible because IEEE guarantees that floating-point operations
+ // return the best possible approximation.
+ if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
+ // 10^-exponent fits into a double.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ ASSERT(read_digits == trimmed.length());
+ *result /= exact_powers_of_ten[-exponent];
+ return true;
+ }
+ if (0 <= exponent && exponent < kExactPowersOfTenSize) {
+ // 10^exponent fits into a double.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ ASSERT(read_digits == trimmed.length());
+ *result *= exact_powers_of_ten[exponent];
+ return true;
+ }
+ int remaining_digits =
+ kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
+ if ((0 <= exponent) &&
+ (exponent - remaining_digits < kExactPowersOfTenSize)) {
+ // The trimmed string was short and we can multiply it with
+ // 10^remaining_digits. As a result the remaining exponent now fits
+ // into a double too.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ ASSERT(read_digits == trimmed.length());
+ *result *= exact_powers_of_ten[remaining_digits];
+ *result *= exact_powers_of_ten[exponent - remaining_digits];
+ return true;
+ }
+ }
+ return false;
+}
+
+
+// Returns 10^exponent as an exact DiyFp.
+// The given exponent must be in the range [1; kDecimalExponentDistance[.
+static DiyFp AdjustmentPowerOfTen(int exponent) {
+ ASSERT(0 < exponent);
+ ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance);
+ // Simply hardcode the remaining powers for the given decimal exponent
+ // distance.
+ ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8);
+ switch (exponent) {
+ case 1: return DiyFp(UINT64_2PART_C(0xa0000000, 00000000), -60);
+ case 2: return DiyFp(UINT64_2PART_C(0xc8000000, 00000000), -57);
+ case 3: return DiyFp(UINT64_2PART_C(0xfa000000, 00000000), -54);
+ case 4: return DiyFp(UINT64_2PART_C(0x9c400000, 00000000), -50);
+ case 5: return DiyFp(UINT64_2PART_C(0xc3500000, 00000000), -47);
+ case 6: return DiyFp(UINT64_2PART_C(0xf4240000, 00000000), -44);
+ case 7: return DiyFp(UINT64_2PART_C(0x98968000, 00000000), -40);
+ default:
+ UNREACHABLE();
+ return DiyFp(0, 0);
+ }
+}
+
+
+// If the function returns true then the result is the correct double.
+// Otherwise it is either the correct double or the double that is just below
+// the correct double.
+static bool DiyFpStrtod(Vector<const char> buffer,
+ int exponent,
+ double* result) {
+ DiyFp input;
+ int remaining_decimals;
+ ReadDiyFp(buffer, &input, &remaining_decimals);
+ // Since we may have dropped some digits the input is not accurate.
+ // If remaining_decimals is different than 0 than the error is at most
+ // .5 ulp (unit in the last place).
+ // We don't want to deal with fractions and therefore keep a common
+ // denominator.
+ const int kDenominatorLog = 3;
+ const int kDenominator = 1 << kDenominatorLog;
+ // Move the remaining decimals into the exponent.
+ exponent += remaining_decimals;
+ int error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
+
+ int old_e = input.e();
+ input.Normalize();
+ error <<= old_e - input.e();
+
+ ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
+ if (exponent < PowersOfTenCache::kMinDecimalExponent) {
+ *result = 0.0;
+ return true;
+ }
+ DiyFp cached_power;
+ int cached_decimal_exponent;
+ PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
+ &cached_power,
+ &cached_decimal_exponent);
+
+ if (cached_decimal_exponent != exponent) {
+ int adjustment_exponent = exponent - cached_decimal_exponent;
+ DiyFp adjustment_power = AdjustmentPowerOfTen(adjustment_exponent);
+ input.Multiply(adjustment_power);
+ if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
+ // The product of input with the adjustment power fits into a 64 bit
+ // integer.
+ ASSERT(DiyFp::kSignificandSize == 64);
+ } else {
+ // The adjustment power is exact. There is hence only an error of 0.5.
+ error += kDenominator / 2;
+ }
+ }
+
+ input.Multiply(cached_power);
+ // The error introduced by a multiplication of a*b equals
+ // error_a + error_b + error_a*error_b/2^64 + 0.5
+ // Substituting a with 'input' and b with 'cached_power' we have
+ // error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
+ // error_ab = 0 or 1 / kDenominator > error_a*error_b/ 2^64
+ int error_b = kDenominator / 2;
+ int error_ab = (error == 0 ? 0 : 1); // We round up to 1.
+ int fixed_error = kDenominator / 2;
+ error += error_b + error_ab + fixed_error;
+
+ old_e = input.e();
+ input.Normalize();
+ error <<= old_e - input.e();
+
+ // See if the double's significand changes if we add/subtract the error.
+ int order_of_magnitude = DiyFp::kSignificandSize + input.e();
+ int effective_significand_size =
+ Double::SignificandSizeForOrderOfMagnitude(order_of_magnitude);
+ int precision_digits_count =
+ DiyFp::kSignificandSize - effective_significand_size;
+ if (precision_digits_count + kDenominatorLog >= DiyFp::kSignificandSize) {
+ // This can only happen for very small denormals. In this case the
+ // half-way multiplied by the denominator exceeds the range of an uint64.
+ // Simply shift everything to the right.
+ int shift_amount = (precision_digits_count + kDenominatorLog) -
+ DiyFp::kSignificandSize + 1;
+ input.set_f(input.f() >> shift_amount);
+ input.set_e(input.e() + shift_amount);
+ // We add 1 for the lost precision of error, and kDenominator for
+ // the lost precision of input.f().
+ error = (error >> shift_amount) + 1 + kDenominator;
+ precision_digits_count -= shift_amount;
+ }
+ // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
+ ASSERT(DiyFp::kSignificandSize == 64);
+ ASSERT(precision_digits_count < 64);
+ uint64_t one64 = 1;
+ uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
+ uint64_t precision_bits = input.f() & precision_bits_mask;
+ uint64_t half_way = one64 << (precision_digits_count - 1);
+ precision_bits *= kDenominator;
+ half_way *= kDenominator;
+ DiyFp rounded_input(input.f() >> precision_digits_count,
+ input.e() + precision_digits_count);
+ if (precision_bits >= half_way + error) {
+ rounded_input.set_f(rounded_input.f() + 1);
+ }
+ // If the last_bits are too close to the half-way case than we are too
+ // inaccurate and round down. In this case we return false so that we can
+ // fall back to a more precise algorithm.
+
+ *result = Double(rounded_input).value();
+ if (half_way - error < precision_bits && precision_bits < half_way + error) {
+ // Too imprecise. The caller will have to fall back to a slower version.
+ // However the returned number is guaranteed to be either the correct
+ // double, or the next-lower double.
+ return false;
+ } else {
+ return true;
+ }
+}
+
+
+// Returns
+// - -1 if buffer*10^exponent < diy_fp.
+// - 0 if buffer*10^exponent == diy_fp.
+// - +1 if buffer*10^exponent > diy_fp.
+// Preconditions:
+// buffer.length() + exponent <= kMaxDecimalPower + 1
+// buffer.length() + exponent > kMinDecimalPower
+// buffer.length() <= kMaxDecimalSignificantDigits
+static int CompareBufferWithDiyFp(Vector<const char> buffer,
+ int exponent,
+ DiyFp diy_fp) {
+ ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
+ ASSERT(buffer.length() + exponent > kMinDecimalPower);
+ ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
+ // Make sure that the Bignum will be able to hold all our numbers.
+ // Our Bignum implementation has a separate field for exponents. Shifts will
+ // consume at most one bigit (< 64 bits).
+ // ln(10) == 3.3219...
+ ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
+ Bignum buffer_bignum;
+ Bignum diy_fp_bignum;
+ buffer_bignum.AssignDecimalString(buffer);
+ diy_fp_bignum.AssignUInt64(diy_fp.f());
+ if (exponent >= 0) {
+ buffer_bignum.MultiplyByPowerOfTen(exponent);
+ } else {
+ diy_fp_bignum.MultiplyByPowerOfTen(-exponent);
+ }
+ if (diy_fp.e() > 0) {
+ diy_fp_bignum.ShiftLeft(diy_fp.e());
+ } else {
+ buffer_bignum.ShiftLeft(-diy_fp.e());
+ }
+ return Bignum::Compare(buffer_bignum, diy_fp_bignum);
+}
+
+
+// Returns true if the guess is the correct double.
+// Returns false, when guess is either correct or the next-lower double.
+static bool ComputeGuess(Vector<const char> trimmed, int exponent,
+ double* guess) {
+ if (trimmed.length() == 0) {
+ *guess = 0.0;
+ return true;
+ }
+ if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) {
+ *guess = Double::Infinity();
+ return true;
+ }
+ if (exponent + trimmed.length() <= kMinDecimalPower) {
+ *guess = 0.0;
+ return true;
+ }
+
+ if (DoubleStrtod(trimmed, exponent, guess) ||
+ DiyFpStrtod(trimmed, exponent, guess)) {
+ return true;
+ }
+ if (*guess == Double::Infinity()) {
+ return true;
+ }
+ return false;
+}
+
+double Strtod(Vector<const char> buffer, int exponent) {
+ char copy_buffer[kMaxSignificantDecimalDigits];
+ Vector<const char> trimmed;
+ int updated_exponent;
+ TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
+ &trimmed, &updated_exponent);
+ exponent = updated_exponent;
+
+ double guess;
+ bool is_correct = ComputeGuess(trimmed, exponent, &guess);
+ if (is_correct) return guess;
+
+ DiyFp upper_boundary = Double(guess).UpperBoundary();
+ int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
+ if (comparison < 0) {
+ return guess;
+ } else if (comparison > 0) {
+ return Double(guess).NextDouble();
+ } else if ((Double(guess).Significand() & 1) == 0) {
+ // Round towards even.
+ return guess;
+ } else {
+ return Double(guess).NextDouble();
+ }
+}
+
+float Strtof(Vector<const char> buffer, int exponent) {
+ char copy_buffer[kMaxSignificantDecimalDigits];
+ Vector<const char> trimmed;
+ int updated_exponent;
+ TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
+ &trimmed, &updated_exponent);
+ exponent = updated_exponent;
+
+ double double_guess;
+ bool is_correct = ComputeGuess(trimmed, exponent, &double_guess);
+
+ float float_guess = static_cast<float>(double_guess);
+ if (float_guess == double_guess) {
+ // This shortcut triggers for integer values.
+ return float_guess;
+ }
+
+ // We must catch double-rounding. Say the double has been rounded up, and is
+ // now a boundary of a float, and rounds up again. This is why we have to
+ // look at previous too.
+ // Example (in decimal numbers):
+ // input: 12349
+ // high-precision (4 digits): 1235
+ // low-precision (3 digits):
+ // when read from input: 123
+ // when rounded from high precision: 124.
+ // To do this we simply look at the neigbors of the correct result and see
+ // if they would round to the same float. If the guess is not correct we have
+ // to look at four values (since two different doubles could be the correct
+ // double).
+
+ double double_next = Double(double_guess).NextDouble();
+ double double_previous = Double(double_guess).PreviousDouble();
+
+ float f1 = static_cast<float>(double_previous);
+ float f2 = float_guess;
+ float f3 = static_cast<float>(double_next);
+ float f4;
+ if (is_correct) {
+ f4 = f3;
+ } else {
+ double double_next2 = Double(double_next).NextDouble();
+ f4 = static_cast<float>(double_next2);
+ }
+ ASSERT(f1 <= f2 && f2 <= f3 && f3 <= f4);
+
+ // If the guess doesn't lie near a single-precision boundary we can simply
+ // return its float-value.
+ if (f1 == f4) {
+ return float_guess;
+ }
+
+ ASSERT((f1 != f2 && f2 == f3 && f3 == f4) ||
+ (f1 == f2 && f2 != f3 && f3 == f4) ||
+ (f1 == f2 && f2 == f3 && f3 != f4));
+
+ // guess and next are the two possible canditates (in the same way that
+ // double_guess was the lower candidate for a double-precision guess).
+ float guess = f1;
+ float next = f4;
+ DiyFp upper_boundary;
+ if (guess == 0.0f) {
+ float min_float = 1e-45f;
+ upper_boundary = Double(static_cast<double>(min_float) / 2).AsDiyFp();
+ } else {
+ upper_boundary = Single(guess).UpperBoundary();
+ }
+ int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
+ if (comparison < 0) {
+ return guess;
+ } else if (comparison > 0) {
+ return next;
+ } else if ((Single(guess).Significand() & 1) == 0) {
+ // Round towards even.
+ return guess;
+ } else {
+ return next;
+ }
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/strtod.h b/src/3rdparty/double-conversion/strtod.h
new file mode 100644
index 0000000000..ed0293b8f5
--- /dev/null
+++ b/src/3rdparty/double-conversion/strtod.h
@@ -0,0 +1,45 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_STRTOD_H_
+#define DOUBLE_CONVERSION_STRTOD_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// The buffer must only contain digits in the range [0-9]. It must not
+// contain a dot or a sign. It must not start with '0', and must not be empty.
+double Strtod(Vector<const char> buffer, int exponent);
+
+// The buffer must only contain digits in the range [0-9]. It must not
+// contain a dot or a sign. It must not start with '0', and must not be empty.
+float Strtof(Vector<const char> buffer, int exponent);
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_STRTOD_H_
diff --git a/src/3rdparty/double-conversion/utils.h b/src/3rdparty/double-conversion/utils.h
new file mode 100644
index 0000000000..767094b8b7
--- /dev/null
+++ b/src/3rdparty/double-conversion/utils.h
@@ -0,0 +1,313 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_UTILS_H_
+#define DOUBLE_CONVERSION_UTILS_H_
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <assert.h>
+#ifndef ASSERT
+#define ASSERT(condition) (assert(condition))
+#endif
+#ifndef UNIMPLEMENTED
+#define UNIMPLEMENTED() (abort())
+#endif
+#ifndef UNREACHABLE
+#define UNREACHABLE() (abort())
+#endif
+
+// Double operations detection based on target architecture.
+// Linux uses a 80bit wide floating point stack on x86. This induces double
+// rounding, which in turn leads to wrong results.
+// An easy way to test if the floating-point operations are correct is to
+// evaluate: 89255.0/1e22. If the floating-point stack is 64 bits wide then
+// the result is equal to 89255e-22.
+// The best way to test this, is to create a division-function and to compare
+// the output of the division with the expected result. (Inlining must be
+// disabled.)
+// On Linux,x86 89255e-22 != Div_double(89255.0/1e22)
+#if defined(_M_X64) || defined(__x86_64__) || \
+ defined(__ARMEL__) || defined(__avr32__) || \
+ defined(__hppa__) || defined(__ia64__) || \
+ defined(__mips__) || defined(__powerpc__) || \
+ defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
+ defined(__SH4__) || defined(__alpha__) || \
+ defined(_MIPS_ARCH_MIPS32R2)
+#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
+#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
+#if defined(_WIN32)
+// Windows uses a 64bit wide floating point stack.
+#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
+#else
+#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
+#endif // _WIN32
+#else
+#error Target architecture was not detected as supported by Double-Conversion.
+#endif
+
+
+#if defined(_WIN32) && !defined(__MINGW32__)
+
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t; // NOLINT
+typedef unsigned short uint16_t; // NOLINT
+typedef int int32_t;
+typedef unsigned int uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+// intptr_t and friends are defined in crtdefs.h through stdio.h.
+
+#else
+
+#include <stdint.h>
+
+#endif
+
+// The following macro works on both 32 and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456
+// write UINT64_2PART_C(0x12345678,90123456);
+#define UINT64_2PART_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+
+
+// The expression ARRAY_SIZE(a) is a compile-time constant of type
+// size_t which represents the number of elements of the given
+// array. You should only use ARRAY_SIZE on statically allocated
+// arrays.
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+#endif
+
+// A macro to disallow the evil copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#ifndef DISALLOW_COPY_AND_ASSIGN
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+#endif
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#ifndef DISALLOW_IMPLICIT_CONSTRUCTORS
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName(); \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+#endif
+
+namespace double_conversion {
+
+static const int kCharSize = sizeof(char);
+
+// Returns the maximum of the two parameters.
+template <typename T>
+static T Max(T a, T b) {
+ return a < b ? b : a;
+}
+
+
+// Returns the minimum of the two parameters.
+template <typename T>
+static T Min(T a, T b) {
+ return a < b ? a : b;
+}
+
+
+inline int StrLength(const char* string) {
+ size_t length = strlen(string);
+ ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
+ return static_cast<int>(length);
+}
+
+// This is a simplified version of V8's Vector class.
+template <typename T>
+class Vector {
+ public:
+ Vector() : start_(NULL), length_(0) {}
+ Vector(T* data, int length) : start_(data), length_(length) {
+ ASSERT(length == 0 || (length > 0 && data != NULL));
+ }
+
+ // Returns a vector using the same backing storage as this one,
+ // spanning from and including 'from', to but not including 'to'.
+ Vector<T> SubVector(int from, int to) {
+ ASSERT(to <= length_);
+ ASSERT(from < to);
+ ASSERT(0 <= from);
+ return Vector<T>(start() + from, to - from);
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns whether or not the vector is empty.
+ bool is_empty() const { return length_ == 0; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ T& first() { return start_[0]; }
+
+ T& last() { return start_[length_ - 1]; }
+
+ private:
+ T* start_;
+ int length_;
+};
+
+
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that checks the
+// buffer bounds on all operations in debug mode.
+class StringBuilder {
+ public:
+ StringBuilder(char* buffer, int size)
+ : buffer_(buffer, size), position_(0) { }
+
+ ~StringBuilder() { if (!is_finalized()) Finalize(); }
+
+ int size() const { return buffer_.length(); }
+
+ // Get the current position in the builder.
+ int position() const {
+ ASSERT(!is_finalized());
+ return position_;
+ }
+
+ // Reset the position.
+ void Reset() { position_ = 0; }
+
+ // Add a single character to the builder. It is not allowed to add
+ // 0-characters; use the Finalize() method to terminate the string
+ // instead.
+ void AddCharacter(char c) {
+ ASSERT(c != '\0');
+ ASSERT(!is_finalized() && position_ < buffer_.length());
+ buffer_[position_++] = c;
+ }
+
+ // Add an entire string to the builder. Uses strlen() internally to
+ // compute the length of the input string.
+ void AddString(const char* s) {
+ AddSubstring(s, StrLength(s));
+ }
+
+ // Add the first 'n' characters of the given string 's' to the
+ // builder. The input string must have enough characters.
+ void AddSubstring(const char* s, int n) {
+ ASSERT(!is_finalized() && position_ + n < buffer_.length());
+ ASSERT(static_cast<size_t>(n) <= strlen(s));
+ memmove(&buffer_[position_], s, n * kCharSize);
+ position_ += n;
+ }
+
+
+ // Add character padding to the builder. If count is non-positive,
+ // nothing is added to the builder.
+ void AddPadding(char c, int count) {
+ for (int i = 0; i < count; i++) {
+ AddCharacter(c);
+ }
+ }
+
+ // Finalize the string by 0-terminating it and returning the buffer.
+ char* Finalize() {
+ ASSERT(!is_finalized() && position_ < buffer_.length());
+ buffer_[position_] = '\0';
+ // Make sure nobody managed to add a 0-character to the
+ // buffer while building the string.
+ ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
+ position_ = -1;
+ ASSERT(is_finalized());
+ return buffer_.start();
+ }
+
+ private:
+ Vector<char> buffer_;
+ int position_;
+
+ bool is_finalized() const { return position_ < 0; }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+};
+
+// The type-based aliasing rule allows the compiler to assume that pointers of
+// different types (for some definition of different) never alias each other.
+// Thus the following code does not work:
+//
+// float f = foo();
+// int fbits = *(int*)(&f);
+//
+// The compiler 'knows' that the int pointer can't refer to f since the types
+// don't match, so the compiler may cache f in a register, leaving random data
+// in fbits. Using C++ style casts makes no difference, however a pointer to
+// char data is assumed to alias any other pointer. This is the 'memcpy
+// exception'.
+//
+// Bit_cast uses the memcpy exception to move the bits from a variable of one
+// type of a variable of another type. Of course the end result is likely to
+// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
+// will completely optimize BitCast away.
+//
+// There is an additional use for BitCast.
+// Recent gccs will warn when they see casts that may result in breakage due to
+// the type-based aliasing rule. If you have checked that there is no breakage
+// you can use BitCast to cast one pointer type to another. This confuses gcc
+// enough that it can no longer see that you have cast one pointer type to
+// another thus avoiding the warning.
+template <class Dest, class Source>
+inline Dest BitCast(const Source& source) {
+ // Compile time assertion: sizeof(Dest) == sizeof(Source)
+ // A compile error here means your Dest and Source have different sizes.
+ typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
+
+ Dest dest;
+ memmove(&dest, &source, sizeof(dest));
+ return dest;
+}
+
+template <class Dest, class Source>
+inline Dest BitCast(Source* source) {
+ return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+}
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_UTILS_H_
diff --git a/src/3rdparty/masm/WeakRandom.h b/src/3rdparty/masm/WeakRandom.h
new file mode 100644
index 0000000000..325d1f6ac6
--- /dev/null
+++ b/src/3rdparty/masm/WeakRandom.h
@@ -0,0 +1,52 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef MASM_WEAKRANDOM_H
+#define MASM_WEAKRANDOM_H
+
+#include <stdint.h>
+
+struct WeakRandom {
+ WeakRandom(int) {}
+ uint32_t getUint32() { return 0; }
+};
+
+#endif // MASM_WEAKRANDOM_H
diff --git a/src/3rdparty/masm/assembler/ARMAssembler.cpp b/src/3rdparty/masm/assembler/ARMAssembler.cpp
new file mode 100644
index 0000000000..6912d1ea39
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMAssembler.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+ ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+ ARMWord index = (*ldr & 0xfff) >> 1;
+
+ ASSERT(diff >= 1);
+ if (diff >= 2 || index > 0) {
+ diff = (diff + index - 2) * sizeof(ARMWord);
+ ASSERT(diff <= 0xfff);
+ *ldr = (*ldr & ~0xfff) | diff;
+ } else
+ *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+ int rol;
+
+ if (imm <= 0xff)
+ return Op2Immediate | imm;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ return Op2Immediate | (imm >> 24) | (rol << 8);
+
+ return InvalidImmediate;
+}
+
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+ // Step1: Search a non-immediate part
+ ARMWord mask;
+ ARMWord imm1;
+ ARMWord imm2;
+ int rol;
+
+ mask = 0xff000000;
+ rol = 8;
+ while(1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ // rol 8
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ASSERT((imm & 0xff) == 0);
+
+ if ((imm & 0xff000000) == 0) {
+ imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ } else {
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ if (positive) {
+ mov(reg, imm1);
+ orr(reg, reg, imm2);
+ } else {
+ mvn(reg, imm1);
+ bic(reg, reg, imm2);
+ }
+
+ return 1;
+}
+
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+ ARMWord tmp;
+
+ // Do it by 1 instruction
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate)
+ return tmp;
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ if (invert)
+ return tmp | Op2InvertedImmediate;
+ mvn(tmpReg, tmp);
+ return tmpReg;
+ }
+
+ return encodeComplexImm(imm, tmpReg);
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+ ARMWord tmp;
+
+ // Do it by 1 instruction
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate) {
+ mov(dest, tmp);
+ return;
+ }
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ mvn(dest, tmp);
+ return;
+ }
+
+ encodeComplexImm(imm, dest);
+}
+
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ ARMWord tmp = getImm16Op2(imm);
+ if (tmp != InvalidImmediate) {
+ movw(dest, tmp);
+ return dest;
+ }
+ movw(dest, getImm16Op2(imm & 0xffff));
+ movt(dest, getImm16Op2(imm >> 16));
+ return dest;
+#else
+ // Do it by 2 instruction
+ if (genInt(dest, imm, true))
+ return dest;
+ if (genInt(dest, ~imm, false))
+ return dest;
+
+ ldrImmediate(dest, imm);
+ return dest;
+#endif
+}
+
+// Memory load/store helpers
+
+void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xfff)
+ dtrUp(transferType, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
+ dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xfff)
+ dtrDown(transferType, srcDst, base, -offset);
+ else if (offset >= -0xfffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
+ dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
+
+void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ ASSERT(scale >= 0 && scale <= 3);
+ ARMWord op2 = lsl(index, scale);
+
+ if (!offset) {
+ dtrUpRegister(transferType, srcDst, base, op2);
+ return;
+ }
+
+ if (offset <= 0xfffff && offset >= -0xfffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xff)
+ halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
+ else if (offset <= 0xffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
+ halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xff)
+ halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
+ else if (offset >= -0xffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
+ halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
+
+void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ if (!scale && !offset) {
+ halfDtrUpRegister(transferType, srcDst, base, index);
+ return;
+ }
+
+ ARMWord op2 = lsl(index, scale);
+
+ if (offset <= 0xffff && offset >= -0xffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+ // VFP cannot directly access memory that is not four-byte-aligned
+ if (!(offset & 0x3)) {
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrUp(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrDown(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, base);
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
+}
+
+void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ add(ARMRegisters::S1, base, lsl(index, scale));
+ dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
+}
+
+PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+{
+ // 64-bit alignment is required for next constant pool and JIT code as well
+ m_buffer.flushWithoutBarrier(true);
+ if (!m_buffer.isAligned(8))
+ bkpt(0);
+
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+ char* data = reinterpret_cast<char*>(result->start());
+
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ // The last bit is set if the constant must be placed on constant pool.
+ int pos = (iter->m_offset) & (~0x1);
+ ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
+ ARMWord* addr = getLdrImmAddress(ldrAddr);
+ if (*addr != InvalidBranchTarget) {
+ if (!(iter->m_offset & 1)) {
+ intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);
+
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
+ continue;
+ }
+ }
+ *addr = reinterpret_cast<ARMWord>(data + *addr);
+ }
+ }
+
+ return result;
+}
+
+#if OS(LINUX) && COMPILER(RVCT)
+
+__asm void ARMAssembler::cacheFlush(void* code, size_t size)
+{
+ ARM
+ push {r7}
+ add r1, r1, r0
+ mov r7, #0xf0000
+ add r7, r7, #0x2
+ mov r2, #0x0
+ svc #0x0
+ pop {r7}
+ bx lr
+}
+
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/src/3rdparty/masm/assembler/ARMAssembler.h b/src/3rdparty/masm/assembler/ARMAssembler.h
new file mode 100644
index 0000000000..3888226b21
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMAssembler.h
@@ -0,0 +1,1129 @@
+/*
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+ typedef uint32_t ARMWord;
+
+ namespace ARMRegisters {
+ typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3, S0 = r3, /* Same as thumb assembler. */
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12, S1 = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15
+ } RegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7, SD0 = d7, /* Same as thumb assembler. */
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31
+ } FPRegisterID;
+
+ } // namespace ARMRegisters
+
+ class ARMAssembler {
+ public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ ARMAssembler()
+ : m_indexOfTailOfLastWatchpoint(1)
+ {
+ }
+
+ // ARM conditional constants
+ typedef enum {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ } Condition;
+
+ // ARM instruction constants
+ enum {
+ AND = (0x0 << 21),
+ EOR = (0x1 << 21),
+ SUB = (0x2 << 21),
+ RSB = (0x3 << 21),
+ ADD = (0x4 << 21),
+ ADC = (0x5 << 21),
+ SBC = (0x6 << 21),
+ RSC = (0x7 << 21),
+ TST = (0x8 << 21),
+ TEQ = (0x9 << 21),
+ CMP = (0xa << 21),
+ CMN = (0xb << 21),
+ ORR = (0xc << 21),
+ MOV = (0xd << 21),
+ BIC = (0xe << 21),
+ MVN = (0xf << 21),
+ MUL = 0x00000090,
+ MULL = 0x00c00090,
+ VMOV_F64 = 0x0eb00b40,
+ VADD_F64 = 0x0e300b00,
+ VDIV_F64 = 0x0e800b00,
+ VSUB_F64 = 0x0e300b40,
+ VMUL_F64 = 0x0e200b00,
+ VCMP_F64 = 0x0eb40b40,
+ VSQRT_F64 = 0x0eb10bc0,
+ VABS_F64 = 0x0eb00bc0,
+ VNEG_F64 = 0x0eb10b40,
+ STMDB = 0x09200000,
+ LDMIA = 0x08b00000,
+ B = 0x0a000000,
+ BL = 0x0b000000,
+ BX = 0x012fff10,
+ VMOV_VFP64 = 0x0c400a10,
+ VMOV_ARM64 = 0x0c500a10,
+ VMOV_VFP32 = 0x0e000a10,
+ VMOV_ARM32 = 0x0e100a10,
+ VCVT_F64_S32 = 0x0eb80bc0,
+ VCVT_S32_F64 = 0x0ebd0b40,
+ VCVT_U32_F64 = 0x0ebc0b40,
+ VCVT_F32_F64 = 0x0eb70bc0,
+ VCVT_F64_F32 = 0x0eb70ac0,
+ VMRS_APSR = 0x0ef1fa10,
+ CLZ = 0x016f0f10,
+ BKPT = 0xe1200070,
+ BLX = 0x012fff30,
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ MOVW = 0x03000000,
+ MOVT = 0x03400000,
+#endif
+ NOP = 0xe1a00000,
+ };
+
+ enum {
+ Op2Immediate = (1 << 25),
+ ImmediateForHalfWordTransfer = (1 << 22),
+ Op2InvertedImmediate = (1 << 26),
+ SetConditionalCodes = (1 << 20),
+ Op2IsRegisterArgument = (1 << 25),
+ // Data transfer flags.
+ DataTransferUp = (1 << 23),
+ DataTransferWriteBack = (1 << 21),
+ DataTransferPostUpdate = (1 << 24),
+ DataTransferLoad = (1 << 20),
+ ByteDataTransfer = (1 << 22),
+ };
+
+ enum DataTransferTypeA {
+ LoadUint32 = 0x05000000 | DataTransferLoad,
+ LoadUint8 = 0x05400000 | DataTransferLoad,
+ StoreUint32 = 0x05000000,
+ StoreUint8 = 0x05400000,
+ };
+
+ enum DataTransferTypeB {
+ LoadUint16 = 0x010000b0 | DataTransferLoad,
+ LoadInt16 = 0x010000f0 | DataTransferLoad,
+ LoadInt8 = 0x010000d0 | DataTransferLoad,
+ StoreUint16 = 0x010000b0,
+ };
+
+ enum DataTransferTypeFloat {
+ LoadFloat = 0x0d000a00 | DataTransferLoad,
+ LoadDouble = 0x0d000b00 | DataTransferLoad,
+ StoreFloat = 0x0d000a00,
+ StoreDouble = 0x0d000b00,
+ };
+
+ // Masks of ARM instructions
+ enum {
+ BranchOffsetMask = 0x00ffffff,
+ ConditionalFieldMask = 0xf0000000,
+ DataTransferOffsetMask = 0xfff,
+ };
+
+ enum {
+ MinimumBranchOffsetDistance = -0x00800000,
+ MaximumBranchOffsetDistance = 0x007fffff,
+ };
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0000,
+ padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
+ };
+
+ static const ARMWord InvalidImmediate = 0xf0000000;
+ static const ARMWord InvalidBranchTarget = 0xffffffff;
+ static const int DefaultPrefetchOffset = 2;
+
+ static const ARMWord BlxInstructionMask = 0x012fff30;
+ static const ARMWord LdrOrAddInstructionMask = 0x0ff00000;
+ static const ARMWord LdrPcImmediateInstructionMask = 0x0f7f0000;
+
+ static const ARMWord AddImmediateInstruction = 0x02800000;
+ static const ARMWord BlxInstruction = 0x012fff30;
+ static const ARMWord LdrImmediateInstruction = 0x05900000;
+ static const ARMWord LdrPcImmediateInstruction = 0x051f0000;
+
+        // Instruction formatting
+
+ void emitInstruction(ARMWord op, int rd, int rn, ARMWord op2)
+ {
+ ASSERT(((op2 & ~Op2Immediate) <= 0xfff) || (((op2 & ~ImmediateForHalfWordTransfer) <= 0xfff)));
+ m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+ }
+
+ void emitDoublePrecisionInstruction(ARMWord op, int dd, int dn, int dm)
+ {
+ ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
+ m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
+ | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
+ | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
+ }
+
+ void emitSinglePrecisionInstruction(ARMWord op, int sd, int sn, int sm)
+ {
+ ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
+ m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
+ | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
+ | (sm >> 1) | ((sm & 0x1) << 5));
+ }
+
+ void bitAnd(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND, rd, rn, op2);
+ }
+
+ void bitAnds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void eor(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR, rd, rn, op2);
+ }
+
+ void eors(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sub(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB, rd, rn, op2);
+ }
+
+ void subs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsb(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB, rd, rn, op2);
+ }
+
+ void rsbs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void add(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD, rd, rn, op2);
+ }
+
+ void adds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void adc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC, rd, rn, op2);
+ }
+
+ void adcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sbc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC, rd, rn, op2);
+ }
+
+ void sbcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC, rd, rn, op2);
+ }
+
+ void rscs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void tst(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TST | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void teq(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TEQ | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmp(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMP | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmn(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMN | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void orr(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR, rd, rn, op2);
+ }
+
+ void orrs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mov(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV, rd, ARMRegisters::r0, op2);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ void movw(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVW | RD(rd) | op2);
+ }
+
+ void movt(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVT | RD(rd) | op2);
+ }
+#endif
+
+ void movs(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void bic(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC, rd, rn, op2);
+ }
+
+ void bics(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mvn(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN, rd, ARMRegisters::r0, op2);
+ }
+
+ void mvns(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void mul(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void muls(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | SetConditionalCodes | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void mull(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+ }
+
+ void vmov_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
+ }
+
+ void vadd_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VADD_F64, dd, dn, dm);
+ }
+
+ void vdiv_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VDIV_F64, dd, dn, dm);
+ }
+
+ void vsub_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSUB_F64, dd, dn, dm);
+ }
+
+ void vmul_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMUL_F64, dd, dn, dm);
+ }
+
+ void vcmp_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCMP_F64, dd, 0, dm);
+ }
+
+ void vsqrt_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSQRT_F64, dd, 0, dm);
+ }
+
+ void vabs_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VABS_F64, dd, 0, dm);
+ }
+
+ void vneg_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VNEG_F64, dd, 0, dm);
+ }
+
+ void ldrImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm, true);
+ }
+
+ void ldrUniqueImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm);
+ }
+
+ void dtrUp(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void dtrUpRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void dtrDown(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void dtrDownRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void halfDtrUp(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void halfDtrUpRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rn, rm);
+ }
+
+ void halfDtrDown(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void halfDtrDownRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rn, rm);
+ }
+
+ void doubleDtrUp(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | DataTransferUp | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void doubleDtrDown(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void push(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | StoreUint32 | DataTransferWriteBack | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ void pop(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | (LoadUint32 ^ DataTransferPostUpdate) | DataTransferUp | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ inline void poke(int reg, Condition cc = AL)
+ {
+ dtrDown(StoreUint32, ARMRegisters::sp, 0, reg, cc);
+ }
+
+ inline void peek(int reg, Condition cc = AL)
+ {
+ dtrUp(LoadUint32, reg, ARMRegisters::sp, 0, cc);
+ }
+
+ void vmov_vfp64(int sm, int rt, int rt2, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_arm64(int rt, int rt2, int sm, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_vfp32(int sn, int rt, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_VFP32, rt << 1, sn, 0);
+ }
+
+ void vmov_arm32(int rt, int sn, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_ARM32, rt << 1, sn, 0);
+ }
+
+ void vcvt_f64_s32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(!(sm & 0x1)); // sm must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
+ }
+
+ void vcvt_s32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_u32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_U32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_f64_f32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_F32, dd, 0, sm);
+ }
+
+ void vcvt_f32_f64(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F32_F64, dd, 0, sm);
+ }
+
+ void vmrs_apsr(Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | VMRS_APSR);
+ }
+
+ void clz(int rd, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | CLZ | RD(rd) | RM(rm));
+ }
+
+ void bkpt(ARMWord value)
+ {
+ m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+ }
+
+ void nop()
+ {
+ m_buffer.putInt(NOP);
+ }
+
+ void bx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm));
+ }
+
+ AssemblerLabel blx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BLX, 0, 0, RM(rm));
+ return m_buffer.label();
+ }
+
+ static ARMWord lsl(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x00;
+ }
+
+ static ARMWord lsr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x20;
+ }
+
+ static ARMWord asr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x40;
+ }
+
+ static ARMWord lslRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x10;
+ }
+
+ static ARMWord lsrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x30;
+ }
+
+ static ARMWord asrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x50;
+ }
+
+ // General helpers
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord));
+ AssemblerLabel result = m_buffer.label();
+ if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize()))
+ result = label();
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = labelIgnoringWatchpoints();
+ while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) {
+ nop();
+ // The available number of instructions are ensured by labelForWatchpoint.
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ mov(ARMRegisters::r0, ARMRegisters::r0);
+
+ return label();
+ }
+
+ AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
+ {
+ ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
+ m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
+ ldrUniqueImmediate(rd, InvalidBranchTarget, cc);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
+ {
+ return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID, JITCompilationEffort);
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // DFG assembly helpers for moving data between fp and registers.
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+ vmov_arm64(rd1, rd2, rn);
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ vmov_vfp64(rd, rn1, rn2);
+ }
+
+ // Patching helpers
+
+ static ARMWord* getLdrImmAddress(ARMWord* insn)
+ {
+ // Check for call
+ if ((*insn & LdrPcImmediateInstructionMask) != LdrPcImmediateInstruction) {
+ // Must be BLX
+ ASSERT((*insn & BlxInstructionMask) == BlxInstruction);
+ insn--;
+ }
+
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetchOffset * sizeof(ARMWord);
+ if (*insn & DataTransferUp)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & DataTransferOffsetMask));
+ return reinterpret_cast<ARMWord*>(addr - (*insn & DataTransferOffsetMask));
+ }
+
+ static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
+ {
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+ if (*insn & 0x1)
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & DataTransferOffsetMask) >> 1));
+ return getLdrImmAddress(insn);
+ }
+
+ static void patchPointerInternal(intptr_t from, void* to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ }
+
+ static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+ {
+ value = (value << 1) + 1;
+ ASSERT(!(value & ~DataTransferOffsetMask));
+ return (load & ~DataTransferOffsetMask) | value;
+ }
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // Read pointers
+ static void* readPointer(void* from)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(from);
+ ARMWord* address = getLdrImmAddress(instruction);
+ return *reinterpret_cast<void**>(address);
+ }
+
+ // Patch pointers
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+ }
+
+ static void repatchInt32(void* where, int32_t to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(where), reinterpret_cast<void*>(to));
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(where);
+ ASSERT((*instruction & 0x0f700000) == LoadUint32);
+ if (value >= 0)
+ *instruction = (*instruction & 0xff7ff000) | DataTransferUp | value;
+ else
+ *instruction = (*instruction & 0xff7ff000) | -value;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+ }
+
+ // Linkers
+ static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
+ {
+ return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
+ ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
+ *addr = toARMWord(to.m_offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from))));
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetchOffset * sizeof(ARMWord));
+
+ if (!(difference & 1)) {
+ difference >>= 2;
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ // Direct branch.
+ instruction[0] = B | AL | (difference & BranchOffsetMask);
+ cacheFlush(instruction, sizeof(ARMWord));
+ return;
+ }
+ }
+
+ // Load target.
+ instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4;
+ instruction[1] = reinterpret_cast<ARMWord>(to);
+ cacheFlush(instruction, sizeof(ARMWord) * 2);
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(ARMWord) * 2;
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | LdrImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | AddImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void revertBranchPtrWithPatch(void* instructionStart, RegisterID rn, ARMWord imm)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+
+ ASSERT((instruction[2] & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+ instruction[0] = toARMWord(AL) | ((instruction[2] & 0x0fff0fff) + sizeof(ARMWord)) | RD(ARMRegisters::S1);
+ *getLdrImmAddress(instruction) = imm;
+ instruction[1] = toARMWord(AL) | CMP | SetConditionalCodes | RN(rn) | RM(ARMRegisters::S1);
+ cacheFlush(instruction, 2 * sizeof(ARMWord));
+ }
+
+ // Address operations
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ // Address differences
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ return call.m_offset;
+ }
+
+ // Handle immediates
+
+ static ARMWord getOp2(ARMWord imm);
+
+ // Fast case if imm is known to be between 0 and 0xff
+ static ARMWord getOp2Byte(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return Op2Immediate | imm;
+ }
+
+ static ARMWord getOp2Half(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return ImmediateForHalfWordTransfer | (imm & 0x0f) | ((imm & 0xf0) << 4);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ static ARMWord getImm16Op2(ARMWord imm)
+ {
+ if (imm <= 0xffff)
+ return (imm & 0xf000) << 4 | (imm & 0xfff);
+ return InvalidImmediate;
+ }
+#endif
+ ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+ void moveImm(ARMWord imm, int dest);
+ ARMWord encodeComplexImm(ARMWord imm, int dest);
+
+ // Memory load/store helpers
+
+ void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+
+        // Constant pool handlers
+
+ static ARMWord placeConstantPoolBarrier(int offset)
+ {
+ offset = (offset - sizeof(ARMWord)) >> 2;
+ ASSERT((offset <= MaximumBranchOffsetDistance && offset >= MinimumBranchOffsetDistance));
+ return AL | B | (offset & BranchOffsetMask);
+ }
+
+#if OS(LINUX) && COMPILER(GCC)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (begin), "r" (end)
+ : "r0", "r1", "r2");
+ }
+#endif
+
+#if OS(LINUX) && COMPILER(RVCT)
+ static __asm void cacheFlush(void* code, size_t);
+#else
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(LINUX) && COMPILER(GCC)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#elif OS(WINCE)
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+#elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(size);
+#elif OS(QNX)
+ msync(code, size, MS_INVALIDATE_ICACHE);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+#endif
+
+ private:
+ static ARMWord RM(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg;
+ }
+
+ static ARMWord RS(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 8;
+ }
+
+ static ARMWord RD(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 12;
+ }
+
+ static ARMWord RN(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 16;
+ }
+
+ static ARMWord getConditionalField(ARMWord i)
+ {
+ return i & ConditionalFieldMask;
+ }
+
+ static ARMWord toARMWord(Condition cc)
+ {
+ return static_cast<ARMWord>(cc);
+ }
+
+ static ARMWord toARMWord(uint32_t u)
+ {
+ return static_cast<ARMWord>(u);
+ }
+
+ int genInt(int reg, ARMWord imm, bool positive);
+
+ ARMBuffer m_buffer;
+ Jumps m_jumps;
+ uint32_t m_indexOfTailOfLastWatchpoint;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.cpp b/src/3rdparty/masm/assembler/ARMv7Assembler.cpp
new file mode 100644
index 0000000000..faca66421b
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMv7Assembler.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "ARMv7Assembler.h"
+
+namespace JSC {
+
+}
+
+#endif
diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.h b/src/3rdparty/masm/assembler/ARMv7Assembler.h
new file mode 100644
index 0000000000..7dcf656921
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMv7Assembler.h
@@ -0,0 +1,2790 @@
+/*
+ * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARMRegisters {
+ typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7, wr = r7, // thumb work register
+ r8,
+ r9, sb = r9, // static base
+ r10, sl = r10, // stack limit
+ r11, fp = r11, // frame pointer
+ r12, ip = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15,
+ } RegisterID;
+
+ typedef enum {
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
+ } FPSingleRegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
+ } FPDoubleRegisterID;
+
+ typedef enum {
+ q0,
+ q1,
+ q2,
+ q3,
+ q4,
+ q5,
+ q6,
+ q7,
+ q8,
+ q9,
+ q10,
+ q11,
+ q12,
+ q13,
+ q14,
+ q15,
+ q16,
+ q17,
+ q18,
+ q19,
+ q20,
+ q21,
+ q22,
+ q23,
+ q24,
+ q25,
+ q26,
+ q27,
+ q28,
+ q29,
+ q30,
+ q31,
+ } FPQuadRegisterID;
+
+ inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
+ {
+ ASSERT(reg < d16);
+ return (FPSingleRegisterID)(reg << 1);
+ }
+
+ inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
+ {
+ ASSERT(!(reg & 1));
+ return (FPDoubleRegisterID)(reg >> 1);
+ }
+}
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+ friend class ARMv7Assembler;
+
+ typedef uint8_t ThumbImmediateType;
+ static const ThumbImmediateType TypeInvalid = 0;
+ static const ThumbImmediateType TypeEncoded = 1;
+ static const ThumbImmediateType TypeUInt16 = 2;
+
+ typedef union {
+ int16_t asInt;
+ struct {
+ unsigned imm8 : 8;
+ unsigned imm3 : 3;
+ unsigned i : 1;
+ unsigned imm4 : 4;
+ };
+ // If this is an encoded immediate, then it may describe a shift, or a pattern.
+ struct {
+ unsigned shiftValue7 : 7;
+ unsigned shiftAmount : 5;
+ };
+ struct {
+ unsigned immediate : 8;
+ unsigned pattern : 4;
+ };
+ } ThumbImmediateValue;
+
+ // byte0 contains least significant bit; not using an array to make client code endian agnostic.
+ typedef union {
+ int32_t asInt;
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ };
+ } PatternBytes;
+
+ ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+ {
+ if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
+ value >>= N; /* if any were set, lose the bottom N */
+ else /* if none of the top N bits are set, */
+ zeros += N; /* then we have identified N leading zeros */
+ }
+
+ static int32_t countLeadingZeros(uint32_t value)
+ {
+ if (!value)
+ return 32;
+
+ int32_t zeros = 0;
+ countLeadingZerosPartial(value, zeros, 16);
+ countLeadingZerosPartial(value, zeros, 8);
+ countLeadingZerosPartial(value, zeros, 4);
+ countLeadingZerosPartial(value, zeros, 2);
+ countLeadingZerosPartial(value, zeros, 1);
+ return zeros;
+ }
+
+ ARMThumbImmediate()
+ : m_type(TypeInvalid)
+ {
+ m_value.asInt = 0;
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+ : m_type(type)
+ , m_value(value)
+ {
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+ : m_type(TypeUInt16)
+ {
+ // Make sure this constructor is only reached with type TypeUInt16;
+ // this extra parameter makes the code a little clearer by making it
+ // explicit at call sites which type is being constructed
+ ASSERT_UNUSED(type, type == TypeUInt16);
+
+ m_value.asInt = value;
+ }
+
+public:
+ static ARMThumbImmediate makeEncodedImm(uint32_t value)
+ {
+ ThumbImmediateValue encoding;
+ encoding.asInt = 0;
+
+ // okay, these are easy.
+ if (value < 256) {
+ encoding.immediate = value;
+ encoding.pattern = 0;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ int32_t leadingZeros = countLeadingZeros(value);
+ // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+ ASSERT(leadingZeros < 24);
+
+ // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+ // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+ // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+ int32_t rightShiftAmount = 24 - leadingZeros;
+ if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+ // Shift the value down to the low byte position. The assign to
+ // shiftValue7 drops the implicit top bit.
+ encoding.shiftValue7 = value >> rightShiftAmount;
+ // The encoded shift amount is the magnitude of a right rotate.
+ encoding.shiftAmount = 8 + leadingZeros;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ PatternBytes bytes;
+ bytes.asInt = value;
+
+ if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 3;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 1;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+ encoding.immediate = bytes.byte1;
+ encoding.pattern = 2;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ return ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12(int32_t value)
+ {
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+ {
+ // If this is not a 12-bit unsigned int, try making an encoded immediate.
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : makeEncodedImm(value);
+ }
+
+ // The 'make' methods, above, return a !isValid() value if the argument
+ // cannot be represented as the requested type. This method is called
+ // 'get' since the argument can always be represented.
+ static ARMThumbImmediate makeUInt16(uint16_t value)
+ {
+ return ARMThumbImmediate(TypeUInt16, value);
+ }
+
+ bool isValid()
+ {
+ return m_type != TypeInvalid;
+ }
+
+ uint16_t asUInt16() const { return m_value.asInt; }
+
+ // These methods rely on the format of encoded byte values.
+ bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+ bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+ bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+ bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+ bool isUInt7() { return !(m_value.asInt & 0xff80); }
+ bool isUInt8() { return !(m_value.asInt & 0xff00); }
+ bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+ bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+ bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+ bool isUInt16() { return m_type == TypeUInt16; }
+ uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+ uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+ uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+ uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+ uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+ uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+ uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+ uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+ uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+ uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+ bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+ ThumbImmediateType m_type;
+ ThumbImmediateValue m_value;
+};
+
+typedef enum {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+
+ SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ShiftTypeAndAmount {
+ friend class ARMv7Assembler;
+
+public:
+ ShiftTypeAndAmount()
+ {
+ m_u.type = (ARMShiftType)0;
+ m_u.amount = 0;
+ }
+
+ ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+ {
+ m_u.type = type;
+ m_u.amount = amount & 31;
+ }
+
+ unsigned lo4() { return m_u.lo4; }
+ unsigned hi4() { return m_u.hi4; }
+
+private:
+ union {
+ struct {
+ unsigned lo4 : 4;
+ unsigned hi4 : 4;
+ };
+ struct {
+ unsigned type : 2;
+ unsigned amount : 6;
+ };
+ } m_u;
+};
+
+class ARMv7Assembler {
+public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
+ typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
+ typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ,
+ ConditionNE,
+ ConditionHS, ConditionCS = ConditionHS,
+ ConditionLO, ConditionCC = ConditionLO,
+ ConditionMI,
+ ConditionPL,
+ ConditionVS,
+ ConditionVC,
+ ConditionHI,
+ ConditionLS,
+ ConditionGE,
+ ConditionLT,
+ ConditionGT,
+ ConditionLE,
+ ConditionAL,
+ ConditionInvalid
+ } Condition;
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
+ enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
+ JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
+ JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
+ JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
+ JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
+ };
+ enum JumpLinkType {
+ LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+ LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
+ LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
+ LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
+ LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
+ LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
+ LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
+ LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
+ };
+
+ class LinkRecord {
+ public:
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+ {
+ data.realTypes.m_from = from;
+ data.realTypes.m_to = to;
+ data.realTypes.m_type = type;
+ data.realTypes.m_linkType = LinkInvalid;
+ data.realTypes.m_condition = condition;
+ }
+ void operator=(const LinkRecord& other)
+ {
+ data.copyTypes.content[0] = other.data.copyTypes.content[0];
+ data.copyTypes.content[1] = other.data.copyTypes.content[1];
+ data.copyTypes.content[2] = other.data.copyTypes.content[2];
+ }
+ intptr_t from() const { return data.realTypes.m_from; }
+ void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+ intptr_t to() const { return data.realTypes.m_to; }
+ JumpType type() const { return data.realTypes.m_type; }
+ JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+ Condition condition() const { return data.realTypes.m_condition; }
+ private:
+ union {
+ struct RealTypes {
+ intptr_t m_from : 31;
+ intptr_t m_to : 31;
+ JumpType m_type : 8;
+ JumpLinkType m_linkType : 8;
+ Condition m_condition : 16;
+ } realTypes;
+ struct CopyTypes {
+ uint32_t content[3];
+ } copyTypes;
+ COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+ } data;
+ };
+
+ ARMv7Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+private:
+
+ // ARMv7, Appx-A.6.3
+ static bool BadReg(RegisterID reg)
+ {
+ return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
+ }
+
+ uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
+ {
+ uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+ if (rdNum & 1)
+ rdMask |= 1 << lowBitShift;
+ return rdMask;
+ }
+
+ uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
+ {
+ uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+ if (rdNum & 16)
+ rdMask |= 1 << highBitShift;
+ return rdMask;
+ }
+
+ typedef enum {
+ OP_ADD_reg_T1 = 0x1800,
+ OP_SUB_reg_T1 = 0x1A00,
+ OP_ADD_imm_T1 = 0x1C00,
+ OP_SUB_imm_T1 = 0x1E00,
+ OP_MOV_imm_T1 = 0x2000,
+ OP_CMP_imm_T1 = 0x2800,
+ OP_ADD_imm_T2 = 0x3000,
+ OP_SUB_imm_T2 = 0x3800,
+ OP_AND_reg_T1 = 0x4000,
+ OP_EOR_reg_T1 = 0x4040,
+ OP_TST_reg_T1 = 0x4200,
+ OP_RSB_imm_T1 = 0x4240,
+ OP_CMP_reg_T1 = 0x4280,
+ OP_ORR_reg_T1 = 0x4300,
+ OP_MVN_reg_T1 = 0x43C0,
+ OP_ADD_reg_T2 = 0x4400,
+ OP_MOV_reg_T1 = 0x4600,
+ OP_BLX = 0x4700,
+ OP_BX = 0x4700,
+ OP_STR_reg_T1 = 0x5000,
+ OP_STRH_reg_T1 = 0x5200,
+ OP_STRB_reg_T1 = 0x5400,
+ OP_LDRSB_reg_T1 = 0x5600,
+ OP_LDR_reg_T1 = 0x5800,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_LDRB_reg_T1 = 0x5C00,
+ OP_LDRSH_reg_T1 = 0x5E00,
+ OP_STR_imm_T1 = 0x6000,
+ OP_LDR_imm_T1 = 0x6800,
+ OP_STRB_imm_T1 = 0x7000,
+ OP_LDRB_imm_T1 = 0x7800,
+ OP_STRH_imm_T1 = 0x8000,
+ OP_LDRH_imm_T1 = 0x8800,
+ OP_STR_imm_T2 = 0x9000,
+ OP_LDR_imm_T2 = 0x9800,
+ OP_ADD_SP_imm_T1 = 0xA800,
+ OP_ADD_SP_imm_T2 = 0xB000,
+ OP_SUB_SP_imm_T1 = 0xB080,
+ OP_BKPT = 0xBE00,
+ OP_IT = 0xBF00,
+ OP_NOP_T1 = 0xBF00,
+ } OpcodeID;
+
+ typedef enum {
+ OP_B_T1 = 0xD000,
+ OP_B_T2 = 0xE000,
+ OP_AND_reg_T2 = 0xEA00,
+ OP_TST_reg_T2 = 0xEA10,
+ OP_ORR_reg_T2 = 0xEA40,
+ OP_ORR_S_reg_T2 = 0xEA50,
+ OP_ASR_imm_T1 = 0xEA4F,
+ OP_LSL_imm_T1 = 0xEA4F,
+ OP_LSR_imm_T1 = 0xEA4F,
+ OP_ROR_imm_T1 = 0xEA4F,
+ OP_MVN_reg_T2 = 0xEA6F,
+ OP_EOR_reg_T2 = 0xEA80,
+ OP_ADD_reg_T3 = 0xEB00,
+ OP_ADD_S_reg_T3 = 0xEB10,
+ OP_SUB_reg_T2 = 0xEBA0,
+ OP_SUB_S_reg_T2 = 0xEBB0,
+ OP_CMP_reg_T2 = 0xEBB0,
+ OP_VMOV_CtoD = 0xEC00,
+ OP_VMOV_DtoC = 0xEC10,
+ OP_FSTS = 0xED00,
+ OP_VSTR = 0xED00,
+ OP_FLDS = 0xED10,
+ OP_VLDR = 0xED10,
+ OP_VMOV_CtoS = 0xEE00,
+ OP_VMOV_StoC = 0xEE10,
+ OP_VMUL_T2 = 0xEE20,
+ OP_VADD_T2 = 0xEE30,
+ OP_VSUB_T2 = 0xEE30,
+ OP_VDIV = 0xEE80,
+ OP_VABS_T2 = 0xEEB0,
+ OP_VCMP = 0xEEB0,
+ OP_VCVT_FPIVFP = 0xEEB0,
+ OP_VMOV_T2 = 0xEEB0,
+ OP_VMOV_IMM_T2 = 0xEEB0,
+ OP_VMRS = 0xEEB0,
+ OP_VNEG_T2 = 0xEEB0,
+ OP_VSQRT_T1 = 0xEEB0,
+ OP_VCVTSD_T1 = 0xEEB0,
+ OP_VCVTDS_T1 = 0xEEB0,
+ OP_B_T3a = 0xF000,
+ OP_B_T4a = 0xF000,
+ OP_AND_imm_T1 = 0xF000,
+ OP_TST_imm = 0xF010,
+ OP_ORR_imm_T1 = 0xF040,
+ OP_MOV_imm_T2 = 0xF040,
+ OP_MVN_imm = 0xF060,
+ OP_EOR_imm_T1 = 0xF080,
+ OP_ADD_imm_T3 = 0xF100,
+ OP_ADD_S_imm_T3 = 0xF110,
+ OP_CMN_imm = 0xF110,
+ OP_ADC_imm = 0xF140,
+ OP_SUB_imm_T3 = 0xF1A0,
+ OP_SUB_S_imm_T3 = 0xF1B0,
+ OP_CMP_imm_T2 = 0xF1B0,
+ OP_RSB_imm_T2 = 0xF1C0,
+ OP_RSB_S_imm_T2 = 0xF1D0,
+ OP_ADD_imm_T4 = 0xF200,
+ OP_MOV_imm_T3 = 0xF240,
+ OP_SUB_imm_T4 = 0xF2A0,
+ OP_MOVT = 0xF2C0,
+ OP_UBFX_T1 = 0xF3C0,
+ OP_NOP_T2a = 0xF3AF,
+ OP_STRB_imm_T3 = 0xF800,
+ OP_STRB_reg_T2 = 0xF800,
+ OP_LDRB_imm_T3 = 0xF810,
+ OP_LDRB_reg_T2 = 0xF810,
+ OP_STRH_imm_T3 = 0xF820,
+ OP_STRH_reg_T2 = 0xF820,
+ OP_LDRH_reg_T2 = 0xF830,
+ OP_LDRH_imm_T3 = 0xF830,
+ OP_STR_imm_T4 = 0xF840,
+ OP_STR_reg_T2 = 0xF840,
+ OP_LDR_imm_T4 = 0xF850,
+ OP_LDR_reg_T2 = 0xF850,
+ OP_STRB_imm_T2 = 0xF880,
+ OP_LDRB_imm_T2 = 0xF890,
+ OP_STRH_imm_T2 = 0xF8A0,
+ OP_LDRH_imm_T2 = 0xF8B0,
+ OP_STR_imm_T3 = 0xF8C0,
+ OP_LDR_imm_T3 = 0xF8D0,
+ OP_LDRSB_reg_T2 = 0xF910,
+ OP_LDRSH_reg_T2 = 0xF930,
+ OP_LSL_reg_T2 = 0xFA00,
+ OP_LSR_reg_T2 = 0xFA20,
+ OP_ASR_reg_T2 = 0xFA40,
+ OP_ROR_reg_T2 = 0xFA60,
+ OP_CLZ = 0xFAB0,
+ OP_SMULL_T1 = 0xFB80,
+#if CPU(APPLE_ARMV7S)
+ OP_SDIV_T1 = 0xFB90,
+ OP_UDIV_T1 = 0xFBB0,
+#endif
+ } OpcodeID1;
+
+ typedef enum {
+ OP_VADD_T2b = 0x0A00,
+ OP_VDIVb = 0x0A00,
+ OP_FLDSb = 0x0A00,
+ OP_VLDRb = 0x0A00,
+ OP_VMOV_IMM_T2b = 0x0A00,
+ OP_VMOV_T2b = 0x0A40,
+ OP_VMUL_T2b = 0x0A00,
+ OP_FSTSb = 0x0A00,
+ OP_VSTRb = 0x0A00,
+ OP_VMOV_StoCb = 0x0A10,
+ OP_VMOV_CtoSb = 0x0A10,
+ OP_VMOV_DtoCb = 0x0A10,
+ OP_VMOV_CtoDb = 0x0A10,
+ OP_VMRSb = 0x0A10,
+ OP_VABS_T2b = 0x0A40,
+ OP_VCMPb = 0x0A40,
+ OP_VCVT_FPIVFPb = 0x0A40,
+ OP_VNEG_T2b = 0x0A40,
+ OP_VSUB_T2b = 0x0A40,
+ OP_VSQRT_T1b = 0x0A40,
+ OP_VCVTSD_T1b = 0x0A40,
+ OP_VCVTDS_T1b = 0x0A40,
+ OP_NOP_T2b = 0x8000,
+ OP_B_T3b = 0x8000,
+ OP_B_T4b = 0x9000,
+ } OpcodeID2;
+
+ struct FourFours {
+ FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+ {
+ m_u.f0 = f0;
+ m_u.f1 = f1;
+ m_u.f2 = f2;
+ m_u.f3 = f3;
+ }
+
+ union {
+ unsigned value;
+ struct {
+ unsigned f0 : 4;
+ unsigned f1 : 4;
+ unsigned f2 : 4;
+ unsigned f3 : 4;
+ };
+ } m_u;
+ };
+
+ class ARMInstructionFormatter;
+
+ // false means else!
+ bool ifThenElseConditionBit(Condition condition, bool isIf)
+ {
+ return isIf ? (condition & 1) : !(condition & 1);
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | (ifThenElseConditionBit(condition, inst4if) << 1)
+ | 1;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | 2;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | 4;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+
+ uint8_t ifThenElse(Condition condition)
+ {
+ int mask = 8;
+ return (condition << 4) | mask;
+ }
+
+public:
+
+ void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
+ }
+
+ void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if (rn == ARMRegisters::sp) {
+ ASSERT(!(imm.getUInt16() & 3));
+ if (!(rd & 8) && imm.isUInt10()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ return;
+ } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ }
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+ }
+ }
+
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, add doesn't modify the flags register.
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (rd == rn)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+ else if (rd == rm)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+ else if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+ else
+ ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel b()
+ {
+ m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
+ {
+ ASSERT(rm != ARMRegisters::pc);
+ m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return m_formatter.label();
+ }
+
+ void bkpt(uint8_t imm = 0)
+ {
+ m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+ }
+
+ ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
+ }
+
+ ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!(rn & 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ cmp(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+ else
+ eor(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void it(Condition cond)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
+ }
+
+ ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt7());
+ ASSERT(!((rt | rn) & 8));
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then it is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // LDR (register): uses the 16-bit T1 encoding when there is no shift and all
+ // registers are low (r0-r7), otherwise the 32-bit T2 encoding where rm may be
+ // left-shifted by up to 3.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // LDRH (immediate): 16-bit T1 encoding for low registers with a small aligned
+ // offset (stored as imm >> 2 per the code below), else the 32-bit T2 encoding.
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then it is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn)); // writeback with rt == rn is unpredictable
+
+ // Fold the sign of the offset into the encoding's add/subtract bit.
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ // Pack wback (bit 8), add (bit 9), index (bit 10) and fixed bit 11
+ // around the 8-bit offset for the T3 encoding.
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+ }
+
+ // LDRH (register): 16-bit T1 form when unshifted with all-low registers,
+ // else the 32-bit T2 form with optional left shift of rm.
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(!BadReg(rt)); // Memory hint
+ ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // LDRB (immediate): 16-bit T1 form for low registers with a 5-bit offset
+ // (byte loads need no alignment scaling), else the 32-bit T2 form.
+ void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt5())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt)
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // LDRB (immediate, T3) with pre/post-indexing and optional writeback:
+ // index selects pre-indexed (offset applied before the access) vs.
+ // post-indexed; wback selects whether rn is updated. See the pseudo-code
+ // on the ldr() overload above for the exact semantics.
+ void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn)); // writeback with rt == rn is unpredictable
+
+ // Fold the sign of the offset into the encoding's add/subtract bit.
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
+ ASSERT(!(offset & ~0xff));
+
+ // Pack wback (bit 8), add (bit 9), index (bit 10) and fixed bit 11
+ // around the 8-bit offset for the T3 encoding.
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
+ }
+
+ // LDRB (register): 16-bit T1 form when unshifted with all-low registers,
+ // else 32-bit T2 with optional left shift of rm.
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // LDRSB (register): sign-extending byte load; same T1/T2 selection as above.
+ void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // LDRSH (register): sign-extending halfword load; same T1/T2 selection.
+ void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // LSL (immediate): logical shift left of rm by a constant amount.
+ void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // LSL (register): rd = rn << rm.
+ ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // LSR (immediate): logical shift right of rm by a constant amount.
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // LSR (register): rd = rn >> rm (logical).
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // MOV (immediate, T3): moves an arbitrary 16-bit immediate that is not
+ // expressible as a Thumb-2 encoded (modified) immediate.
+ ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+ }
+
+#if OS(LINUX) || OS(QNX)
+ // Rewrites, in place, a previously emitted 5-halfword sequence to
+ // movw/movt (loading the 32-bit imm into 'right') followed by a
+ // CMP 'left', 'right'; flushes the instruction cache over the patch.
+ static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
+ {
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
+ address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
+ address[4] = OP_CMP_reg_T2 | left;
+ cacheFlush(address, sizeof(uint16_t) * 5);
+ }
+#else
+ // Rewrites, in place, a previously emitted 2-halfword sequence to a
+ // single MOV (immediate, T3); flushes the instruction cache over the patch.
+ static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
+ cacheFlush(address, sizeof(uint16_t) * 2);
+ }
+#endif
+
+ // MOV (immediate): picks the smallest viable encoding - 16-bit T1 for a
+ // low register with an 8-bit value, 32-bit T2 for an encodable modified
+ // immediate, otherwise the movw-style T3 form.
+ ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!BadReg(rd));
+
+ if ((rd < 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+ else if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+ else
+ movT3(rd, imm);
+ }
+
+ // MOV (register, T1): works for high and low registers.
+ ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+ }
+
+ // MOVT: writes a 16-bit immediate into the top half of rd, leaving the
+ // bottom half unchanged.
+ ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isUInt16());
+ ASSERT(!BadReg(rd));
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+ }
+
+ // MVN (immediate): rd = ~imm; requires an encodable modified immediate.
+ ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+ }
+
+ // MVN (register, shifted): rd = ~(rm shifted).
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // MVN (register): 16-bit T1 form for low registers, else the shifted T2 form.
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+ {
+ if (!((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+ else
+ mvn(rd, rm, ShiftTypeAndAmount());
+ }
+
+ // NEG: synthesized as rd = 0 - rm via the reverse-subtract sub() overload.
+ ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ sub(rd, zero, rm);
+ }
+
+ // ORR (immediate): rd = rn | imm; requires an encodable modified immediate.
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+ }
+
+ // ORR (register, shifted): rd = rn | (rm shifted).
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // ORR (register): uses the 16-bit T1 form (which is commutative, so either
+ // operand order that matches rd works) when possible, else the T2 form.
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // ORRS (register, shifted): flag-setting variant.
+ ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // ORRS (register). NOTE(review): the narrow OP_ORR_reg_T1 form sets flags
+ // only when executed outside an IT block - presumably callers guarantee
+ // that here; confirm before using orr_S inside IT blocks.
+ void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // ROR (immediate): rotate right of rm by a constant amount.
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // ROR (register): rd = rn rotated right by rm.
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+#if CPU(APPLE_ARMV7S)
+ // SDIV: signed integer divide; only available on cores with the hardware
+ // divide extension (gated on ARMv7s here).
+ ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ // SMULL: signed 32x32 -> 64-bit multiply; result split across rdLo/rdHi,
+ // which must be distinct registers.
+ ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rdLo));
+ ASSERT(!BadReg(rdHi));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ ASSERT(rdLo != rdHi);
+ m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // STR (immediate): picks the smallest encoding - 16-bit T1 (low regs,
+ // word-aligned 7-bit offset), 16-bit T2 (SP-relative, 10-bit aligned
+ // offset), else the 32-bit T3 form.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then it is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn)); // writeback with rt == rn is unpredictable
+
+ // Fold the sign of the offset into the encoding's add/subtract bit.
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ // Pack wback (bit 8), add (bit 9), index (bit 10) and fixed bit 11
+ // around the 8-bit offset for the T4 encoding.
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // STR (register): 16-bit T1 form when unshifted with all-low registers,
+ // else 32-bit T2 with optional left shift of rm.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // STRB (immediate): 16-bit T1 form for low registers with a 7-bit offset
+ // (stored as imm >> 2 per the code below), else the 32-bit T2 form.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then it is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn)); // writeback with rt == rn is unpredictable
+
+ // Fold the sign of the offset into the encoding's add/subtract bit.
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ // Pack wback (bit 8), add (bit 9), index (bit 10) and fixed bit 11
+ // around the 8-bit offset for the T3 encoding.
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // STRB (register): 16-bit T1 form when unshifted with all-low registers,
+ // else 32-bit T2 with optional left shift of rm.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // STRH (immediate): 16-bit T1 form for low registers with a 7-bit offset
+ // (stored as imm >> 2 per the code below), else the 32-bit T2 form.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then it is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback || (rt != rn)); // writeback with rt == rn is unpredictable
+
+ // Fold the sign of the offset into the encoding's add/subtract bit.
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT(!(offset & ~0xff));
+
+ // Pack wback (bit 8), add (bit 9), index (bit 10) and fixed bit 11
+ // around the 8-bit offset for the T3 encoding.
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ // STRH (register): 16-bit T1 form when unshifted with all-low registers,
+ // else 32-bit T2 with optional left shift of rm.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // SUB (immediate): rd = rn - imm. Picks the smallest encoding - SP-adjust
+ // T1, narrow T1/T2 for low registers, T3 for an encodable modified
+ // immediate, else the 12-bit T4 form.
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+ }
+ }
+
+ // RSB (reverse subtract, immediate): rd = imm - rn. Note the argument
+ // order (imm before rn) distinguishes this from the plain sub() overload.
+ ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ if (!((rd | rn) & 8) && !imm.getUInt12())
+ m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
+ }
+
+ // SUB (register, shifted): rd = rn - (rm shifted).
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, sub doesn't modify the flags register.
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ // SUBS (immediate): flag-setting subtract. The narrow encodings chosen
+ // below set flags only outside an IT block, hence the restriction.
+ void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+ }
+
+ // RSBS (reverse subtract, immediate, flag-setting): rd = imm - rn.
+ ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ // SUBS (register, shifted): flag-setting variant.
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ // SUBS (register): narrow T1 form for low registers, else the T2 form.
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // TST (immediate): sets flags for rn & imm; the 0xf in the Rd field marks
+ // the discarded destination.
+ ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ // TST (register, shifted): sets flags for rn & (rm shifted).
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ // TST (register): 16-bit T1 form for low registers, else the shifted T2 form.
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ tst(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+ }
+
+ // UBFX: unsigned bit-field extract of 'width' bits starting at 'lsb'.
+ // The lsb is split across two immediate fields and width is encoded as
+ // width - 1, matching the T1 encoding layout.
+ ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
+ {
+ ASSERT(lsb < 32);
+ ASSERT((width >= 1) && (width <= 32));
+ ASSERT((lsb + width) <= 32);
+ m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
+ }
+
+#if CPU(APPLE_ARMV7S)
+ // UDIV: unsigned integer divide; only on cores with the hardware divide
+ // extension (gated on ARMv7s here).
+ ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ // VADD.F64: rd = rn + rm.
+ void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
+ }
+
+ // VCMP.F64: compare rd with rm, setting the VFP status flags.
+ void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
+ }
+
+ // VCMP.F64 against zero.
+ void vcmpz(FPDoubleRegisterID rd)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
+ }
+
+ // VCVT: signed 32-bit integer (in a single register) to double.
+ void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+ // boolean values are 64bit (toInt, unsigned, roundZero)
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
+ }
+
+ // VCVT: double to signed 32-bit integer, rounding toward zero.
+ void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ // boolean values are 64bit (toInt, unsigned, roundZero)
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
+ }
+
+ // VCVT: double to unsigned 32-bit integer, rounding toward zero.
+ void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ // boolean values are 64bit (toInt, unsigned, roundZero)
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
+ }
+
+ // VDIV.F64: rd = rn / rm.
+ void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
+ }
+
+ // VLDR: load a double from [rn + imm].
+ void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
+ }
+
+ // FLDS: load a single-precision float from [rn + imm].
+ void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
+ }
+
+ // VMOV: single-precision register to core register.
+ void vmov(RegisterID rd, FPSingleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd));
+ m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
+ }
+
+ // VMOV: core register to single-precision register.
+ void vmov(FPSingleRegisterID rd, RegisterID rn)
+ {
+ ASSERT(!BadReg(rn));
+ m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
+ }
+
+ // VMOV: double register to a pair of core registers (rd1 = low half).
+ // The "| 16" tags the operand so the formatter treats it as a core
+ // register pair - presumably; confirm against VFPOperand's encoding.
+ void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd1));
+ ASSERT(!BadReg(rd2));
+ m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
+ }
+
+ // VMOV: a pair of core registers to a double register.
+ void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ ASSERT(!BadReg(rn1));
+ ASSERT(!BadReg(rn2));
+ m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
+ }
+
+ // VMOV.F64: double register to double register.
+ void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
+ {
+ m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
+ }
+
+ // VMRS: read the FP status register; with the default reg == pc this is
+ // the form that transfers the FP condition flags to the APSR.
+ void vmrs(RegisterID reg = ARMRegisters::pc)
+ {
+ ASSERT(reg != ARMRegisters::sp);
+ m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
+ }
+
+ // VMUL.F64: rd = rn * rm.
+ void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
+ }
+
+ // VSTR: store a double to [rn + imm].
+ void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
+ }
+
+ // FSTS: store a single-precision float to [rn + imm].
+ void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
+ }
+
+ // VSUB.F64: rd = rn - rm.
+ void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
+ }
+
+ // VABS.F64: rd = |rm|.
+ void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
+ }
+
+ // VNEG.F64: rd = -rm.
+ void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
+ }
+
+ // VSQRT.F64: rd = sqrt(rm).
+ void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
+ }
+
+ // VCVT.F64.F32: single to double.
+ void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
+ }
+
+ // VCVT.F32.F64: double to single.
+ void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
+ }
+
+ // 16-bit NOP.
+ void nop()
+ {
+ m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
+ }
+
+ // 32-bit NOP (used when exactly 4 bytes of padding are needed).
+ void nopw()
+ {
+ m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
+ }
+
+ // Returns the current position without the watchpoint padding applied by
+ // label().
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ // Marks the current position as a watchpoint site and records the range
+ // that must be kept free for a later jump replacement.
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ // Returns the current position, emitting NOP padding first if we are
+ // still inside the reserved tail of the last watchpoint. Prefers the
+ // 4-byte nopw when at least 4 bytes of padding remain.
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
+ nopw();
+ else
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
+
+ // Pads with breakpoint instructions until the buffer is aligned, then
+ // returns a label at the aligned position.
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ // Translates a label to its absolute address within finalized code.
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ // Byte distance from label a to label b (positive when b follows a).
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ // Looks up the compaction offset recorded (by recordLinkOffsets) for the
+ // given buffer location; location 0 has no preceding entry, hence 0.
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
+ }
+
+ // Bytes saved by compacting a jump of the given type to the given link type.
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+
+ // Assembler admin methods:
+
+ // Orders link records by their source offset, for branch compaction.
+ static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+ {
+ return a.from() < b.from();
+ }
+
+ // True if jumps of this type may be shrunk during branch compaction.
+ bool canCompact(JumpType jumpType)
+ {
+ // The following cannot be compacted:
+ // JumpFixed: represents custom jump sequence
+ // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
+ // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
+ return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
+ }
+
+ // Chooses the smallest branch encoding that can span from 'from' to 'to'
+ // for the given jump type, trying 2-byte, then 4-byte, then the long
+ // (BX-based) sequence.
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ {
+ if (jumpType == JumpFixed)
+ return LinkInvalid;
+
+ // for patchable jump we must leave space for the longest code sequence
+ if (jumpType == JumpNoConditionFixedSize)
+ return LinkBX;
+ if (jumpType == JumpConditionFixedSize)
+ return LinkConditionalBX;
+
+ // The branch is placed at the end of the padded region; each candidate
+ // location below is where the (shorter) instruction would actually sit.
+ const int paddingSize = JUMP_ENUM_SIZE(jumpType);
+
+ if (jumpType == JumpCondition) {
+ // 2-byte conditional T1
+ const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
+ if (canBeJumpT1(jumpT1Location, to))
+ return LinkJumpT1;
+ // 4-byte conditional T3
+ const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
+ if (canBeJumpT3(jumpT3Location, to))
+ return LinkJumpT3;
+ // 4-byte conditional T4 with IT
+ const uint16_t* conditionalJumpT4Location =
+ reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
+ if (canBeJumpT4(conditionalJumpT4Location, to))
+ return LinkConditionalJumpT4;
+ } else {
+ // 2-byte unconditional T2
+ const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
+ if (canBeJumpT2(jumpT2Location, to))
+ return LinkJumpT2;
+ // 4-byte unconditional T4
+ const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
+ if (canBeJumpT4(jumpT4Location, to))
+ return LinkJumpT4;
+ // use long jump sequence
+ return LinkBX;
+ }
+
+ ASSERT(jumpType == JumpCondition);
+ return LinkConditionalBX;
+ }
+
+ // Convenience overload: computes and stores the link type on the record.
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ {
+ JumpLinkType linkType = computeJumpType(record.type(), from, to);
+ record.setLinkType(linkType);
+ return linkType;
+ }
+
+ // Records, for every 32-bit slot in [regionStart, regionEnd), the
+ // compaction offset to apply; read back by executableOffsetFor().
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+ {
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+ }
+
+ // Returns the pending jumps sorted by source offset (sorts in place).
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+ {
+ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+ return m_jumpsToLink;
+ }
+
+ // Writes the branch instruction selected by computeJumpType() at 'from',
+ // targeting 'to'; dispatches to the per-encoding link helper.
+ void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ {
+ switch (record.linkType()) {
+ case LinkJumpT1:
+ linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT2:
+ linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT3:
+ linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT4:
+ linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalJumpT4:
+ linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalBX:
+ linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkBX:
+ linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ // Raw (pre-finalization) code buffer and its size.
+ void* unlinkedCode() { return m_formatter.data(); }
+ size_t codeSize() const { return m_formatter.codeSize(); }
+
+ // The return address of a call is the label recorded immediately after it.
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
+    // Record an intra-buffer jump for deferred linking; the LinkRecord is
+    // resolved later via link() once final layout is known.
+    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+    {
+        ASSERT(to.isSet());
+        ASSERT(from.isSet());
+        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+    }
+
+    // Link the jump planted at buffer offset 'from' (within 'code') to an
+    // absolute target address.
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+
+        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+        linkJumpAbsolute(location, to);
+    }
+
+    // Patch the address constant used by a planted call sequence. The target
+    // must carry the Thumb interworking bit (low bit set); no cache flush, as
+    // the buffer is still writable at link time.
+    static void linkCall(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+        ASSERT(from.isSet());
+        ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
+    }
+
+    // Patch a pointer constant planted at label 'where'; no cache flush
+    // (link-time, code not yet protected).
+    static void linkPointer(void* code, AssemblerLabel where, void* value)
+    {
+        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
+    }
+
+    // Re-target an already-finalized jump in place, then flush the full
+    // five-halfword jump slot preceding 'from' from the instruction cache.
+    static void relinkJump(void* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
+
+        cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
+    }
+
+    // Re-target a finalized call: rewrite the address constant feeding it
+    // (target must have the Thumb bit set), flushing the cache.
+    static void relinkCall(void* from, void* to)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+        ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
+    }
+
+    // Read back the address constant a call sequence will branch to.
+    static void* readCallTarget(void* from)
+    {
+        return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
+    }
+
+    // Repatch a 32-bit constant materialized by the MOVW/MOVT pair ending at
+    // 'where', flushing the instruction cache.
+    static void repatchInt32(void* where, int32_t value)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+        setInt32(where, value, true);
+    }
+
+    // Repatch the displacement of a compact (two-halfword) load at 'where'.
+    static void repatchCompact(void* where, int32_t offset)
+    {
+        ASSERT(offset >= -255 && offset <= 255);
+
+        // Split sign from magnitude; the sign becomes the add/subtract bit.
+        bool add = true;
+        if (offset < 0) {
+            add = false;
+            offset = -offset;
+        }
+        
+        // NOTE(review): field layout appears to be the LDR-immediate form with
+        // imm8 in bits 7:0, U (add) in bit 9 and bits 10/11 fixed — confirm
+        // against the ARM ARM encoding tables before relying on this comment.
+        offset |= (add << 9);
+        offset |= (1 << 10);
+        offset |= (1 << 11);
+
+        uint16_t* location = reinterpret_cast<uint16_t*>(where);
+        location[1] &= ~((1 << 12) - 1); // clear low 12 bits of 2nd halfword
+        location[1] |= offset;
+        cacheFlush(location, sizeof(uint16_t) * 2);
+    }
+
+    // Repatch a pointer constant in finalized code (MOVW/MOVT pair), flushing.
+    static void repatchPointer(void* where, void* value)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+        setPointer(where, value, true);
+    }
+
+    // Read back a pointer constant previously planted as a 32-bit immediate.
+    static void* readPointer(void* where)
+    {
+        return reinterpret_cast<void*>(readInt32(where));
+    }
+
+    // Overwrite the instruction(s) at 'instructionStart' with a jump to 'to'.
+    // The reserved slot is maxJumpReplacementSize() bytes: on Linux/QNX a
+    // five-halfword MOVW/MOVT/BX sequence is available as a long-range
+    // fallback; elsewhere only the two-halfword B.W (T4) form is planted.
+    static void replaceWithJump(void* instructionStart, void* to)
+    {
+        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+        ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
+
+#if OS(LINUX) || OS(QNX)
+        if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
+            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+            linkJumpT4(ptr, to);
+            cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+        } else {
+            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
+            linkBX(ptr, to);
+            cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
+        }
+#else
+        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+        linkJumpT4(ptr, to);
+        cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+#endif
+    }
+
+    // Byte size of the slot replaceWithJump() may overwrite (see above).
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+#if OS(LINUX) || OS(QNX)
+        return 10;
+#else
+        return 4;
+#endif
+    }
+
+    // Convert an ADD-immediate (T3) at 'instructionStart' into the equivalent
+    // LDR-immediate (T3), in place; a no-op if it is already a load. The two
+    // encodings place the destination-register / immediate fields differently
+    // in the second halfword, hence the bit shuffling below.
+    static void replaceWithLoad(void* instructionStart)
+    {
+        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+        switch (ptr[0] & 0xFFF0) {
+        case OP_LDR_imm_T3:
+            break; // already a load — nothing to do
+        case OP_ADD_imm_T3:
+            ASSERT(!(ptr[1] & 0xF000));
+            ptr[0] &= 0x000F;          // keep Rn, drop old opcode bits
+            ptr[0] |= OP_LDR_imm_T3;
+            ptr[1] |= (ptr[1] & 0x0F00) << 4; // move Rd up to the LDR Rt slot
+            ptr[1] &= 0xF0FF;
+            cacheFlush(ptr, sizeof(uint16_t) * 2);
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    // Inverse of replaceWithLoad(): turn an LDR-immediate (T3) back into an
+    // ADD-immediate (T3), in place; a no-op if it is already an add.
+    static void replaceWithAddressComputation(void* instructionStart)
+    {
+        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+        switch (ptr[0] & 0xFFF0) {
+        case OP_LDR_imm_T3:
+            ASSERT(!(ptr[1] & 0x0F00));
+            ptr[0] &= 0x000F;          // keep Rn, drop old opcode bits
+            ptr[0] |= OP_ADD_imm_T3;
+            ptr[1] |= (ptr[1] & 0xF000) >> 4; // move Rt down to the ADD Rd slot
+            ptr[1] &= 0x0FFF;
+            cacheFlush(ptr, sizeof(uint16_t) * 2);
+            break;
+        case OP_ADD_imm_T3:
+            break; // already an address computation — nothing to do
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
+    }
+
+    // Debug-only offset of the current emission point in the buffer.
+    unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+#if OS(LINUX)
+    // Invoke the ARM-private cacheflush syscall for [begin, end). r7 is built
+    // as 0x0f0002 via movw/movt (the __ARM_NR_cacheflush number), r2 = 0 is
+    // the flags argument; r7 is saved/restored around the call because it can
+    // serve as the frame pointer in Thumb code.
+    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+    {
+        asm volatile(
+            "push {r7}\n"
+            "mov r0, %0\n"
+            "mov r1, %1\n"
+            "movw r7, #0x2\n"
+            "movt r7, #0xf\n"
+            "movs r2, #0x0\n"
+            "svc 0x0\n"
+            "pop {r7}\n"
+            :
+            : "r" (begin), "r" (end)
+            : "r0", "r1", "r2");
+    }
+#endif
+
+    // Synchronize the instruction cache with freshly written code in
+    // [code, code + size), using whatever mechanism the OS provides.
+    static void cacheFlush(void* code, size_t size)
+    {
+#if OS(IOS)
+        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+        // Issue one syscall per touched page rather than one for the whole
+        // range — presumably the cacheflush syscall cannot span multiple VM
+        // areas; TODO confirm against kernel behavior.
+        size_t page = pageSize();
+        uintptr_t current = reinterpret_cast<uintptr_t>(code);
+        uintptr_t end = current + size;
+        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+        if (end <= firstPageEnd) {
+            linuxPageFlush(current, end);
+            return;
+        }
+
+        linuxPageFlush(current, firstPageEnd);
+
+        for (current = firstPageEnd; current + page < end; current += page)
+            linuxPageFlush(current, current + page);
+
+        linuxPageFlush(current, end);
+#elif OS(WINCE)
+        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+#elif OS(QNX)
+#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+        msync(code, size, MS_INVALIDATE_ICACHE);
+#else
+        UNUSED_PARAM(code);
+        UNUSED_PARAM(size);
+#endif
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+    }
+
+private:
+ // VFP operations commonly take one or more 5-bit operands, typically representing a
+ // floating point register number. This will commonly be encoded in the instruction
+ // in two parts, with one single bit field, and one 4-bit field. In the case of
+ // double precision operands the high bit of the register number will be encoded
+ // separately, and for single precision operands the high bit of the register number
+ // will be encoded individually.
+ // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
+ // field to be encoded together in the instruction (the low 4-bits of a double
+ // register number, or the high 4-bits of a single register number), and bit 4
+ // contains the bit value to be encoded individually.
+ struct VFPOperand {
+ explicit VFPOperand(uint32_t value)
+ : m_value(value)
+ {
+ ASSERT(!(m_value & ~0x1f));
+ }
+
+ VFPOperand(FPDoubleRegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(RegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(FPSingleRegisterID reg)
+ : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
+ {
+ }
+
+ uint32_t bits1()
+ {
+ return m_value >> 4;
+ }
+
+ uint32_t bits4()
+ {
+ return m_value & 0xf;
+ }
+
+ uint32_t m_value;
+ };
+
+    // Build the opc2/op operand selecting a VCVT variant: to/from integer,
+    // signed/unsigned, and (for float->int only) round-toward-zero.
+    VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
+    {
+        // Cannot specify rounding when converting to float.
+        ASSERT(toInteger || !isRoundZero);
+
+        uint32_t op = 0x8;
+        if (toInteger) {
+            // opc2 indicates both toInteger & isUnsigned.
+            op |= isUnsigned ? 0x4 : 0x5;
+            // 'op' field in instruction is isRoundZero
+            if (isRoundZero)
+                op |= 0x10;
+        } else {
+            ASSERT(!isRoundZero);
+            // 'op' field in instruction is isUnsigned
+            if (!isUnsigned)
+                op |= 0x10;
+        }
+        return VFPOperand(op);
+    }
+
+    // Rewrite the MOVW/MOVT pair that ends at 'code' to load 'value',
+    // preserving each instruction's destination register (re-read from bits
+    // 11:8 of the existing second halfwords). Optionally flushes the cache.
+    static void setInt32(void* code, uint32_t value, bool flush)
+    {
+        uint16_t* location = reinterpret_cast<uint16_t*>(code);
+        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
+        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+
+        if (flush)
+            cacheFlush(location - 4, 4 * sizeof(uint16_t));
+    }
+
+    // Decode the 32-bit constant loaded by the MOVW/MOVT pair ending at 'code'
+    // (inverse of setInt32).
+    static int32_t readInt32(void* code)
+    {
+        uint16_t* location = reinterpret_cast<uint16_t*>(code);
+        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+        ARMThumbImmediate lo16;
+        ARMThumbImmediate hi16;
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
+        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
+        uint32_t result = hi16.asUInt16();
+        result <<= 16;
+        result |= lo16.asUInt16();
+        return static_cast<int32_t>(result);
+    }
+
+    // Patch the word-offset immediate (bits 10:6) of a planted LDR_imm_T1.
+    // The 7-bit byte offset is stored shifted right by 2 (word-aligned).
+    static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
+    {
+        // Requires us to have planted a LDR_imm_T1
+        ASSERT(imm.isValid());
+        ASSERT(imm.isUInt7());
+        uint16_t* location = reinterpret_cast<uint16_t*>(code);
+        location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
+        location[0] |= (imm.getUInt7() >> 2) << 6;
+        cacheFlush(location, sizeof(uint16_t));
+    }
+
+    // Convenience wrapper: plant a pointer as a 32-bit immediate.
+    static void setPointer(void* code, void* value, bool flush)
+    {
+        setInt32(code, reinterpret_cast<uint32_t>(value), flush);
+    }
+
+    // Instruction predicates: each tests whether the halfword(s) at 'address'
+    // match a particular planted encoding, masking out operand fields.
+
+    // 32-bit unconditional branch, B.W (T4 encoding)?
+    static bool isB(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
+    }
+
+    // 16-bit BX <reg>? (register field masked out)
+    static bool isBX(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return (instruction[0] & 0xff87) == OP_BX;
+    }
+
+    // 32-bit MOVW (MOV immediate, T3 encoding)?
+    static bool isMOV_imm_T3(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
+    }
+
+    // 32-bit MOVT?
+    static bool isMOVT(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
+    }
+
+    // 16-bit NOP (T1)?
+    static bool isNOP_T1(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return instruction[0] == OP_NOP_T1;
+    }
+
+    // 32-bit NOP (T2)?
+    static bool isNOP_T2(void* address)
+    {
+        uint16_t* instruction = static_cast<uint16_t*>(address);
+        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
+    }
+
+    // Range checks for the four Thumb branch encodings. Each uses a shift
+    // round-trip to test that the displacement sign-extends from the given
+    // field width (intptr_t is 32-bit here — this is ARM32-only code).
+
+    // B T1 (conditional, 16-bit): displacement fits in signed 9 bits.
+    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 23) >> 23) == relative;
+    }
+
+    // B T2 (unconditional, 16-bit): displacement fits in signed 12 bits.
+    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+        return ((relative << 20) >> 20) == relative;
+    }
+
+    // B T3 (conditional, 32-bit): displacement fits in signed 21 bits.
+    static bool canBeJumpT3(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        return ((relative << 11) >> 11) == relative;
+    }
+
+    // B T4 (unconditional, 32-bit): displacement fits in signed 25 bits.
+    static bool canBeJumpT4(const uint16_t* instruction, const void* target)
+    {
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        return ((relative << 7) >> 7) == relative;
+    }
+
+    // Plant a 16-bit conditional branch (B T1) in the halfword slot
+    // immediately before 'instruction'.
+    void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT1(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T1 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+    }
+
+    // Plant a 16-bit unconditional branch (B T2) in the slot before
+    // 'instruction'.
+    static void linkJumpT2(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT2(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // It does not appear to be documented in the ARM ARM (big surprise), but
+        // for OP_B_T2 the branch displacement encoded in the instruction is 2
+        // less than the actual displacement.
+        relative -= 2;
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+    }
+
+    // Plant a 32-bit conditional branch (B T3) in the two halfword slots
+    // before 'instruction', splitting the displacement across both halves.
+    void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT3(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+    }
+
+    // Plant a 32-bit unconditional branch (B.W, T4) in the two halfword slots
+    // before 'instruction'.
+    static void linkJumpT4(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+        ASSERT(canBeJumpT4(instruction, target));
+
+        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+        // ARM encoding for the top two bits below the sign bit is 'peculiar':
+        // the J1/J2 bits are stored inverted relative to the sign, so flip
+        // them for non-negative displacements.
+        if (relative >= 0)
+            relative ^= 0xC00000;
+
+        // All branch offsets should be an even distance.
+        ASSERT(!(relative & 1));
+        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+    }
+
+    // Plant an IT instruction followed by a T4 branch: a conditional branch
+    // with T4 range, occupying three halfword slots before 'instruction'.
+    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        instruction[-3] = ifThenElse(cond) | OP_IT;
+        linkJumpT4(instruction, target);
+    }
+
+    // Plant an absolute jump: MOVW/MOVT of (target | Thumb bit) into the
+    // scratch register ip, then BX ip — five halfword slots before
+    // 'instruction'. Reaches any address.
+    static void linkBX(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+    }
+
+    // Conditional variant of linkBX: an IT instruction predicating the whole
+    // MOVW/MOVT/BX sequence is planted in the slot before it (six halfwords
+    // total). ifThenElse(cond, true, true) presumably selects the mask that
+    // covers three following instructions — TODO confirm the T-flag encoding.
+    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        linkBX(instruction, target);
+        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+    }
+
+    // Fill the five-halfword jump slot ending at 'instruction' with either a
+    // NOP-padded T4 branch (when in range) or a full MOVW/MOVT/BX sequence.
+    static void linkJumpAbsolute(uint16_t* instruction, void* target)
+    {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+        // The slot must currently hold one of the two shapes this function plants.
+        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+               || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+        if (canBeJumpT4(instruction, target)) {
+            // There may be a better way to fix this, but right now put the NOPs first, since in the
+            // case of a conditional branch this will be coming after an ITTT predicating *three*
+            // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
+            // variable width encoding - the previous instruction might *look* like an ITTT but
+            // actually be the second half of a 2-word op.
+            instruction[-5] = OP_NOP_T1;
+            instruction[-4] = OP_NOP_T2a;
+            instruction[-3] = OP_NOP_T2b;
+            linkJumpT4(instruction, target);
+        } else {
+            // Out of T4 range: load (target | Thumb bit) into ip and BX to it.
+            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+            instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+            instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+            instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+            instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+            instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+        }
+    }
+
+    // Encode the first halfword of a MOVW/MOVT-style two-word op: opcode bits,
+    // the immediate's 'i' bit in bit 10, and its imm4 field in bits 3:0.
+    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
+    {
+        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
+    }
+
+    // Inverse of the above: extract i and imm4 from the first halfword.
+    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
+    {
+        result.m_value.i = (value >> 10) & 1;
+        result.m_value.imm4 = value & 15;
+    }
+
+    // Encode the second halfword: imm3 in bits 14:12, destination register in
+    // bits 11:8, and imm8 in bits 7:0.
+    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
+    {
+        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
+    }
+
+    // Inverse of the above: extract imm3 and imm8 from the second halfword.
+    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
+    {
+        result.m_value.imm3 = (value >> 12) & 7;
+        result.m_value.imm8 = value & 255;
+    }
+
+ class ARMInstructionFormatter {
+ public:
+ ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+ {
+ m_buffer.putShort(op | (rd << 8) | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+ {
+ m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+ }
+
+ ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+ }
+
+ ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+ {
+ m_buffer.putShort(op | reg);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+ {
+ m_buffer.putShort(op);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(op2);
+ }
+
+ ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ARMThumbImmediate newImm = imm;
+ newImm.m_value.imm4 = imm4;
+
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((reg2 << 12) | imm);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
+ }
+
+ // Formats up instructions of the pattern:
+ // 111111111B11aaaa:bbbb222SA2C2cccc
+ // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
+ // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
+ ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
+ {
+ ASSERT(!(op1 & 0x004f));
+ ASSERT(!(op2 & 0xf1af));
+ m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
+ m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
+ }
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ // (i.e. +/-(0..255) 32-bit words)
+ ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
+ {
+ bool up = true;
+ if (imm < 0) {
+ imm = -imm;
+ up = false;
+ }
+
+ uint32_t offset = imm;
+ ASSERT(!(offset & ~0x3fc));
+ offset >>= 2;
+
+ m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
+ m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
+ }
+
+ // Administrative methods:
+
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ private:
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
new file mode 100644
index 0000000000..95eaf7d99d
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
@@ -0,0 +1,842 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AbstractMacroAssembler_h
+#define AbstractMacroAssembler_h
+
+#include "AssemblerBuffer.h"
+#include "CodeLocation.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/CryptographicallyRandomNumber.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/UnusedParam.h>
+
+#if ENABLE(ASSEMBLER)
+
+
+#if PLATFORM(QT)
+#define ENABLE_JIT_CONSTANT_BLINDING 0
+#endif
+
+#ifndef ENABLE_JIT_CONSTANT_BLINDING
+#define ENABLE_JIT_CONSTANT_BLINDING 1
+#endif
+
+namespace JSC {
+
+class JumpReplacementWatchpoint;
+class LinkBuffer;
+class RepatchBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
+
+template <class AssemblerType>
+class AbstractMacroAssembler {
+public:
+ friend class JITWriteBarrierBase;
+ typedef AssemblerType AssemblerType_T;
+
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssemblerCodeRef CodeRef;
+
+ class Jump;
+
+ typedef typename AssemblerType::RegisterID RegisterID;
+
+ // Section 1: MacroAssembler operand types
+ //
+ // The following types are used as operands to MacroAssembler operations,
+ // describing immediate and memory operands to the instructions to be planted.
+
+ enum Scale {
+ TimesOne,
+ TimesTwo,
+ TimesFour,
+ TimesEight,
+ };
+
+ // Address:
+ //
+ // Describes a simple base-offset address.
+ struct Address {
+ explicit Address(RegisterID base, int32_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ struct ExtendedAddress {
+ explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ intptr_t offset;
+ };
+
+ // ImplicitAddress:
+ //
+ // This class is used for explicit 'load' and 'store' operations
+ // (as opposed to situations in which a memory operand is provided
+ // to a generic operation, such as an integer arithmetic instruction).
+ //
+ // In the case of a load (or store) operation we want to permit
+ // addresses to be implicitly constructed, e.g. the two calls:
+ //
+ // load32(Address(addrReg), destReg);
+ // load32(addrReg, destReg);
+ //
+ // Are equivalent, and the explicit wrapping of the Address in the former
+ // is unnecessary.
+ struct ImplicitAddress {
+ ImplicitAddress(RegisterID base)
+ : base(base)
+ , offset(0)
+ {
+ }
+
+ ImplicitAddress(Address address)
+ : base(address.base)
+ , offset(address.offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // BaseIndex:
+ //
+ // Describes a complex addressing mode.
+ struct BaseIndex {
+ BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+ : base(base)
+ , index(index)
+ , scale(scale)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ RegisterID index;
+ Scale scale;
+ int32_t offset;
+ };
+
+    // AbsoluteAddress:
+    //
+    // Describes a memory operand given by a pointer. For regular load & store
+    // operations an unwrapped void* will be used, rather than using this.
+    struct AbsoluteAddress {
+        explicit AbsoluteAddress(const void* ptr)
+            : m_ptr(ptr)
+        {
+        }
+
+        const void* m_ptr;
+    };
+
+ // TrustedImmPtr:
+ //
+ // A pointer sized immediate operand to an instruction - this is wrapped
+ // in a class requiring explicit construction in order to differentiate
+ // from pointers used as absolute addresses to memory operations
+ struct TrustedImmPtr {
+ TrustedImmPtr() { }
+
+ explicit TrustedImmPtr(const void* value)
+ : m_value(value)
+ {
+ }
+
+ // This is only here so that TrustedImmPtr(0) does not confuse the C++
+ // overload handling rules.
+ explicit TrustedImmPtr(int value)
+ : m_value(0)
+ {
+ ASSERT_UNUSED(value, !value);
+ }
+
+ explicit TrustedImmPtr(size_t value)
+ : m_value(reinterpret_cast<void*>(value))
+ {
+ }
+
+ intptr_t asIntptr()
+ {
+ return reinterpret_cast<intptr_t>(m_value);
+ }
+
+ const void* m_value;
+ };
+
+ struct ImmPtr :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImmPtr
+#else
+ public TrustedImmPtr
+#endif
+ {
+ explicit ImmPtr(const void* value)
+ : TrustedImmPtr(value)
+ {
+ }
+
+ TrustedImmPtr asTrustedImmPtr() { return *this; }
+ };
+
+ // TrustedImm32:
+ //
+ // A 32bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm32 {
+ TrustedImm32() { }
+
+ explicit TrustedImm32(int32_t value)
+ : m_value(value)
+ {
+ }
+
+#if !CPU(X86_64)
+ explicit TrustedImm32(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int32_t m_value;
+ };
+
+
+ struct Imm32 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm32
+#else
+ public TrustedImm32
+#endif
+ {
+ explicit Imm32(int32_t value)
+ : TrustedImm32(value)
+ {
+ }
+#if !CPU(X86_64)
+ explicit Imm32(TrustedImmPtr ptr)
+ : TrustedImm32(ptr)
+ {
+ }
+#endif
+ const TrustedImm32& asTrustedImm32() const { return *this; }
+
+ };
+
+ // TrustedImm64:
+ //
+ // A 64bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm64 {
+ TrustedImm64() { }
+
+ explicit TrustedImm64(int64_t value)
+ : m_value(value)
+ {
+ }
+
+#if CPU(X86_64)
+ explicit TrustedImm64(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int64_t m_value;
+ };
+
+ struct Imm64 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm64
+#else
+ public TrustedImm64
+#endif
+ {
+ explicit Imm64(int64_t value)
+ : TrustedImm64(value)
+ {
+ }
+#if CPU(X86_64)
+ explicit Imm64(TrustedImmPtr ptr)
+ : TrustedImm64(ptr)
+ {
+ }
+#endif
+ const TrustedImm64& asTrustedImm64() const { return *this; }
+ };
+
+ // Section 2: MacroAssembler code buffer handles
+ //
+ // The following types are used to reference items in the code buffer
+ // during JIT code generation. For example, the type Jump is used to
+ // track the location of a jump instruction so that it may later be
+ // linked to a label marking its destination.
+
+
+ // Label:
+ //
+ // A Label records a point in the generated instruction stream, typically such that
+ // it may be used as a destination for a jump.
+ class Label {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend struct DFG::OSRExit;
+ friend class Jump;
+ friend class JumpReplacementWatchpoint;
+ friend class MacroAssemblerCodeRef;
+ friend class LinkBuffer;
+ friend class Watchpoint;
+
+ public:
+ Label()
+ {
+ }
+
+ Label(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // ConvertibleLoadLabel:
+ //
+ // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+ // so that:
+ //
+ // loadPtr(Address(a, i), b)
+ //
+ // becomes:
+ //
+ // addPtr(TrustedImmPtr(i), a, b)
+ class ConvertibleLoadLabel {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+
+ public:
+ ConvertibleLoadLabel()
+ {
+ }
+
+ ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.labelIgnoringWatchpoints())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelPtr:
+ //
+ // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+ // patched after the code has been generated.
+ class DataLabelPtr {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelPtr()
+ {
+ }
+
+ DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+    // DataLabel32:
+    //
+    // A DataLabel32 is used to refer to a location in the code containing a
+    // 32-bit value to be patched after the code has been generated.
+    class DataLabel32 {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+    public:
+        DataLabel32()
+        {
+        }
+
+        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+
+        AssemblerLabel label() const { return m_label; }
+
+    private:
+        AssemblerLabel m_label;
+    };
+
+ // DataLabelCompact:
+ //
+ // A DataLabelCompact is used to refer to a location in the code containing a
+ // compact immediate to be patched after the code has been generated.
+ class DataLabelCompact {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelCompact()
+ {
+ }
+
+ DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ DataLabelCompact(AssemblerLabel label)
+ : m_label(label)
+ {
+ }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // Call:
+ //
+ // A Call object is a reference to a call instruction that has been planted
+ // into the code buffer - it is typically used to link the call, setting the
+ // relative offset such that when executed it will call to the desired
+ // destination.
+ class Call {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+
+ public:
+ enum Flags {
+ None = 0x0,
+ Linkable = 0x1,
+ Near = 0x2,
+ LinkableNear = 0x3,
+ };
+
+ Call()
+ : m_flags(None)
+ {
+ }
+
+ Call(AssemblerLabel jmp, Flags flags)
+ : m_label(jmp)
+ , m_flags(flags)
+ {
+ }
+
+ bool isFlagSet(Flags flag)
+ {
+ return m_flags & flag;
+ }
+
+ static Call fromTailJump(Jump jump)
+ {
+ return Call(jump.m_label, Linkable);
+ }
+
+ AssemblerLabel m_label;
+ private:
+ Flags m_flags;
+ };
+
+ // Jump:
+ //
+ // A Jump object is a reference to a jump instruction that has been planted
+ // into the code buffer - it is typically used to link the jump, setting the
+ // relative offset such that when executed it will jump to the desired
+ // destination.
+ class Jump {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class Call;
+ friend struct DFG::OSRExit;
+ friend class LinkBuffer;
+ public:
+ Jump()
+ {
+ }
+
+#if CPU(ARM_THUMB2)
+ // Fixme: this information should be stored in the instruction stream, not in the Jump object.
+ Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+#elif CPU(SH4)
+ Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+ : m_label(jmp)
+ , m_type(type)
+ {
+ }
+#else
+ Jump(AssemblerLabel jmp)
+ : m_label(jmp)
+ {
+ }
+#endif
+
+ Label label() const
+ {
+ Label result;
+ result.m_label = m_label;
+ return result;
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm) const
+ {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
+#else
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+#endif
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
+ {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#else
+ masm->m_assembler.linkJump(m_label, label.m_label);
+#endif
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+#if CPU(ARM_THUMB2)
+ ARMv7Assembler::JumpType m_type;
+ ARMv7Assembler::Condition m_condition;
+#endif
+#if CPU(SH4)
+ SH4Assembler::JumpType m_type;
+#endif
+ };
+
+ struct PatchableJump {
+ PatchableJump()
+ {
+ }
+
+ explicit PatchableJump(Jump jump)
+ : m_jump(jump)
+ {
+ }
+
+ operator Jump&() { return m_jump; }
+
+ Jump m_jump;
+ };
+
+ // JumpList:
+ //
+ // A JumpList is a set of Jump objects.
+ // All jumps in the set will be linked to the same destination.
+ class JumpList {
+ friend class LinkBuffer;
+
+ public:
+ typedef Vector<Jump, 2> JumpVector;
+
+ JumpList() { }
+
+ JumpList(Jump jump)
+ {
+ append(jump);
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].link(masm);
+ m_jumps.clear();
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].linkTo(label, masm);
+ m_jumps.clear();
+ }
+
+ void append(Jump jump)
+ {
+ m_jumps.append(jump);
+ }
+
+ void append(const JumpList& other)
+ {
+ m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+ }
+
+ bool empty()
+ {
+ return !m_jumps.size();
+ }
+
+ void clear()
+ {
+ m_jumps.clear();
+ }
+
+ const JumpVector& jumps() const { return m_jumps; }
+
+ private:
+ JumpVector m_jumps;
+ };
+
+
+ // Section 3: Misc admin methods
+#if ENABLE(DFG_JIT)
+ Label labelIgnoringWatchpoints()
+ {
+ Label result;
+ result.m_label = m_assembler.labelIgnoringWatchpoints();
+ return result;
+ }
+#else
+ Label labelIgnoringWatchpoints()
+ {
+ return label();
+ }
+#endif
+
+ Label label()
+ {
+ return Label(this);
+ }
+
+ void padBeforePatch()
+ {
+ // Rely on the fact that asking for a label already does the padding.
+ (void)label();
+ }
+
+ Label watchpointLabel()
+ {
+ Label result;
+ result.m_label = m_assembler.labelForWatchpoint();
+ return result;
+ }
+
+ Label align()
+ {
+ m_assembler.align(16);
+ return Label(this);
+ }
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ class RegisterAllocationOffset {
+ public:
+ RegisterAllocationOffset(unsigned offset)
+ : m_offset(offset)
+ {
+ }
+
+ void check(unsigned low, unsigned high)
+ {
+ RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+ }
+
+ private:
+ unsigned m_offset;
+ };
+
+ void addRegisterAllocationAtOffset(unsigned offset)
+ {
+ m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
+ }
+
+ void clearRegisterAllocationOffsets()
+ {
+ m_registerAllocationForOffsets.clear();
+ }
+
+ void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
+ {
+ if (offset1 > offset2)
+ std::swap(offset1, offset2);
+
+ size_t size = m_registerAllocationForOffsets.size();
+ for (size_t i = 0; i < size; ++i)
+ m_registerAllocationForOffsets[i].check(offset1, offset2);
+ }
+#endif
+
+ template<typename T, typename U>
+ static ptrdiff_t differenceBetween(T from, U to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
+ {
+ return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
+ }
+
+ unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+ ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
+ {
+ AssemblerType::cacheFlush(code, size);
+ }
+protected:
+ AbstractMacroAssembler()
+ : m_randomSource(cryptographicallyRandomNumber())
+ {
+ }
+
+ AssemblerType m_assembler;
+
+ uint32_t random()
+ {
+ return m_randomSource.getUint32();
+ }
+
+ WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool scratchRegisterForBlinding() { return false; }
+ static bool shouldBlindForSpecificArch(uint32_t) { return true; }
+ static bool shouldBlindForSpecificArch(uint64_t) { return true; }
+#endif
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+ {
+ AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
+ }
+
+ static void linkPointer(void* code, AssemblerLabel label, void* value)
+ {
+ AssemblerType::linkPointer(code, label, value);
+ }
+
+ static void* getLinkerAddress(void* code, AssemblerLabel label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static unsigned getLinkerCallReturnOffset(Call call)
+ {
+ return AssemblerType::getCallReturnOffset(call.m_label);
+ }
+
+ static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
+
+ static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+ {
+ return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+ }
+
+ static void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithLoad(label.dataLocation());
+ }
+
+ static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithAddressComputation(label.dataLocation());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AbstractMacroAssembler_h
diff --git a/src/3rdparty/masm/assembler/AssemblerBuffer.h b/src/3rdparty/masm/assembler/AssemblerBuffer.h
new file mode 100644
index 0000000000..277ec1043c
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AssemblerBuffer.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBuffer_h
+#define AssemblerBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ExecutableAllocator.h"
+#include "JITCompilationEffort.h"
+#include "JSGlobalData.h"
+#include "stdint.h"
+#include <string.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+ struct AssemblerLabel {
+ AssemblerLabel()
+ : m_offset(std::numeric_limits<uint32_t>::max())
+ {
+ }
+
+ explicit AssemblerLabel(uint32_t offset)
+ : m_offset(offset)
+ {
+ }
+
+ bool isSet() const { return (m_offset != std::numeric_limits<uint32_t>::max()); }
+
+ AssemblerLabel labelAtOffset(int offset) const
+ {
+ return AssemblerLabel(m_offset + offset);
+ }
+
+ uint32_t m_offset;
+ };
+
+ class AssemblerBuffer {
+ static const int inlineCapacity = 128;
+ public:
+ AssemblerBuffer()
+ : m_storage(inlineCapacity)
+ , m_buffer(&(*m_storage.begin()))
+ , m_capacity(inlineCapacity)
+ , m_index(0)
+ {
+ }
+
+ ~AssemblerBuffer()
+ {
+ }
+
+ bool isAvailable(int space)
+ {
+ return m_index + space <= m_capacity;
+ }
+
+ void ensureSpace(int space)
+ {
+ if (!isAvailable(space))
+ grow();
+ }
+
+ bool isAligned(int alignment) const
+ {
+ return !(m_index & (alignment - 1));
+ }
+
+ template<typename IntegralType>
+ void putIntegral(IntegralType value)
+ {
+ ensureSpace(sizeof(IntegralType));
+ putIntegralUnchecked(value);
+ }
+
+ template<typename IntegralType>
+ void putIntegralUnchecked(IntegralType value)
+ {
+ ASSERT(isAvailable(sizeof(IntegralType)));
+ *reinterpret_cast_ptr<IntegralType*>(m_buffer + m_index) = value;
+ m_index += sizeof(IntegralType);
+ }
+
+ void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
+ void putByte(int8_t value) { putIntegral(value); }
+ void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
+ void putShort(int16_t value) { putIntegral(value); }
+ void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+ void putInt(int32_t value) { putIntegral(value); }
+ void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
+ void putInt64(int64_t value) { putIntegral(value); }
+
+ void* data() const
+ {
+ return m_buffer;
+ }
+
+ size_t codeSize() const
+ {
+ return m_index;
+ }
+
+ AssemblerLabel label() const
+ {
+ return AssemblerLabel(m_index);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ if (!m_index)
+ return 0;
+
+ RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index, ownerUID, effort);
+
+ if (!result)
+ return 0;
+
+ ExecutableAllocator::makeWritable(result->start(), result->sizeInBytes());
+
+ memcpy(result->start(), m_buffer, m_index);
+
+ return result.release();
+ }
+
+ unsigned debugOffset() { return m_index; }
+
+ protected:
+ void append(const char* data, int size)
+ {
+ if (!isAvailable(size))
+ grow(size);
+
+ memcpy(m_buffer + m_index, data, size);
+ m_index += size;
+ }
+
+ void grow(int extraCapacity = 0)
+ {
+ m_capacity += m_capacity / 2 + extraCapacity;
+
+ m_storage.grow(m_capacity);
+ m_buffer = &(*m_storage.begin());
+ }
+
+ private:
+ Vector<char, inlineCapacity, UnsafeVectorOverflow> m_storage;
+ char* m_buffer;
+ int m_capacity;
+ int m_index;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBuffer_h
diff --git a/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h b/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 0000000000..5377ef0c7a
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+#define ASSEMBLER_HAS_CONSTANT_POOL 1
+
+namespace JSC {
+
+/*
+ On a constant pool 4 or 8 bytes of data can be stored. The values can be
+ constants or addresses. The addresses should be 32 or 64 bits. The constants
+ should be double-precision floating point or integer numbers which are hard to
+ encode as a few machine instructions.
+
+ TODO: The pool is designed to handle both 32 and 64 bit values, but
+ currently only the 4 byte constants are implemented and tested.
+
+ The AssemblerBuffer can contain multiple constant pools. Each pool is inserted
+ into the instruction stream - protected by a jump instruction from the
+ execution flow.
+
+ The flush mechanism is called when no space remains to insert the next instruction
+ into the pool. Three values are used to determine when the constant pool itself
+ has to be inserted into the instruction stream (Assembler Buffer):
+
+ - maxPoolSize: size of the constant pool in bytes, this value cannot be
+ larger than the maximum offset of a PC relative memory load
+
+ - barrierSize: size of jump instruction in bytes which protects the
+ constant pool from execution
+
+ - maxInstructionSize: maximum length of a machine instruction in bytes
+
+ There are some callbacks which solve the target architecture specific
+ address handling:
+
+ - TYPE patchConstantPoolLoad(TYPE load, int value):
+ patch the 'load' instruction with the index of the constant in the
+ constant pool and return the patched instruction.
+
+ - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+ patch the PC relative load instruction at 'loadAddr' address with the
+ final relative offset. The offset can be computed with help of
+ 'constPoolAddr' (the address of the constant pool) and index of the
+ constant (which is stored previously in the load instruction itself).
+
+ - TYPE placeConstantPoolBarrier(int size):
+ return with a constant pool barrier instruction which jumps over the
+ constant pool.
+
+ The 'put*WithConstant*' functions should be used to place a data into the
+ constant pool.
+*/
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool : public AssemblerBuffer {
+ typedef SegmentedVector<uint32_t, 512> LoadOffsets;
+ using AssemblerBuffer::putIntegral;
+ using AssemblerBuffer::putIntegralUnchecked;
+public:
+ typedef struct {
+ short high;
+ short low;
+ } TwoShorts;
+
+ enum {
+ UniqueConst,
+ ReusableConst,
+ UnusedEntry,
+ };
+
+ AssemblerBufferWithConstantPool()
+ : AssemblerBuffer()
+ , m_numConsts(0)
+ , m_maxDistance(maxPoolSize)
+ , m_lastConstDelta(0)
+ {
+ m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+ m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+ }
+
+ ~AssemblerBufferWithConstantPool()
+ {
+ fastFree(m_mask);
+ fastFree(m_pool);
+ }
+
+ void ensureSpace(int space)
+ {
+ flushIfNoSpaceFor(space);
+ AssemblerBuffer::ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ flushIfNoSpaceFor(insnSpace, constSpace);
+ AssemblerBuffer::ensureSpace(insnSpace);
+ }
+
+ void ensureSpaceForAnyInstruction(int amount = 1)
+ {
+ flushIfNoSpaceFor(amount * maxInstructionSize, amount * sizeof(uint64_t));
+ }
+
+ bool isAligned(int alignment)
+ {
+ flushIfNoSpaceFor(alignment);
+ return AssemblerBuffer::isAligned(alignment);
+ }
+
+ void putByteUnchecked(int value)
+ {
+ AssemblerBuffer::putByteUnchecked(value);
+ correctDeltas(1);
+ }
+
+ void putByte(int value)
+ {
+ flushIfNoSpaceFor(1);
+ AssemblerBuffer::putByte(value);
+ correctDeltas(1);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ AssemblerBuffer::putShortUnchecked(value);
+ correctDeltas(2);
+ }
+
+ void putShort(int value)
+ {
+ flushIfNoSpaceFor(2);
+ AssemblerBuffer::putShort(value);
+ correctDeltas(2);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ AssemblerBuffer::putIntUnchecked(value);
+ correctDeltas(4);
+ }
+
+ void putInt(int value)
+ {
+ flushIfNoSpaceFor(4);
+ AssemblerBuffer::putInt(value);
+ correctDeltas(4);
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ AssemblerBuffer::putInt64Unchecked(value);
+ correctDeltas(8);
+ }
+
+ void putIntegral(TwoShorts value)
+ {
+ putIntegral(value.high);
+ putIntegral(value.low);
+ }
+
+ void putIntegralUnchecked(TwoShorts value)
+ {
+ putIntegralUnchecked(value.high);
+ putIntegralUnchecked(value.low);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ flushConstantPool(false);
+ return AssemblerBuffer::executableCopy(globalData, ownerUID, effort);
+ }
+
+ void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
+
+ void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
+
+ // This flushing mechanism can be called after any unconditional jumps.
+ void flushWithoutBarrier(bool isForced = false)
+ {
+ // Flush if constant pool is more than 60% full to avoid overuse of this function.
+ if (isForced || 5 * static_cast<uint32_t>(m_numConsts) > 3 * maxPoolSize / sizeof(uint32_t))
+ flushConstantPool(false);
+ }
+
+ uint32_t* poolAddress()
+ {
+ return m_pool;
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_numConsts;
+ }
+
+private:
+ void correctDeltas(int insnSize)
+ {
+ m_maxDistance -= insnSize;
+ m_lastConstDelta -= insnSize;
+ if (m_lastConstDelta < 0)
+ m_lastConstDelta = 0;
+ }
+
+ void correctDeltas(int insnSize, int constSize)
+ {
+ correctDeltas(insnSize);
+
+ m_maxDistance -= m_lastConstDelta;
+ m_lastConstDelta = constSize;
+ }
+
+ template<typename IntegralType>
+ void putIntegralWithConstantInt(IntegralType insn, uint32_t constant, bool isReusable)
+ {
+ if (!m_numConsts)
+ m_maxDistance = maxPoolSize;
+ flushIfNoSpaceFor(sizeof(IntegralType), 4);
+
+ m_loadOffsets.append(codeSize());
+ if (isReusable) {
+ for (int i = 0; i < m_numConsts; ++i) {
+ if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, i)));
+ correctDeltas(sizeof(IntegralType));
+ return;
+ }
+ }
+ }
+
+ m_pool[m_numConsts] = constant;
+ m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, m_numConsts)));
+ ++m_numConsts;
+
+ correctDeltas(sizeof(IntegralType), 4);
+ }
+
+ void flushConstantPool(bool useBarrier = true)
+ {
+ if (m_numConsts == 0)
+ return;
+ int alignPool = (codeSize() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+ if (alignPool)
+ alignPool = sizeof(uint64_t) - alignPool;
+
+ // Callback to protect the constant pool from execution
+ if (useBarrier)
+ putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+ if (alignPool) {
+ if (alignPool & 1)
+ AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+ if (alignPool & 2)
+ AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+ if (alignPool & 4)
+ AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+ }
+
+ int constPoolOffset = codeSize();
+ append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+ // Patch each PC relative load
+ for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+ void* loadAddr = reinterpret_cast<char*>(data()) + *iter;
+ AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<char*>(data()) + constPoolOffset);
+ }
+
+ m_loadOffsets.clear();
+ m_numConsts = 0;
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
+ if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
+ (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
+ flushConstantPool();
+ }
+
+ uint32_t* m_pool;
+ char* m_mask;
+ LoadOffsets m_loadOffsets;
+
+ int m_numConsts;
+ int m_maxDistance;
+ int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
diff --git a/src/3rdparty/masm/assembler/CodeLocation.h b/src/3rdparty/masm/assembler/CodeLocation.h
new file mode 100644
index 0000000000..86d1f2b755
--- /dev/null
+++ b/src/3rdparty/masm/assembler/CodeLocation.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabelCompact;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+class CodeLocationConvertibleLoad;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know. When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart. To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+ CodeLocationInstruction instructionAtOffset(int offset);
+ CodeLocationLabel labelAtOffset(int offset);
+ CodeLocationJump jumpAtOffset(int offset);
+ CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
+ CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+ CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+ CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset);
+ CodeLocationConvertibleLoad convertibleLoadAtOffset(int offset);
+
+protected:
+ CodeLocationCommon()
+ {
+ }
+
+ CodeLocationCommon(MacroAssemblerCodePtr location)
+ : MacroAssemblerCodePtr(location)
+ {
+ }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+ CodeLocationInstruction() {}
+ explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+ CodeLocationLabel() {}
+ explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+ CodeLocationJump() {}
+ explicit CodeLocationJump(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationJump(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+ CodeLocationCall() {}
+ explicit CodeLocationCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+ CodeLocationNearCall() {}
+ explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+ CodeLocationDataLabel32() {}
+ explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabel32(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelCompact : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelCompact() { }
+ explicit CodeLocationDataLabelCompact(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationDataLabelCompact(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelPtr() {}
+ explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabelPtr(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationConvertibleLoad : public CodeLocationCommon {
+public:
+ CodeLocationConvertibleLoad() { }
+ explicit CodeLocationConvertibleLoad(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationConvertibleLoad(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelCompact CodeLocationCommon::dataLabelCompactAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelCompact(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationConvertibleLoad(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.cpp b/src/3rdparty/masm/assembler/LinkBuffer.cpp
new file mode 100644
index 0000000000..645eba5380
--- /dev/null
+++ b/src/3rdparty/masm/assembler/LinkBuffer.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LinkBuffer.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include "Options.h"
+
+namespace JSC {
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
+{
+ performFinalization();
+
+ return CodeRef(m_executableMemory);
+}
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
+{
+ ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
+
+ CodeRef result = finalizeCodeWithoutDisassembly();
+
+ dataLogF("Generated JIT code for ");
+ va_list argList;
+ va_start(argList, format);
+ WTF::dataLogFV(format, argList);
+ va_end(argList);
+ dataLogF(":\n");
+
+ dataLogF(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+ disassemble(result.code(), m_size, " ", WTF::dataFile());
+
+ return result;
+}
+
+void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+{
+ ASSERT(!m_code);
+#if !ENABLE(BRANCH_COMPACTION)
+ m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = m_executableMemory->start();
+ m_size = m_assembler->m_assembler.codeSize();
+ ASSERT(m_code);
+#else
+ m_initialSize = m_assembler->m_assembler.codeSize();
+ m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = (uint8_t*)m_executableMemory->start();
+ ASSERT(m_code);
+ ExecutableAllocator::makeWritable(m_code, m_initialSize);
+ uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+ uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ int readPtr = 0;
+ int writePtr = 0;
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
+ unsigned jumpCount = jumpsToLink.size();
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ size_t regionSize = jumpsToLink[i].from() - readPtr;
+ uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr);
+ uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize);
+ uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr);
+ ASSERT(!(regionSize % 2));
+ ASSERT(!(readPtr % 2));
+ ASSERT(!(writePtr % 2));
+ while (copySource != copyEnd)
+ *copyDst++ = *copySource++;
+ m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+ // Calculate absolute address of the jump target, in the case of backwards
+ // branches we need to be precise, forward branches we are pessimistic
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
+ // Compact branch if we can...
+ if (m_assembler->canCompact(jumpsToLink[i].type())) {
+ // Step back in the write stream
+ int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
+ }
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ // Copy everything after the last jump
+ memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
+ m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
+
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint8_t* location = outData + jumpsToLink[i].from();
+ uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+ m_assembler->link(jumpsToLink[i], location, target);
+ }
+
+ jumpsToLink.clear();
+ m_size = writePtr + m_initialSize - readPtr;
+ m_executableMemory->shrink(m_size);
+
+#if DUMP_LINK_STATISTICS
+ dumpLinkStatistics(m_code, m_initialSize, m_size);
+#endif
+#if DUMP_CODE
+ dumpCode(m_code, m_size);
+#endif
+#endif
+}
+
+void LinkBuffer::performFinalization()
+{
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ ASSERT(isValid());
+ m_completed = true;
+#endif
+
+#if ENABLE(BRANCH_COMPACTION)
+ ExecutableAllocator::makeExecutable(code(), m_initialSize);
+#else
+ ExecutableAllocator::makeExecutable(code(), m_size);
+#endif
+ MacroAssembler::cacheFlush(code(), m_size);
+}
+
+#if DUMP_LINK_STATISTICS
+void LinkBuffer::dumpLinkStatistics(void* code, size_t initializeSize, size_t finalSize)
+{
+ static unsigned linkCount = 0;
+ static unsigned totalInitialSize = 0;
+ static unsigned totalFinalSize = 0;
+ linkCount++;
+ totalInitialSize += initialSize;
+ totalFinalSize += finalSize;
+ dataLogF("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
+ code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
+ static_cast<unsigned>(initialSize - finalSize),
+ 100.0 * (initialSize - finalSize) / initialSize);
+ dataLogF("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
+ linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
+ 100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
+}
+#endif
+
+#if DUMP_CODE
+void LinkBuffer::dumpCode(void* code, size_t size)
+{
+#if CPU(ARM_THUMB2)
+ // Dump the generated code in an asm file format that can be assembled and then disassembled
+ // for debugging purposes. For example, save this output as jit.s:
+ // gcc -arch armv7 -c jit.s
+ // otool -tv jit.o
+ static unsigned codeCount = 0;
+ unsigned short* tcode = static_cast<unsigned short*>(code);
+ size_t tsize = size / sizeof(short);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.syntax unified\n"
+ "\t.section\t__TEXT,__text,regular,pure_instructions\n"
+ "\t.globl\t%s\n"
+ "\t.align 2\n"
+ "\t.code 16\n"
+ "\t.thumb_func\t%s\n"
+ "# %p\n"
+ "%s:\n", nameBuf, nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.short\t0x%x\n", tcode[i]);
+#elif CPU(ARM_TRADITIONAL)
+ // gcc -c jit.s
+ // objdump -D jit.o
+ static unsigned codeCount = 0;
+ unsigned int* tcode = static_cast<unsigned int*>(code);
+ size_t tsize = size / sizeof(unsigned int);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.globl\t%s\n"
+ "\t.align 4\n"
+ "\t.code 32\n"
+ "\t.text\n"
+ "# %p\n"
+ "%s:\n", nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.long\t0x%x\n", tcode[i]);
+#endif
+}
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.h b/src/3rdparty/masm/assembler/LinkBuffer.h
new file mode 100644
index 0000000000..e1882433c1
--- /dev/null
+++ b/src/3rdparty/masm/assembler/LinkBuffer.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2009, 2010, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#define DUMP_LINK_STATISTICS 0
+#define DUMP_CODE 0
+
+#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
+#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
+
+#include "JITCompilationEffort.h"
+#include "MacroAssembler.h"
+#include <wtf/DataLog.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class JSGlobalData;
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed, and the code has been copied to is final location in memory. At this
+// time pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+// * Jump objects may be linked to external targets,
+// * The address of Jump objects may taken, such that it can later be relinked.
+// * The return address of a Call may be acquired.
+// * The address of a Label pointing into the code may be resolved.
+// * The value referenced by a DataLabel may be set.
+//
+class LinkBuffer {
+ WTF_MAKE_NONCOPYABLE(LinkBuffer);
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssembler::Label Label;
+ typedef MacroAssembler::Jump Jump;
+ typedef MacroAssembler::PatchableJump PatchableJump;
+ typedef MacroAssembler::JumpList JumpList;
+ typedef MacroAssembler::Call Call;
+ typedef MacroAssembler::DataLabelCompact DataLabelCompact;
+ typedef MacroAssembler::DataLabel32 DataLabel32;
+ typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+ typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel;
+#if ENABLE(BRANCH_COMPACTION)
+ typedef MacroAssembler::LinkRecord LinkRecord;
+ typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
+
+public:
+ LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : m_size(0)
+#if ENABLE(BRANCH_COMPACTION)
+ , m_initialSize(0)
+#endif
+ , m_code(0)
+ , m_assembler(masm)
+ , m_globalData(&globalData)
+#ifndef NDEBUG
+ , m_completed(false)
+ , m_effort(effort)
+#endif
+ {
+ linkCode(ownerUID, effort);
+ }
+
+ ~LinkBuffer()
+ {
+ ASSERT(m_completed || (!m_executableMemory && m_effort == JITCompilationCanFail));
+ }
+
+ bool didFailToAllocate() const
+ {
+ return !m_executableMemory;
+ }
+
+ bool isValid() const
+ {
+ return !didFailToAllocate();
+ }
+
+ // These methods are used to link or set values at code generation time.
+
+ void link(Call call, FunctionPtr function)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ call.m_label = applyOffset(call.m_label);
+ MacroAssembler::linkCall(code(), call, function);
+ }
+
+ void link(Jump jump, CodeLocationLabel label)
+ {
+ jump.m_label = applyOffset(jump.m_label);
+ MacroAssembler::linkJump(code(), jump, label);
+ }
+
+ void link(JumpList list, CodeLocationLabel label)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ link(list.m_jumps[i], label);
+ }
+
+ void patch(DataLabelPtr label, void* value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value.executableAddress());
+ }
+
+ // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+ CodeLocationCall locationOf(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(!call.isFlagSet(Call::Near));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationNearCall locationOfNearCall(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(call.isFlagSet(Call::Near));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationLabel locationOf(PatchableJump jump)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label)));
+ }
+
+ CodeLocationLabel locationOf(Label label)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+ {
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabel32 locationOf(DataLabel32 label)
+ {
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelCompact locationOf(DataLabelCompact label)
+ {
+ return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationConvertibleLoad locationOf(ConvertibleLoadLabel label)
+ {
+ return CodeLocationConvertibleLoad(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ // This method obtains the return address of the call, given as an offset from
+ // the start of the code.
+ unsigned returnAddressOffset(Call call)
+ {
+ call.m_label = applyOffset(call.m_label);
+ return MacroAssembler::getLinkerCallReturnOffset(call);
+ }
+
+ uint32_t offsetOf(Label label)
+ {
+ return applyOffset(label.m_label).m_offset;
+ }
+
+ // Upon completion of all patching 'FINALIZE_CODE()' should be called once to
+ // complete generation of the code. Alternatively, call
+ // finalizeCodeWithoutDisassembly() directly if you have your own way of
+ // displaying disassembly.
+
+ CodeRef finalizeCodeWithoutDisassembly();
+ CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+
+ CodePtr trampolineAt(Label label)
+ {
+ return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+ }
+
+ void* debugAddress()
+ {
+ return m_code;
+ }
+
+ size_t debugSize()
+ {
+ return m_size;
+ }
+
+private:
+ template <typename T> T applyOffset(T src)
+ {
+#if ENABLE(BRANCH_COMPACTION)
+ src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
+#endif
+ return src;
+ }
+
+ // Keep this private! - the underlying code should only be obtained externally via finalizeCode().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void linkCode(void* ownerUID, JITCompilationEffort);
+
+ void performFinalization();
+
+#if DUMP_LINK_STATISTICS
+ static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize);
+#endif
+
+#if DUMP_CODE
+ static void dumpCode(void* code, size_t);
+#endif
+
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+ size_t m_size;
+#if ENABLE(BRANCH_COMPACTION)
+ size_t m_initialSize;
+#endif
+ void* m_code;
+ MacroAssembler* m_assembler;
+ JSGlobalData* m_globalData;
+#ifndef NDEBUG
+ bool m_completed;
+ JITCompilationEffort m_effort;
+#endif
+};
+
+#define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \
+ (UNLIKELY((condition)) \
+ ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \
+ : (linkBufferReference).finalizeCodeWithoutDisassembly())
+
+// Use this to finalize code, like so:
+//
+// CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));
+//
+// Which, in disassembly mode, will print:
+//
+// Generated JIT code for my super thingy number 42:
+// Code at [0x123456, 0x234567]:
+// 0x123456: mov $0, 0
+// 0x12345a: ret
+//
+// ... and so on.
+//
+// Note that the dataLogFArgumentsForHeading are only evaluated when showDisassembly
+// is true, so you can hide expensive disassembly-only computations inside there.
+
+#define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF(Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+#define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF((Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
diff --git a/src/3rdparty/masm/assembler/MIPSAssembler.h b/src/3rdparty/masm/assembler/MIPSAssembler.h
new file mode 100644
index 0000000000..7f553bb9a1
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MIPSAssembler.h
@@ -0,0 +1,1107 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MIPSAssembler_h
+#define MIPSAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+typedef uint32_t MIPSWord;
+
+namespace MIPSRegisters {
+typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31
+} RegisterID;
+
+typedef enum {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31
+} FPRegisterID;
+
+} // namespace MIPSRegisters
+
+class MIPSAssembler {
+public:
+ typedef MIPSRegisters::RegisterID RegisterID;
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ MIPSAssembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ // MIPS instruction opcode field position
+ enum {
+ OP_SH_RD = 11,
+ OP_SH_RT = 16,
+ OP_SH_RS = 21,
+ OP_SH_SHAMT = 6,
+ OP_SH_CODE = 16,
+ OP_SH_FD = 6,
+ OP_SH_FS = 11,
+ OP_SH_FT = 16
+ };
+
+ void emitInst(MIPSWord op)
+ {
+ void* oldBase = m_buffer.data();
+
+ m_buffer.putInt(op);
+
+ void* newBase = m_buffer.data();
+ if (oldBase != newBase)
+ relocateJumps(oldBase, newBase);
+ }
+
+ void nop()
+ {
+ emitInst(0x00000000);
+ }
+
+ /* Need to insert one load data delay nop for mips1. */
+ void loadDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ /* Need to insert one coprocessor access delay nop for mips1. */
+ void copDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ void move(RegisterID rd, RegisterID rs)
+ {
+ /* addu */
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS));
+ }
+
+ /* Set an immediate value to a register. This may generate 1 or 2
+ instructions. */
+ void li(RegisterID dest, int imm)
+ {
+ if (imm >= -32768 && imm <= 32767)
+ addiu(dest, MIPSRegisters::zero, imm);
+ else if (imm >= 0 && imm < 65536)
+ ori(dest, MIPSRegisters::zero, imm);
+ else {
+ lui(dest, imm >> 16);
+ if (imm & 0xffff)
+ ori(dest, dest, imm);
+ }
+ }
+
+ void lui(RegisterID rt, int imm)
+ {
+ emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void addiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void addu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void subu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mult(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000018 | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void div(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000001a | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mfhi(RegisterID rd)
+ {
+ emitInst(0x00000010 | (rd << OP_SH_RD));
+ }
+
+ void mflo(RegisterID rd)
+ {
+ emitInst(0x00000012 | (rd << OP_SH_RD));
+ }
+
+ void mul(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+#if WTF_MIPS_ISA_AT_LEAST(32)
+ emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+#else
+ mult(rs, rt);
+ mflo(rd);
+#endif
+ }
+
+ void andInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void andi(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void nor(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void orInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void ori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void xori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void slt(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void sll(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void sllv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void sra(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srav(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void srl(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srlv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void lb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x80000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lbu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwl(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwr(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x84000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lhu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void sb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa0000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa4000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void jr(RegisterID rs)
+ {
+ emitInst(0x00000008 | (rs << OP_SH_RS));
+ }
+
+ void jalr(RegisterID rs)
+ {
+ emitInst(0x0000f809 | (rs << OP_SH_RS));
+ }
+
+ void jal()
+ {
+ emitInst(0x0c000000);
+ }
+
+ void bkpt()
+ {
+ int value = 512; /* BRK_BUG */
+ emitInst(0x0000000d | ((value & 0x3ff) << OP_SH_CODE));
+ }
+
+ void bgez(RegisterID rs, int imm)
+ {
+ emitInst(0x04010000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void bltz(RegisterID rs, int imm)
+ {
+ emitInst(0x04000000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void beq(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x10000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bne(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x14000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bc1t()
+ {
+ emitInst(0x45010000);
+ }
+
+ void bc1f()
+ {
+ emitInst(0x45000000);
+ }
+
+ void appendJump()
+ {
+ m_jumps.append(m_buffer.label());
+ }
+
+ void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void divd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void lwc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ copDelayNop();
+ }
+
+ void ldc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void swc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sdc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void mtc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44800000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mthc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44e00000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mfc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44000000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void sqrtd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void movd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void negd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200007 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void truncwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x4620000d | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtdw(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtds(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46000021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200024 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtsd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200020 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void ceqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cngtd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003f | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cnged(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003d | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003c | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003e | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cueqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200033 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200036 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200034 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void culed(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200037 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cultd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200035 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ // General helpers
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_buffer.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_buffer.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+ if (!result)
+ return 0;
+
+ relocateJumps(m_buffer.data(), result->start());
+ return result.release();
+ }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // Assembly helpers for moving data between fp and registers.
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mfc1(rd1, rn);
+ mfhc1(rd2, rn);
+#else
+ mfc1(rd1, rn);
+ mfc1(rd2, FPRegisterID(rn + 1));
+#endif
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mtc1(rn1, rd);
+ mthc1(rn2, rd);
+#else
+ mtc1(rn1, rd);
+ mtc1(rn2, FPRegisterID(rd + 1));
+#endif
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ // The return address is after a call and a delay slot instruction
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
+ static size_t linkDirectJump(void* code, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code));
+ size_t ops = 0;
+ int32_t slotAddr = reinterpret_cast<int>(insn) + 4;
+ int32_t toAddr = reinterpret_cast<int>(to);
+
+ if ((slotAddr & 0xf0000000) != (toAddr & 0xf0000000)) {
+ // lui
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((toAddr >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (toAddr & 0xffff);
+ ++insn;
+ // jr
+ *insn = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ ++insn;
+ ops = 4 * sizeof(MIPSWord);
+ } else {
+ // j
+ *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2);
+ ++insn;
+ ops = 2 * sizeof(MIPSWord);
+ }
+ // nop
+ *insn = 0x00000000;
+ return ops;
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + from.m_offset);
+ MIPSWord* toPos = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + to.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, toPos);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkCallInternal(insn, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 5)));
+ insn = insn - 6;
+ int flushSize = linkWithOffset(insn, to);
+
+ cacheFlush(insn, flushSize);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ void* start;
+ int size = linkCallInternal(from, to);
+ if (size == sizeof(MIPSWord))
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 2 * sizeof(MIPSWord));
+ else
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 4 * sizeof(MIPSWord));
+
+ cacheFlush(start, size);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((to >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (to & 0xffff);
+ insn--;
+ cacheFlush(insn, 2 * sizeof(MIPSWord));
+ }
+
+ static int32_t readInt32(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return result;
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ repatchInt32(where, value);
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ repatchInt32(from, reinterpret_cast<int32_t>(to));
+ }
+
+ static void* readPointer(void* from)
+ {
+ return reinterpret_cast<void*>(readInt32(from));
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn -= 4;
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return reinterpret_cast<void*>(result);
+ }
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
+ int lineSize;
+ asm("rdhwr %0, $1" : "=r" (lineSize));
+ //
+ // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
+ // mips_expand_synci_loop that may execute synci one more time.
+ // "start" points to the first byte of the cache line.
+ // "end" points to the last byte of the line before the last cache line.
+ // Because size is always a multiple of 4, this is safe to set
+ // "end" to the last byte.
+ //
+ intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
+ intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
+ __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
+#else
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#endif
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(MIPSWord) * 4;
+ }
+
+ static void revertJumpToMove(void* instructionStart, RegisterID rt, int imm)
+ {
+ MIPSWord* insn = static_cast<MIPSWord*>(instructionStart);
+ size_t codeSize = 2 * sizeof(MIPSWord);
+
+ // lui
+ *insn = 0x3c000000 | (rt << OP_SH_RT) | ((imm >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (rt << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff);
+ ++insn;
+ // if jr $t9
+ if (*insn == 0x03200008) {
+ *insn = 0x00000000;
+ codeSize += sizeof(MIPSWord);
+ }
+ cacheFlush(insn, codeSize);
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 3));
+ ASSERT(!(bitwise_cast<uintptr_t>(to) & 3));
+ size_t ops = linkDirectJump(instructionStart, to);
+ cacheFlush(instructionStart, ops);
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x8c000000 | ((*insn) & 0x3ffffff); // lw
+ cacheFlush(insn, 4);
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x24000000 | ((*insn) & 0x3ffffff); // addiu
+ cacheFlush(insn, 4);
+ }
+
+private:
+ /* Update each jump in the buffer of newBase. */
+ void relocateJumps(void* oldBase, void* newBase)
+ {
+ // Check each jump
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ int pos = iter->m_offset;
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(newBase) + pos);
+ insn = insn + 2;
+ // Need to make sure we have 5 valid instructions after pos
+ if ((unsigned)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord))
+ continue;
+
+ if ((*insn & 0xfc000000) == 0x08000000) { // j
+ int offset = *insn & 0x03ffffff;
+ int oldInsnAddress = (int)insn - (int)newBase + (int)oldBase;
+ int topFourBits = (oldInsnAddress + 4) >> 28;
+ int oldTargetAddress = (topFourBits << 28) | (offset << 2);
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ int newInsnAddress = (int)insn;
+ if (((newInsnAddress + 4) >> 28) == (newTargetAddress >> 28))
+ *insn = 0x08000000 | ((newTargetAddress >> 2) & 0x3ffffff);
+ else {
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ }
+ } else if ((*insn & 0xffe00000) == 0x3c000000) { // lui
+ int high = (*insn & 0xffff) << 16;
+ int low = *(insn + 1) & 0xffff;
+ int oldTargetAddress = high | low;
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ }
+ }
+ }
+
+ static int linkWithOffset(MIPSWord* insn, void* to)
+ {
+ ASSERT((*insn & 0xfc000000) == 0x10000000 // beq
+ || (*insn & 0xfc000000) == 0x14000000 // bne
+ || (*insn & 0xffff0000) == 0x45010000 // bc1t
+ || (*insn & 0xffff0000) == 0x45000000); // bc1f
+ intptr_t diff = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
+
+ if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) {
+ /*
+ Convert the sequence:
+ beq $2, $3, target
+ nop
+ b 1f
+ nop
+ nop
+ nop
+ 1:
+
+ to the new sequence if possible:
+ bne $2, $3, 1f
+ nop
+ j target
+ nop
+ nop
+ nop
+ 1:
+
+ OR to the new sequence:
+ bne $2, $3, 1f
+ nop
+ lui $25, target >> 16
+ ori $25, $25, target & 0xffff
+ jr $25
+ nop
+ 1:
+
+ Note: beq/bne/bc1t/bc1f are converted to bne/beq/bc1f/bc1t.
+ */
+
+ if (*(insn + 2) == 0x10000003) {
+ if ((*insn & 0xfc000000) == 0x10000000) // beq
+ *insn = (*insn & 0x03ff0000) | 0x14000005; // bne
+ else if ((*insn & 0xfc000000) == 0x14000000) // bne
+ *insn = (*insn & 0x03ff0000) | 0x10000005; // beq
+ else if ((*insn & 0xffff0000) == 0x45010000) // bc1t
+ *insn = 0x45000005; // bc1f
+ else if ((*insn & 0xffff0000) == 0x45000000) // bc1f
+ *insn = 0x45010005; // bc1t
+ else
+ ASSERT(0);
+ }
+
+ insn = insn + 2;
+ if ((reinterpret_cast<intptr_t>(insn) + 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *insn = 0x08000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ *(insn + 1) = 0;
+ return 4 * sizeof(MIPSWord);
+ }
+
+ intptr_t newTargetAddress = reinterpret_cast<intptr_t>(to);
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 5 * sizeof(MIPSWord);
+ }
+
+ *insn = (*insn & 0xffff0000) | (diff & 0xffff);
+ return sizeof(MIPSWord);
+ }
+
+ static int linkCallInternal(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn = insn - 4;
+
+ if ((*(insn + 2) & 0xfc000000) == 0x0c000000) { // jal
+ if ((reinterpret_cast<intptr_t>(from) - 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *(insn + 2) = 0x0c000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ return sizeof(MIPSWord);
+ }
+
+ /* lui $25, (to >> 16) & 0xffff */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori $25, $25, to & 0xffff */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ /* jalr $25 */
+ *(insn + 2) = 0x0000f809 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 3 * sizeof(MIPSWord);
+ }
+
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ ASSERT((*(insn + 1) & 0xfc000000) == 0x34000000); // ori
+
+ /* lui */
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = (*(insn + 1) & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ return 2 * sizeof(MIPSWord);
+ }
+
+ AssemblerBuffer m_buffer;
+ Jumps m_jumps;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MIPSAssembler_h
diff --git a/src/3rdparty/masm/assembler/MacroAssembler.h b/src/3rdparty/masm/assembler/MacroAssembler.h
new file mode 100644
index 0000000000..f74680d7fc
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssembler.h
@@ -0,0 +1,1465 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#if CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif CPU(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC {
+typedef MacroAssemblerMIPS MacroAssemblerBase;
+};
+
+#elif CPU(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
+
+#elif CPU(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
+
+#elif CPU(SH4)
+#include "MacroAssemblerSH4.h"
+namespace JSC {
+typedef MacroAssemblerSH4 MacroAssemblerBase;
+};
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
+ using MacroAssemblerBase::pop;
+ using MacroAssemblerBase::jump;
+ using MacroAssemblerBase::branch32;
+ using MacroAssemblerBase::move;
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ using MacroAssemblerBase::add32;
+ using MacroAssemblerBase::and32;
+ using MacroAssemblerBase::branchAdd32;
+ using MacroAssemblerBase::branchMul32;
+ using MacroAssemblerBase::branchSub32;
+ using MacroAssemblerBase::lshift32;
+ using MacroAssemblerBase::or32;
+ using MacroAssemblerBase::rshift32;
+ using MacroAssemblerBase::store32;
+ using MacroAssemblerBase::sub32;
+ using MacroAssemblerBase::urshift32;
+ using MacroAssemblerBase::xor32;
+#endif
+
+ static const double twoToThe32; // This is super useful for some double code.
+
+ // Utilities used by the DFG JIT.
+#if ENABLE(DFG_JIT)
+ using MacroAssemblerBase::invert;
+
+ static DoubleCondition invert(DoubleCondition cond)
+ {
+ switch (cond) {
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return DoubleEqual; // make compiler happy
+ }
+ }
+
+ static bool isInvertible(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static ResultCondition invert(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return Zero; // Make compiler happy for release builds.
+ }
+ }
+#endif
+
+ // Platform-agnostic convenience functions,
+ // described in terms of other macro assembly methods.
+ void pop()
+ {
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
+ }
+
+ void peek(RegisterID dest, int index = 0)
+ {
+ loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ Address addressForPoke(int index)
+ {
+ return Address(stackPointerRegister, (index * sizeof(void*)));
+ }
+
+ void poke(RegisterID src, int index = 0)
+ {
+ storePtr(src, addressForPoke(index));
+ }
+
+ void poke(TrustedImm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(TrustedImmPtr imm, int index = 0)
+ {
+ storePtr(imm, addressForPoke(index));
+ }
+
+#if CPU(X86_64)
+ void peek64(RegisterID dest, int index = 0)
+ {
+ load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ void poke(TrustedImm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+
+ void poke64(RegisterID src, int index = 0)
+ {
+ store64(src, addressForPoke(index));
+ }
+#endif
+
+#if CPU(MIPS)
+ void poke(FPRegisterID src, int index = 0)
+ {
+ ASSERT(!(index & 1));
+ storeDouble(src, addressForPoke(index));
+ }
+#endif
+
+ // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
+ void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+ void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
+ {
+ branch32(cond, op1, op2).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
+ {
+ branch32(cond, left, right).linkTo(target, this);
+ }
+
+ Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
+ {
+ branchTestPtr(cond, reg).linkTo(target, this);
+ }
+
+#if !CPU(ARM_THUMB2)
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtr(cond, left, right));
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
+ }
+
+ PatchableJump patchableJump()
+ {
+ return PatchableJump(jump());
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return PatchableJump(branchTest32(cond, reg, mask));
+ }
+#endif // !CPU(ARM_THUMB2)
+
+#if !CPU(ARM)
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, reg, imm));
+ }
+#endif // !CPU(ARM)
+
+ void jump(Label target)
+ {
+ jump().linkTo(target, this);
+ }
+
+ // Commute a relational condition, returns a new condition that will produce
+ // the same results given the same inputs but with their positions exchanged.
+ static RelationalCondition commute(RelationalCondition condition)
+ {
+ switch (condition) {
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ case GreaterThan:
+ return LessThan;
+ case GreaterThanOrEqual:
+ return LessThanOrEqual;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ default:
+ break;
+ }
+
+ ASSERT(condition == Equal || condition == NotEqual);
+ return condition;
+ }
+
+ static const unsigned BlindingModulus = 64;
+ bool shouldConsiderBlinding()
+ {
+ return !(random() & (BlindingModulus - 1));
+ }
+
+ // Ptr methods
+ // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
+ // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64)
+ void addPtr(Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add32(imm, srcDest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add32(TrustedImm32(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add32(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and32(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and32(imm, srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg32(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or32(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or32(TrustedImm32(imm), dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub32(TrustedImm32(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor32(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor32(imm, srcDest);
+ }
+
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm.asTrustedImmPtr()), dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(cond, left, right, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ void storePtr(ImmPtr imm, Address address)
+ {
+ store32(Imm32(imm.asTrustedImmPtr()), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, void* address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store32WithAddressOffsetPatch(src, address);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, src, dest);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, imm, dest);
+ }
+ using MacroAssemblerBase::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
+ }
+#else
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(Address src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add64(imm, src, dest);
+ }
+
+    // On this 64-bit configuration pointers are 64 bits wide: every
+    // pointer-sized helper below is a thin wrapper that forwards to the
+    // matching 64-bit operation, converting TrustedImmPtr operands to
+    // TrustedImm64 where required.
+    void addPtr(TrustedImm32 imm, Address address)
+    {
+        add64(imm, address);
+    }
+
+    void addPtr(AbsoluteAddress src, RegisterID dest)
+    {
+        add64(src, dest);
+    }
+
+    void addPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        add64(TrustedImm64(imm), dest);
+    }
+
+    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        add64(imm, address);
+    }
+
+    void andPtr(RegisterID src, RegisterID dest)
+    {
+        and64(src, dest);
+    }
+
+    void andPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        and64(imm, srcDest);
+    }
+
+    void negPtr(RegisterID dest)
+    {
+        neg64(dest);
+    }
+
+    void orPtr(RegisterID src, RegisterID dest)
+    {
+        or64(src, dest);
+    }
+
+    void orPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        or64(imm, dest);
+    }
+
+    void orPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        or64(TrustedImm64(imm), dest);
+    }
+
+    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        or64(op1, op2, dest);
+    }
+
+    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        or64(imm, src, dest);
+    }
+
+    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+    {
+        rotateRight64(imm, srcDst);
+    }
+
+    void subPtr(RegisterID src, RegisterID dest)
+    {
+        sub64(src, dest);
+    }
+
+    void subPtr(TrustedImm32 imm, RegisterID dest)
+    {
+        sub64(imm, dest);
+    }
+
+    void subPtr(TrustedImmPtr imm, RegisterID dest)
+    {
+        sub64(TrustedImm64(imm), dest);
+    }
+
+    void xorPtr(RegisterID src, RegisterID dest)
+    {
+        xor64(src, dest);
+    }
+
+    void xorPtr(RegisterID src, Address dest)
+    {
+        xor64(src, dest);
+    }
+
+    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+    {
+        xor64(imm, srcDest);
+    }
+
+    // Pointer-width loads and stores, including the patchable variants used
+    // by the repatching machinery (DataLabel32 / DataLabelCompact results
+    // identify the offset field that can be rewritten later).
+    void loadPtr(ImplicitAddress address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    void loadPtr(BaseIndex address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    void loadPtr(const void* address, RegisterID dest)
+    {
+        load64(address, dest);
+    }
+
+    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load64WithAddressOffsetPatch(address, dest);
+    }
+
+    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        return load64WithCompactAddressOffsetPatch(address, dest);
+    }
+
+    void storePtr(RegisterID src, ImplicitAddress address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(RegisterID src, BaseIndex address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(RegisterID src, void* address)
+    {
+        store64(src, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+    {
+        store64(TrustedImm64(imm), address);
+    }
+
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
+    {
+        store64(TrustedImm64(imm), address);
+    }
+
+    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        return store64WithAddressOffsetPatch(src, address);
+    }
+
+    // Pointer-width compares, tests and branches; semantics match the
+    // corresponding 64-bit primitives.
+    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+    {
+        compare64(cond, left, right, dest);
+    }
+
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        compare64(cond, left, right, dest);
+    }
+
+    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        test64(cond, reg, mask, dest);
+    }
+
+    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+    {
+        test64(cond, reg, mask, dest);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+    {
+        return branch64(cond, left, TrustedImm64(right));
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+    {
+        return branch64(cond, left, TrustedImm64(right));
+    }
+
+    // The default mask of -1 tests every bit of the operand.
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+    {
+        return branchTest64(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, reg, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+    {
+        return branchTest64(cond, address, reg);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        return branchTest64(cond, address, mask);
+    }
+
+    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchAdd64(cond, imm, dest);
+    }
+
+    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchAdd64(cond, src, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        return branchSub64(cond, imm, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchSub64(cond, src, dest);
+    }
+
+    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+    {
+        return branchSub64(cond, src1, src2, dest);
+    }
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+    // Pull the base-class overloads into scope so the blinded overloads
+    // defined below overload rather than hide them.
+    using MacroAssemblerBase::and64;
+    using MacroAssemblerBase::convertInt32ToDouble;
+    using MacroAssemblerBase::store64;
+    // Decides whether a double constant's bit pattern is attacker-useful
+    // enough that it must be blinded before being emitted into JIT code.
+    // Returns false only for a small set of "boring" values (finite,
+    // normalised, fraction a multiple of 1/8, magnitude <= 0xff).
+    bool shouldBlindDouble(double value)
+    {
+        // Don't trust NaN or +/-Infinity
+        if (!std::isfinite(value))
+            return shouldConsiderBlinding();
+
+        // Try to force normalisation, and check that there's no change
+        // in the bit pattern
+        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
+            return shouldConsiderBlinding();
+
+        // NOTE(review): if no floating-point overload of abs() is in scope this
+        // resolves to the integer abs() and truncates -- confirm; fabs() would
+        // be unambiguous here.
+        value = abs(value);
+        // Only allow a limited set of fractional components
+        double scaledValue = value * 8;
+        if (scaledValue / 8 != value)
+            return shouldConsiderBlinding();
+        double frac = scaledValue - floor(scaledValue);
+        if (frac != 0.0)
+            return shouldConsiderBlinding();
+
+        return value > 0xff;
+    }
+
+    // Decides whether an untrusted pointer immediate needs blinding.
+    // Common all-ones-style masks and values fitting in a byte (or whose
+    // complement fits in a byte) are considered safe.
+    bool shouldBlind(ImmPtr imm)
+    {
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // Debug always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#endif
+
+        // First off we'll special case common, "safe" values to avoid hurting
+        // performance too much
+        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffffL:
+        case 0xffffffffffL:
+        case 0xffffffffffffL:
+        case 0xffffffffffffffL:
+        case 0xffffffffffffffffL:
+            return false;
+        default: {
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+        }
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindForSpecificArch(value);
+    }
+
+    // A pointer constant stored pre-rotated; 'rotation' is the right-rotate
+    // count needed to recover the original value at runtime.
+    struct RotatedImmPtr {
+        RotatedImmPtr(uintptr_t v1, uint8_t v2)
+            : value(v1)
+            , rotation(v2)
+        {
+        }
+        TrustedImmPtr value;
+        TrustedImm32 rotation;
+    };
+
+    // Blinds a pointer immediate by left-rotating it a random number of bit
+    // positions (0..bits-1), so the raw value never appears in the code.
+    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
+    {
+        uint8_t rotation = random() % (sizeof(void*) * 8);
+        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
+        return RotatedImmPtr(value, rotation);
+    }
+
+    // Materialises a rotation-blinded pointer: load the rotated bits, then
+    // rotate right to undo the blinding.
+    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
+    {
+        move(constant.value, dest);
+        rotateRightPtr(constant.rotation, dest);
+    }
+
+    // Decides whether a 64-bit immediate needs blinding. In addition to the
+    // byte/complement/mask special cases used for pointers, values that decode
+    // to "safe" JSValues (int32 payloads, or doubles passing
+    // shouldBlindDouble) are exempted.
+    bool shouldBlind(Imm64 imm)
+    {
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // Debug always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#endif
+
+        // First off we'll special case common, "safe" values to avoid hurting
+        // performance too much
+        uint64_t value = imm.asTrustedImm64().m_value;
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffffL:
+        case 0xffffffffffL:
+        case 0xffffffffffffL:
+        case 0xffffffffffffffL:
+        case 0xffffffffffffffffL:
+            return false;
+        default: {
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+
+            // Interpret the bits as a boxed JSValue and as a raw double; if
+            // either view is harmless, skip blinding.
+            JSValue jsValue = JSValue::decode(value);
+            if (jsValue.isInt32())
+                return shouldBlind(Imm32(jsValue.asInt32()));
+            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+                return false;
+
+            if (!shouldBlindDouble(bitwise_cast<double>(value)))
+                return false;
+        }
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindForSpecificArch(value);
+    }
+
+    // A 64-bit constant stored pre-rotated; 'rotation' is the right-rotate
+    // count needed to recover the original value at runtime.
+    struct RotatedImm64 {
+        RotatedImm64(uint64_t v1, uint8_t v2)
+            : value(v1)
+            , rotation(v2)
+        {
+        }
+        TrustedImm64 value;
+        TrustedImm32 rotation;
+    };
+
+    // Blinds a 64-bit immediate by left-rotating it a random 0..63 bits.
+    RotatedImm64 rotationBlindConstant(Imm64 imm)
+    {
+        uint8_t rotation = random() % (sizeof(int64_t) * 8);
+        uint64_t value = imm.asTrustedImm64().m_value;
+        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+        return RotatedImm64(value, rotation);
+    }
+
+    // Materialises a rotation-blinded 64-bit constant: load the rotated bits,
+    // then rotate right to undo the blinding.
+    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
+    {
+        move(constant.value, dest);
+        rotateRight64(constant.rotation, dest);
+    }
+
+    // Blinded overloads of 64-bit/pointer operations taking untrusted (Imm*)
+    // immediates. Each one checks shouldBlind(); if blinding is needed the
+    // constant is reconstructed in a scratch register (via xor- or
+    // rotation-blinding) and the register form of the operation is used,
+    // otherwise it falls through to the trusted-immediate form.
+    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+            convertInt32ToDouble(scratchRegister, dest);
+        } else
+            convertInt32ToDouble(imm.asTrustedImm32(), dest);
+    }
+
+    void move(ImmPtr imm, RegisterID dest)
+    {
+        if (shouldBlind(imm))
+            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImmPtr(), dest);
+    }
+
+    void move(Imm64 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm))
+            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImm64(), dest);
+    }
+
+    void and64(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            // AND is applied as two successive ANDs whose conjunction equals
+            // the original mask (see andBlindedConstant).
+            BlindedImm32 key = andBlindedConstant(imm);
+            and64(key.value1, dest);
+            and64(key.value2, dest);
+        } else
+            and64(imm.asTrustedImm32(), dest);
+    }
+
+    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+    {
+        if (shouldBlind(right)) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
+            return branchPtr(cond, left, scratchRegister);
+        }
+        return branchPtr(cond, left, right.asTrustedImmPtr());
+    }
+
+    void storePtr(ImmPtr imm, Address dest)
+    {
+        if (shouldBlind(imm)) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+            storePtr(scratchRegister, dest);
+        } else
+            storePtr(imm.asTrustedImmPtr(), dest);
+    }
+
+    void store64(Imm64 imm, Address dest)
+    {
+        if (shouldBlind(imm)) {
+            RegisterID scratchRegister = scratchRegisterForBlinding();
+            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+            store64(scratchRegister, dest);
+        } else
+            store64(imm.asTrustedImm64(), dest);
+    }
+
+#endif
+
+#endif // !CPU(X86_64)
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+    // Decides whether a 32-bit untrusted immediate needs blinding; mirrors
+    // the 64-bit logic with 32-bit "safe" masks.
+    bool shouldBlind(Imm32 imm)
+    {
+#if ENABLE(FORCED_JIT_BLINDING)
+        UNUSED_PARAM(imm);
+        // Debug always blind all constants, if only so we know
+        // if we've broken blinding during patch development.
+        return true;
+#else
+
+        // First off we'll special case common, "safe" values to avoid hurting
+        // performance too much
+        uint32_t value = imm.asTrustedImm32().m_value;
+        switch (value) {
+        case 0xffff:
+        case 0xffffff:
+        case 0xffffffff:
+            return false;
+        default:
+            if (value <= 0xff)
+                return false;
+            if (~value <= 0xff)
+                return false;
+        }
+
+        if (!shouldConsiderBlinding())
+            return false;
+
+        return shouldBlindForSpecificArch(value);
+#endif
+    }
+
+    // A constant split into two 32-bit halves; how the halves recombine
+    // (xor, add, and, or) depends on which builder produced them.
+    struct BlindedImm32 {
+        BlindedImm32(int32_t v1, int32_t v2)
+            : value1(v1)
+            , value2(v2)
+        {
+        }
+        TrustedImm32 value1;
+        TrustedImm32 value2;
+    };
+
+    // Produces a random key no wider than 'value' (mask is set to the
+    // smallest all-ones byte-multiple covering value), so the blinded parts
+    // stay in the same magnitude class as the original constant.
+    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
+    {
+        uint32_t key = random();
+        if (value <= 0xff)
+            mask = 0xff;
+        else if (value <= 0xffff)
+            mask = 0xffff;
+        else if (value <= 0xffffff)
+            mask = 0xffffff;
+        else
+            mask = 0xffffffff;
+        return key & mask;
+    }
+
+    // Convenience overload when the caller does not need the mask back.
+    uint32_t keyForConstant(uint32_t value)
+    {
+        uint32_t mask = 0;
+        return keyForConstant(value, mask);
+    }
+
+    // Splits imm into (imm ^ key, key): XOR-ing the parts recovers imm.
+    BlindedImm32 xorBlindConstant(Imm32 imm)
+    {
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t key = keyForConstant(baseValue);
+        return BlindedImm32(baseValue ^ key, key);
+    }
+
+    // Splits imm into (imm - key, key): adding the parts recovers imm.
+    BlindedImm32 additionBlindedConstant(Imm32 imm)
+    {
+        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
+        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
+
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
+        // Keep key <= baseValue so that baseValue - key cannot underflow.
+        if (key > baseValue)
+            key = key - baseValue;
+        return BlindedImm32(baseValue - key, key);
+    }
+
+    // Splits imm into two masks whose AND equals imm (within 'mask' bits):
+    // each part carries the bits of imm plus the complement of the key's
+    // respective half, so AND-ing cancels the extra bits.
+    BlindedImm32 andBlindedConstant(Imm32 imm)
+    {
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t mask = 0;
+        uint32_t key = keyForConstant(baseValue, mask);
+        ASSERT((baseValue & mask) == baseValue);
+        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
+    }
+
+    // Splits imm into two disjoint bit sets whose OR equals imm.
+    BlindedImm32 orBlindedConstant(Imm32 imm)
+    {
+        uint32_t baseValue = imm.asTrustedImm32().m_value;
+        uint32_t mask = 0;
+        uint32_t key = keyForConstant(baseValue, mask);
+        ASSERT((baseValue & mask) == baseValue);
+        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
+    }
+
+    // Materialises an xor-blinded constant: move first half, XOR in second.
+    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
+    {
+        move(constant.value1, dest);
+        xor32(constant.value2, dest);
+    }
+
+    // Blinded overloads of 32-bit operations taking untrusted immediates.
+    // Each applies the operation twice with the two blinded halves (whose
+    // combination under that operation equals the original immediate), or
+    // falls through to the trusted form when blinding is unnecessary.
+    void add32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            add32(key.value1, dest);
+            add32(key.value2, dest);
+        } else
+            add32(imm.asTrustedImm32(), dest);
+    }
+
+    void addPtr(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            addPtr(key.value1, dest);
+            addPtr(key.value2, dest);
+        } else
+            addPtr(imm.asTrustedImm32(), dest);
+    }
+
+    void and32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            and32(key.value1, dest);
+            and32(key.value2, dest);
+        } else
+            and32(imm.asTrustedImm32(), dest);
+    }
+
+    void andPtr(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = andBlindedConstant(imm);
+            andPtr(key.value1, dest);
+            andPtr(key.value2, dest);
+        } else
+            andPtr(imm.asTrustedImm32(), dest);
+    }
+
+    void and32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            // When src == dest the two-operand blinded form above is usable;
+            // otherwise build the constant in dest and AND src into it.
+            if (src == dest)
+                return and32(imm.asTrustedImm32(), dest);
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            and32(src, dest);
+        } else
+            and32(imm.asTrustedImm32(), src, dest);
+    }
+
+    void move(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm))
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+        else
+            move(imm.asTrustedImm32(), dest);
+    }
+
+    void or32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            if (src == dest)
+                return or32(imm, dest);
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            or32(src, dest);
+        } else
+            or32(imm.asTrustedImm32(), src, dest);
+    }
+
+    void or32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = orBlindedConstant(imm);
+            or32(key.value1, dest);
+            or32(key.value2, dest);
+        } else
+            or32(imm.asTrustedImm32(), dest);
+    }
+
+    // Pokes of untrusted immediates to stack slots; blinding happens inside
+    // the store32/storePtr/store64 overloads these forward to.
+    void poke(Imm32 value, int index = 0)
+    {
+        store32(value, addressForPoke(index));
+    }
+
+    void poke(ImmPtr value, int index = 0)
+    {
+        storePtr(value, addressForPoke(index));
+    }
+
+#if CPU(X86_64)
+    void poke(Imm64 value, int index = 0)
+    {
+        store64(value, addressForPoke(index));
+    }
+#endif
+
+    // Blinded store of a 32-bit immediate. On x86 the blinding is done
+    // directly in memory (store one half, XOR the other into the slot);
+    // elsewhere a scratch register is used when available, and when none is
+    // available the store is left unblinded but preceded by a random number
+    // of nops to perturb instruction layout.
+    void store32(Imm32 imm, Address dest)
+    {
+        if (shouldBlind(imm)) {
+#if CPU(X86) || CPU(X86_64)
+            BlindedImm32 blind = xorBlindConstant(imm);
+            store32(blind.value1, dest);
+            xor32(blind.value2, dest);
+#else
+            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+                store32(scratchRegister, dest);
+            } else {
+                // If we don't have a scratch register available for use, we'll just
+                // place a random number of nops.
+                uint32_t nopCount = random() & 3;
+                while (nopCount--)
+                    nop();
+                store32(imm.asTrustedImm32(), dest);
+            }
+#endif
+        } else
+            store32(imm.asTrustedImm32(), dest);
+    }
+
+    // Blinded subtraction: subtract both addition-blinded halves
+    // (value1 + value2 == imm, so dest - value1 - value2 == dest - imm).
+    void sub32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            sub32(key.value1, dest);
+            sub32(key.value2, dest);
+        } else
+            sub32(imm.asTrustedImm32(), dest);
+    }
+
+    void subPtr(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 key = additionBlindedConstant(imm);
+            subPtr(key.value1, dest);
+            subPtr(key.value2, dest);
+        } else
+            subPtr(imm.asTrustedImm32(), dest);
+    }
+
+    // Blinded XOR: XOR both xor-blinded halves in succession.
+    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 blind = xorBlindConstant(imm);
+            xor32(blind.value1, src, dest);
+            xor32(blind.value2, dest);
+        } else
+            xor32(imm.asTrustedImm32(), src, dest);
+    }
+
+    void xor32(Imm32 imm, RegisterID dest)
+    {
+        if (shouldBlind(imm)) {
+            BlindedImm32 blind = xorBlindConstant(imm);
+            xor32(blind.value1, dest);
+            xor32(blind.value2, dest);
+        } else
+            xor32(imm.asTrustedImm32(), dest);
+    }
+
+    // Blinded compare-and-branch: reconstruct the constant in a scratch
+    // register when possible; otherwise emit random nops and compare against
+    // the raw immediate.
+    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
+    {
+        if (shouldBlind(right)) {
+            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
+                return branch32(cond, left, scratchRegister);
+            }
+            // If we don't have a scratch register available for use, we'll just
+            // place a random number of nops.
+            uint32_t nopCount = random() & 3;
+            while (nopCount--)
+                nop();
+            return branch32(cond, left, right.asTrustedImm32());
+        }
+
+        return branch32(cond, left, right.asTrustedImm32());
+    }
+
+    // Blinded add-and-branch. The blinded constant is built in dest, so when
+    // src aliases dest the source must first be saved to the scratch register
+    // (hence the assertion that one exists in that case).
+    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+    {
+        if (src == dest)
+            ASSERT(scratchRegisterForBlinding());
+
+        if (shouldBlind(imm)) {
+            if (src == dest) {
+                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+                    move(src, scratchRegister);
+                    src = scratchRegister;
+                }
+            }
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            return branchAdd32(cond, src, dest);
+        }
+        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
+    }
+
+    // Blinded multiply-and-branch; same src == dest aliasing caveat as above.
+    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+    {
+        if (src == dest)
+            ASSERT(scratchRegisterForBlinding());
+
+        if (shouldBlind(imm)) {
+            if (src == dest) {
+                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+                    move(src, scratchRegister);
+                    src = scratchRegister;
+                }
+            }
+            loadXorBlindedConstant(xorBlindConstant(imm), dest);
+            return branchMul32(cond, src, dest);
+        }
+        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
+    }
+
+    // branchSub32 takes a scratch register as 32 bit platforms make use of this,
+    // with src == dst, and on x86-32 we don't have a platform scratch register.
+    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
+    {
+        if (shouldBlind(imm)) {
+            ASSERT(scratch != dest);
+            ASSERT(scratch != src);
+            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
+            return branchSub32(cond, src, scratch, dest);
+        }
+        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
+    }
+
+    // Immediate shifts only have 5 controllable bits
+    // so we'll consider them safe for now.
+    // (Shift amounts are therefore masked to 0..31 and never blinded.)
+    TrustedImm32 trustedImm32ForShift(Imm32 imm)
+    {
+        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+    }
+
+    void lshift32(Imm32 imm, RegisterID dest)
+    {
+        lshift32(trustedImm32ForShift(imm), dest);
+    }
+
+    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
+    {
+        lshift32(src, trustedImm32ForShift(amount), dest);
+    }
+
+    void rshift32(Imm32 imm, RegisterID dest)
+    {
+        rshift32(trustedImm32ForShift(imm), dest);
+    }
+
+    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
+    {
+        rshift32(src, trustedImm32ForShift(amount), dest);
+    }
+
+    void urshift32(Imm32 imm, RegisterID dest)
+    {
+        urshift32(trustedImm32ForShift(imm), dest);
+    }
+
+    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
+    {
+        urshift32(src, trustedImm32ForShift(amount), dest);
+    }
+#endif
+};
+
+} // namespace JSC
+
+#else // ENABLE(ASSEMBLER)
+
+// If there is no assembler for this platform, at least allow code to make references to
+// some of the things it would otherwise define, albeit without giving that code any way
+// of doing anything useful.
+// Placeholder used when no assembler backend exists for the target: the
+// private constructor makes the class uninstantiable, while the enums let
+// dependent code still name RegisterID/FPRegisterID.
+class MacroAssembler {
+private:
+    MacroAssembler() { }
+
+public:
+
+    enum RegisterID { NoRegister };
+    enum FPRegisterID { NoFPRegister };
+};
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp b/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp
new file mode 100644
index 0000000000..98dc3e9879
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "MacroAssemblerARM.h"
+
+#if OS(LINUX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <asm/hwcap.h>
+#endif
+
+namespace JSC {
+
+// Runtime probe for an ARM VFP (floating point) unit. On Linux it scans the
+// process's ELF auxiliary vector for AT_HWCAP and tests the HWCAP_VFP bit;
+// otherwise (or if the probe fails) it falls back to compile-time FPU macros.
+static bool isVFPPresent()
+{
+#if OS(LINUX)
+    // NOTE(review): fd 0 is a valid descriptor, so 'fd > 0' would wrongly
+    // skip the probe if stdin were closed -- 'fd >= 0' is the usual check;
+    // confirm whether this matters here.
+    int fd = open("/proc/self/auxv", O_RDONLY);
+    if (fd > 0) {
+        Elf32_auxv_t aux;
+        while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
+            if (aux.a_type == AT_HWCAP) {
+                close(fd);
+                return aux.a_un.a_val & HWCAP_VFP;
+            }
+        }
+        close(fd);
+    }
+#endif
+
+    // Compile-time fallback: trust the toolchain's target-FPU configuration.
+#if (COMPILER(RVCT) && defined(__TARGET_FPU_VFP)) || (COMPILER(GCC) && defined(__VFP_FP__))
+    return true;
+#else
+    return false;
+#endif
+}
+
+const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
+
+#if CPU(ARMV5_OR_LOWER)
+/* On ARMv5 and below, natural alignment is required. */
+// Loads a 32-bit value that is only halfword-aligned by issuing two 16-bit
+// loads (low half into dest, high half into scratch S0) and combining them
+// with dest |= S0 << 16. Three addressing strategies are used depending on
+// whether offset and offset+2 fit the halfword-transfer immediate range
+// (8-bit) in the positive, negative, or neither direction.
+void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+{
+    ARMWord op2;
+
+    ASSERT(address.scale >= 0 && address.scale <= 3);
+    op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
+
+    if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {
+        // Both halves reachable with positive immediate offsets.
+        m_assembler.add(ARMRegisters::S0, address.base, op2);
+        m_assembler.halfDtrUp(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset));
+        m_assembler.halfDtrUp(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset + 0x2));
+    } else if (address.offset < 0 && address.offset >= -0xff) {
+        // Both halves reachable with negative immediate offsets.
+        m_assembler.add(ARMRegisters::S0, address.base, op2);
+        m_assembler.halfDtrDown(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset));
+        m_assembler.halfDtrDown(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset - 0x2));
+    } else {
+        // Offset out of immediate range: compute offset+index in S0 and use
+        // register-offset halfword loads.
+        m_assembler.moveImm(address.offset, ARMRegisters::S0);
+        m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, op2);
+        m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, dest, address.base, ARMRegisters::S0);
+        m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::Op2Immediate | 0x2);
+        m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, ARMRegisters::S0, address.base, ARMRegisters::S0);
+    }
+    m_assembler.orr(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16));
+}
+#endif
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM.h b/src/3rdparty/masm/assembler/MacroAssemblerARM.h
new file mode 100644
index 0000000000..01e34c97cd
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM.h
@@ -0,0 +1,1386 @@
+/*
+ * Copyright (C) 2008 Apple Inc.
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARM_h
+#define MacroAssemblerARM_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+ static const int DoubleConditionMask = 0x0f;
+ static const int DoubleConditionBitSpecial = 0x10;
+ COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
+public:
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+
+    // Portable condition names mapped onto raw ARM condition codes.
+    enum RelationalCondition {
+        Equal = ARMAssembler::EQ,
+        NotEqual = ARMAssembler::NE,
+        Above = ARMAssembler::HI,
+        AboveOrEqual = ARMAssembler::CS,
+        Below = ARMAssembler::CC,
+        BelowOrEqual = ARMAssembler::LS,
+        GreaterThan = ARMAssembler::GT,
+        GreaterThanOrEqual = ARMAssembler::GE,
+        LessThan = ARMAssembler::LT,
+        LessThanOrEqual = ARMAssembler::LE
+    };
+
+    // Conditions testable after a flag-setting (s-suffixed) ALU operation.
+    enum ResultCondition {
+        Overflow = ARMAssembler::VS,
+        Signed = ARMAssembler::MI,
+        Zero = ARMAssembler::EQ,
+        NonZero = ARMAssembler::NE
+    };
+
+    // Double-precision comparison conditions; entries tagged with
+    // DoubleConditionBitSpecial need extra handling beyond a plain ARM
+    // condition code (the bit is stripped before emission).
+    enum DoubleCondition {
+        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+        DoubleEqual = ARMAssembler::EQ,
+        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
+        DoubleGreaterThan = ARMAssembler::GT,
+        DoubleGreaterThanOrEqual = ARMAssembler::GE,
+        DoubleLessThan = ARMAssembler::CC,
+        DoubleLessThanOrEqual = ARMAssembler::LS,
+        // If either operand is NaN, these conditions always evaluate to true.
+        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
+        DoubleNotEqualOrUnordered = ARMAssembler::NE,
+        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
+        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
+        DoubleLessThanOrUnordered = ARMAssembler::LT,
+        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
+    };
+
+    static const RegisterID stackPointerRegister = ARMRegisters::sp;
+    static const RegisterID linkRegister = ARMRegisters::lr;
+
+    // Pointer-sized elements scale by 4 (32-bit ARM pointers).
+    static const Scale ScalePtr = TimesFour;
+
+    // 32-bit adds. ARMRegisters::S0/S1 are assembler-reserved scratch
+    // registers: S0 receives materialised immediates (via getImm/moveImm),
+    // S1 stages memory operands for read-modify-write sequences.
+    void add32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.adds(dest, dest, src);
+    }
+
+    void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.adds(dest, op1, op2);
+    }
+
+    void add32(TrustedImm32 imm, Address address)
+    {
+        // load / add / store through scratch S1.
+        load32(address, ARMRegisters::S1);
+        add32(imm, ARMRegisters::S1);
+        store32(ARMRegisters::S1, address);
+    }
+
+    void add32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.adds(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void add32(AbsoluteAddress src, RegisterID dest)
+    {
+        // Materialise the absolute address in S1, load through it, then add.
+        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0);
+        add32(ARMRegisters::S1, dest);
+    }
+
+    void add32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        add32(ARMRegisters::S1, dest);
+    }
+
+    void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.adds(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    // 32-bit ANDs. When getImm() returns an inverted-immediate encoding
+    // (Op2InvertedImmediate flag), BIC (and-not) is emitted instead of AND.
+    void and32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.bitAnds(dest, dest, src);
+    }
+
+    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.bitAnds(dest, op1, op2);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID dest)
+    {
+        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+        if (w & ARMAssembler::Op2InvertedImmediate)
+            m_assembler.bics(dest, dest, w & ~ARMAssembler::Op2InvertedImmediate);
+        else
+            m_assembler.bitAnds(dest, dest, w);
+    }
+
+    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
+        if (w & ARMAssembler::Op2InvertedImmediate)
+            m_assembler.bics(dest, src, w & ~ARMAssembler::Op2InvertedImmediate);
+        else
+            m_assembler.bitAnds(dest, src, w);
+    }
+
+    void and32(Address src, RegisterID dest)
+    {
+        load32(src, ARMRegisters::S1);
+        and32(ARMRegisters::S1, dest);
+    }
+
+    // Left shifts. Register shift amounts are masked to 0..31 (via S0)
+    // to match the 5-bit immediate-shift semantics.
+    void lshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        lshift32(dest, shiftAmount, dest);
+    }
+
+    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+        m_assembler.movs(dest, m_assembler.lslRegister(src, ARMRegisters::S0));
+    }
+
+    void lshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
+    }
+
+    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
+    }
+
+    // 32-bit multiply. Operands are rearranged (or op2 copied to S0) so that
+    // the destination register never aliases the second operand --
+    // presumably to satisfy the ARM MUL Rd != Rm restriction on older cores;
+    // confirm against the target architecture manual.
+    void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op2 == dest) {
+            if (op1 == dest) {
+                move(op2, ARMRegisters::S0);
+                op2 = ARMRegisters::S0;
+            } else {
+                // Swap the operands.
+                RegisterID tmp = op1;
+                op1 = op2;
+                op2 = tmp;
+            }
+        }
+        m_assembler.muls(dest, op1, op2);
+    }
+
+    void mul32(RegisterID src, RegisterID dest)
+    {
+        mul32(src, dest, dest);
+    }
+
+    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(imm, ARMRegisters::S0);
+        m_assembler.muls(dest, src, ARMRegisters::S0);
+    }
+
+    // Negate via reverse-subtract from zero (0 - srcDest).
+    void neg32(RegisterID srcDest)
+    {
+        m_assembler.rsbs(srcDest, srcDest, ARMAssembler::getOp2Byte(0));
+    }
+
+    // 32-bit ORs.
+    void or32(RegisterID src, RegisterID dest)
+    {
+        m_assembler.orrs(dest, dest, src);
+    }
+
+    void or32(RegisterID src, AbsoluteAddress dest)
+    {
+        // Address in S0, value staged in S1 for the read-modify-write.
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+        or32(src, ARMRegisters::S1);
+        store32(ARMRegisters::S1, ARMRegisters::S0);
+    }
+
+    void or32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
+    }
+
+    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        m_assembler.orrs(dest, op1, op2);
+    }
+
+    // Arithmetic (sign-extending) right shifts; register amounts are masked
+    // to 0..31 via S0.
+    void rshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        rshift32(dest, shiftAmount, dest);
+    }
+
+    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+        m_assembler.movs(dest, m_assembler.asrRegister(src, ARMRegisters::S0));
+    }
+
+    void rshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        rshift32(dest, imm, dest);
+    }
+
+    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
+    }
+
+    // Logical (zero-filling) right shifts.
+    void urshift32(RegisterID shiftAmount, RegisterID dest)
+    {
+        urshift32(dest, shiftAmount, dest);
+    }
+
+    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+    {
+        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
+        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);
+
+        m_assembler.movs(dest, m_assembler.lsrRegister(src, ARMRegisters::S0));
+    }
+
+    void urshift32(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
+    }
+
+    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
+    }
+
    // dest -= src (flag-setting, so ResultCondition branches may follow).
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs(dest, dest, src);
    }

    // dest -= imm; immediate encoded via getImm with S0 as fallback scratch.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // *address -= imm, via load/modify/store through scratch S1.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    // dest -= *src.
    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    // dest = src - imm.
    void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // dest ^= src (flag-setting).
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors(dest, dest, src);
    }

    // dest = op1 ^ op2.
    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eors(dest, op1, op2);
    }

    // dest ^= imm. XOR with -1 is bitwise NOT, so emit mvns instead of
    // encoding the (unencodable as op2) all-ones immediate.
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns(dest, dest);
        else
            m_assembler.eors(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // dest = src ^ imm, with the same NOT special case as above.
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns(dest, src);
        else
            m_assembler.eors(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // dest = number of leading zero bits in src. The clz instruction only
    // exists on ARMv5 and later; on older cores this operation is not
    // supported and must not be reached.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.clz(dest, src);
#else
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        RELEASE_ASSERT_NOT_REACHED();
#endif
    }
+
    // Zero-extending byte load from base+offset.
    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.offset);
    }

    // Zero-extending byte load from base + (index << scale) + offset.
    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Sign-extending byte load. Routed through the 16-bit transfer encoder:
    // signed-byte loads share its instruction format.
    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Zero-extending halfword load from base+offset.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.offset);
    }

    // Zero-extending halfword load, base-index form.
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Sign-extending halfword load, base-index form.
    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // 32-bit word load from base+offset.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.offset);
    }

    // 32-bit word load, base-index form.
    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

#if CPU(ARMV5_OR_LOWER)
    // Pre-ARMv6 cores cannot do unaligned word loads; out-of-line
    // implementation assembles the word from halfwords.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    // ARMv6+ supports unaligned word access, so a plain load suffices.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

    // NOTE(review): unconditionally delegates to load16; presumably ARM
    // halfword loads used here tolerate the required (mis)alignment on the
    // targeted cores -- confirm against dataTransfer16's addressing modes.
    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
+
    // Emit a word load whose instruction can later be converted (patched);
    // records a label at the emission point. The offset must fit the narrow
    // 0..255 range the convertible encoding supports.
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(address.offset >= 0 && address.offset <= 255);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        return result;
    }

    // Emit a load whose 32-bit offset can be patched later: a unique
    // constant-pool ldr materializes the (initially 0) offset into S0, then a
    // register-offset load uses it. Returns the label used for patching.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
        m_assembler.dtrUpRegister(ARMAssembler::LoadUint32, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -4095 && value <= 4095;
+ }
+
    // Patchable load whose offset fits a single instruction's immediate field
    // (see isCompactPtrAlignedAddressOffset). Chooses the add-offset or
    // subtract-offset encoding by the sign of the initial offset.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        if (address.offset >= 0)
            m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        else
            m_assembler.dtrDown(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        return dataLabel;
    }

    // Store counterpart of load32WithAddressOffsetPatch: patchable 32-bit
    // offset materialized into S0 via a unique constant-pool ldr.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
        m_assembler.dtrUpRegister(ARMAssembler::StoreUint32, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }
+
    // Byte store, base-index form.
    void store8(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Byte store of an immediate to an absolute address; S0 holds the
    // address, S1 the value.
    void store8(TrustedImm32 imm, const void* address)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
        move(imm, ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
    }

    // Halfword store, base-index form.
    void store16(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Word store to base+offset.
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(ARMAssembler::StoreUint32, src, address.base, address.offset);
    }

    // Word store, base-index form.
    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Word store of an immediate; value staged in S1.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    // Word store of an immediate, base-index form.
    void store32(TrustedImm32 imm, BaseIndex address)
    {
        move(imm, ARMRegisters::S1);
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, ARMRegisters::S1, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Word store to an absolute address; the address constant is loaded into
    // S0 from the constant pool.
    void store32(RegisterID src, const void* address)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtrUp(ARMAssembler::StoreUint32, src, ARMRegisters::S0, 0);
    }

    // Word store of an immediate to an absolute address; S0 = address,
    // S1 = value.
    void store32(TrustedImm32 imm, const void* address)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
+
    // Pop one word from the machine stack into dest.
    void pop(RegisterID dest)
    {
        m_assembler.pop(dest);
    }

    // Push one register onto the machine stack.
    void push(RegisterID src)
    {
        m_assembler.push(src);
    }

    // Push a word loaded from memory (staged in S1).
    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    // Push an immediate (staged in S0).
    void push(TrustedImm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

    // Load a 32-bit immediate into dest (moveImm picks mov/mvn/constant-pool
    // as needed).
    void move(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.moveImm(imm.m_value, dest);
    }

    // Register-to-register move; elided when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov(dest, src);
    }

    // Pointer immediate move; pointers are 32-bit on this target, so it is
    // just the TrustedImm32 move.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }

    // Exchange two registers through scratch S0.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, ARMRegisters::S0);
        move(reg2, reg1);
        move(ARMRegisters::S0, reg2);
    }

    // Registers are already pointer-width (32-bit), so extension is a move.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    // See signExtend32ToPtr: no-op widening on a 32-bit target.
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }
+
    // Compare a byte in memory against an immediate and branch on 'cond'.
    // The byte is zero-extended into scratch S1.
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Base-index form; the immediate must fit in the low byte (the loaded
    // value is zero-extended, so high bits could never match).
    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Compare two registers and emit a conditional jump. 'useConstantPool'
    // is forwarded to jmp() to control how the branch target is emitted.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    // Register/immediate compare-and-branch; internalCompare32 may emit cmn
    // with the negated immediate when that encodes more cheaply.
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
    {
        internalCompare32(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    // Register vs. memory word.
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    // Memory word vs. register.
    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Memory word vs. immediate.
    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Base-index memory word vs. immediate.
    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // As above, but tolerates a word that is only halfword-aligned.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Test a byte in memory against 'mask' and branch on Zero/NonZero.
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    // Absolute-address byte test; address and value both stage through S1.
    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    // Branch on (reg & mask) being zero / non-zero.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // Immediate-mask test. getImm(..., true) may return an inverted
    // immediate; in that case the test is performed with bics (clear the
    // inverted bits into S0, flags reflect reg & mask) instead of tst.
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::Op2InvertedImmediate)
            m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
        else
            m_assembler.tst(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // Memory-word test.
    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    // Base-index memory-word test.
    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }
+
    // Unconditional jump with a linkable target.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    // Indirect jump through a register.
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    // Indirect jump through memory: loading straight into pc transfers
    // control.
    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }

    // Indirect jump through an absolute memory cell (address staged in S0).
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0);
        load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
    }

    // Split a double VFP register into two core registers.
    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.vmov(dest1, dest2, src);
    }

    // Combine two core registers into a double VFP register. The trailing
    // FPRegisterID scratch parameter is unused on this port.
    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
    {
        m_assembler.vmov(dest, src1, src2);
    }
+
    // dest += src, then branch on the resulting flags (Overflow/Signed/
    // Zero/NonZero). Relies on add32 emitting the flag-setting add.
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest = op1 + op2, branch on flags.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(op1, op2, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest += imm, branch on flags.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest = src + imm, branch on flags.
    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // *dest += imm, branch on flags of the (low-word) add.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // Signed multiply with overflow detection: a long multiply puts the high
    // 32 bits of the product into S1, then the compare against
    // asr(dest, 31) (the sign-extension of the low word) sets NE exactly
    // when the product did not fit in 32 bits. Operand shuffling keeps op2
    // distinct from dest, as required when dest doubles as the low result.
    void mull32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest) {
            if (op1 == dest) {
                move(op2, ARMRegisters::S0);
                op2 = ARMRegisters::S0;
            } else {
                // Swap the operands.
                RegisterID tmp = op1;
                op1 = op2;
                op2 = tmp;
            }
        }
        m_assembler.mull(ARMRegisters::S1, dest, op1, op2);
        m_assembler.cmp(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }

    // dest = src1 * src2, branch on 'cond'. Overflow is detected via
    // mull32's high-word compare, so the jump condition becomes NonZero.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src1, src2, dest);
            cond = NonZero;
        }
        else
            mul32(src1, src2, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // In-place form: dest *= src.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }

    // Immediate form; the immediate is staged in S0 for the overflow path.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
+
    // dest -= src, branch on the flags set by the subtraction.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest -= imm, branch on flags.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest = src - imm, branch on flags.
    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest = op1 - op2, branch on flags.
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.subs(dest, op1, op2);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // srcDest = -srcDest, branch on flags.
    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest |= src, branch on flags. OR cannot overflow, hence the narrower
    // condition set.
    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
+
    // Compare-and-branch whose target can be repatched: the target address
    // is loaded into S1 (patchable, from the constant pool) and taken with
    // a conditional bx.
    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        internalCompare32(reg, imm);
        Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), true));
        m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
        return PatchableJump(jump);
    }

    // Emit a software breakpoint.
    void breakpoint()
    {
        m_assembler.bkpt(0);
    }

    // Linkable near call: target loaded into S1 then blx.
    Call nearCall()
    {
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
    }

    // Indirect call through a register.
    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    // Indirect call through memory.
    void call(Address address)
    {
        call32(address.base, address.offset);
    }

    // Function return: branch to the link register.
    void ret()
    {
        m_assembler.bx(linkRegister);
    }

    // dest = (left <cond> right) ? 1 : 0, via unconditional mov #0 followed
    // by a conditional mov #1.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    // Immediate-compare variant of the above.
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    // Byte-compare variant; the byte is zero-extended into S1 first.
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, ARMRegisters::S1);
        compare32(cond, ARMRegisters::S1, right, dest);
    }

    // dest = ((reg & mask) <cond>) ? 1 : 0.
    // NOTE(review): the mask == -1 fast path emits cmp(0, reg); it is not
    // obvious from here that this tests reg against zero rather than
    // comparing register 0 with reg -- confirm ARMAssembler::cmp operand
    // semantics before relying on this path.
    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmp(0, reg);
        else
            m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    // Memory-word test-to-register.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    // Memory-byte test-to-register.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }
+
    // dest = src + imm (non-flag-setting add; immediate via getImm/S0).
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // *address += imm, via load/modify/store through S1.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address.m_ptr);
    }

    // 64-bit in-memory add of a (sign-extended) 32-bit immediate:
    // low word with a flag-setting add/sub, then high word with
    // add-with-carry / subtract-with-carry of zero.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        ARMWord tmp;

        // S1 = address of the low word; S0 = low word value.
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, 0);

        // Prefer an op2-encodable add of imm, then an op2-encodable subtract
        // of -imm; otherwise materialize imm via getImm, which may clobber
        // S1, so the address must be re-materialized afterwards.
        if ((tmp = ARMAssembler::getOp2(imm.m_value)) != ARMAssembler::InvalidImmediate)
            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, tmp);
        else if ((tmp = ARMAssembler::getOp2(-imm.m_value)) != ARMAssembler::InvalidImmediate)
            m_assembler.subs(ARMRegisters::S0, ARMRegisters::S0, tmp);
        else {
            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, m_assembler.getImm(imm.m_value, ARMRegisters::S1));
            move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        }
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, 0);

        // Propagate the carry/borrow from the low-word operation into the
        // high word; the direction matches the sign of the immediate.
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
        if (imm.m_value >= 0)
            m_assembler.adc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        else
            m_assembler.sbc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
    }

    // *address -= imm, via load/modify/store through S1.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address.m_ptr);
    }
+
    // Word load from an absolute address; the address constant is fetched
    // into S0 from the constant pool.
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, ARMRegisters::S0, 0);
    }

    // Absolute-memory vs. register compare-and-branch.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Absolute-memory vs. immediate compare-and-branch.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Computed jump into a jump table: pc += index << scale.
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);
        m_assembler.add(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

        // NOP the default prefetching
        m_assembler.mov(ARMRegisters::r0, ARMRegisters::r0);
    }

    // Linkable far call. ensureSpace keeps the target-load and the blx in
    // the same (un-flushed) buffer region so they stay adjacent.
    Call call()
    {
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
    }

    // Tail call expressed as a linkable jump.
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    // Re-wrap an existing jump as a tail call.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }
+
    // Load a pointer constant via a unique (never-coalesced) constant-pool
    // slot, so the value can be repatched later through the returned label.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldrUniqueImmediate(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    // Patchable pointer compare-and-branch. ensureSpace keeps the patchable
    // load and the compare/branch contiguous in the buffer; the branch uses
    // the constant-pool form (last arg true) so its target is patchable too.
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    // As above with the left operand in memory (loaded into S1; the
    // patchable constant goes to S0).
    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    // Store a patchable pointer constant to memory.
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    // Convenience overload with a null initial value.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
+
    // Floating point operators

    // Hardware FP is available only when a VFP unit was detected at runtime.
    static bool supportsFloatingPoint()
    {
        return s_isVFPPresent;
    }

    // Truncation via branchTruncateDouble* is not supported on this port.
    static bool supportsFloatingPointTruncate()
    {
        return false;
    }

    // sqrt is available whenever VFP is.
    static bool supportsFloatingPointSqrt()
    {
        return s_isVFPPresent;
    }
    // fabs is not offered even though vabs_f64 exists below (used only
    // internally via absDouble).
    static bool supportsFloatingPointAbs() { return false; }

    // Single-precision load, base-index form.
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadFloat, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision load from base+offset.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.dataTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.offset);
    }

    // Double-precision load, base-index form.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision load from an absolute address (address staged in S0).
    void loadDouble(const void* address, FPRegisterID dest)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
        m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
    }

    // Single-precision store, base-index form.
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision store to base+offset.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.offset);
    }

    // Double-precision store, base-index form.
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Double-precision store to an absolute address (address staged in S0).
    void storeDouble(FPRegisterID src, const void* address)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
    }

    // Register-to-register double move; elided when src == dest.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest)
            m_assembler.vmov_f64(dest, src);
    }
+
    // dest += src (double).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_f64(dest, dest, src);
    }

    // dest = op1 + op2 (double).
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vadd_f64(dest, op1, op2);
    }

    // dest += *src; the operand is staged in scratch double register SD0.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    // dest += *address (absolute), staged in SD0.
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    // dest /= src (double).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_f64(dest, dest, src);
    }

    // dest = op1 / op2 (double).
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vdiv_f64(dest, op1, op2);
    }

    // dest /= *src. Deliberately asserted unreachable: this path has never
    // been exercised on this port.
    void divDouble(Address src, FPRegisterID dest)
    {
        RELEASE_ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    // dest -= src (double).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_f64(dest, dest, src);
    }

    // dest = op1 - op2 (double).
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vsub_f64(dest, op1, op2);
    }

    // dest -= *src, staged in SD0.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    // dest *= src (double).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_f64(dest, dest, src);
    }

    // dest *= *src, staged in SD0.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    // dest = op1 * op2 (double).
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vmul_f64(dest, op1, op2);
    }

    // dest = sqrt(src).
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt_f64(dest, src);
    }

    // dest = fabs(src).
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vabs_f64(dest, src);
    }

    // dest = -src.
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vneg_f64(dest, src);
    }
+
    // dest = (double)src. The integer is first moved into the low single
    // register of the destination double pair (hence dest << 1), then
    // converted in place.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov_vfp32(dest << 1, src);
        m_assembler.vcvt_f64_s32(dest, dest << 1);
    }

    // Memory-operand form; the integer is staged in S1.
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    // Absolute-address form; address and value both stage through S1.
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
        load32(Address(ARMRegisters::S1), ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    // Widen single to double.
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvt_f64_f32(dst, src);
    }

    // Narrow double to single.
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvt_f32_f64(dst, src);
    }

    // Double compare-and-branch: vcmp sets the FP status flags, vmrs copies
    // them to the APSR, then a conditional jump tests them. When the
    // condition carries DoubleConditionBitSpecial, an extra cmp of S0 with
    // itself is executed only under VS (unordered) -- presumably to rewrite
    // the flags so NaN operands take the intended side of the branch; TODO
    // confirm against the DoubleCondition encoding.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_f64(left, right);
        m_assembler.vmrs_apsr();
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }
+
    // Truncates 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        truncateDoubleToInt32(src, dest);

        // Map both saturation results onto 0x80000000:
        // 0x7fffffff + 1 = 0x80000000; 0x80000000 + 1 = 0x80000001, and
        // clearing bit 0 gives 0x80000000 in both cases. (Relies on vcvt
        // saturating out-of-range values to INT_MIN/INT_MAX.)
        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));

        ARMWord w = ARMAssembler::getOp2(0x80000000);
        ASSERT(w != ARMAssembler::InvalidImmediate);
        m_assembler.cmp(ARMRegisters::S0, w);
        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
    }

    // Unsigned counterpart: the saturation values 0 and 0xffffffff both map
    // to 0 under the same +1 / clear-bit-0 transform, so compare against 0.
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        truncateDoubleToUint32(src, dest);

        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));

        m_assembler.cmp(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
    }

    // Result is undefined if the value is outside of the integer range.
    // Converts in scratch double SD0's low single register, then moves the
    // integer result to a core register.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
    }

    // Unsigned truncation; same staging through SD0.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_u32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
    }

    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
    {
        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_f64_s32(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
        failureCases.append(branchTest32(Zero, dest));
    }

    // Branch if reg != 0.0: materialize +0.0 in 'scratch' (via int 0
    // conversion) and compare.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    // Branch if reg == 0.0 or reg is NaN.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
+
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Conditions live in the top nibble (the ARM condition field); flipping
    // bit 28 toggles each condition to its logical inverse.
    static RelationalCondition invert(RelationalCondition cond)
    {
        ASSERT((static_cast<uint32_t>(cond & 0x0fffffff)) == 0 && static_cast<uint32_t>(cond) < static_cast<uint32_t>(ARMAssembler::AL));
        return static_cast<RelationalCondition>(cond ^ 0x10000000);
    }

    // Emit a no-op instruction.
    void nop()
    {
        m_assembler.nop();
    }

    // Read back the target of a previously linked call instruction.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
    }

    // Overwrite the instruction(s) at 'instructionStart' with a jump to
    // 'destination'.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARMAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ ARMAssembler::maxJumpReplacementSize();
+ return 0;
+ }
+
    // This port does not support turning a patchable-branch-ptr site back
    // into a jump, so the two address-based hooks below are unreachable.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    // The register-based branchPtrWithPatch sequence starts exactly at its
    // data label.
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }

    // Restore a branchPtrWithPatch site that was replaced with a jump.
    // NOTE(review): only the low 16 bits of initialValue are handed to the
    // assembler -- presumably the revert only repatches a 16-bit field of
    // the materializing instruction; confirm against
    // ARMAssembler::revertBranchPtrWithPatch.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
    {
        ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
    }

    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch).
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
+
protected:
    // RelationalCondition values are chosen to equal the assembler's
    // condition-code encodings, so the mapping is a plain cast.
    ARMAssembler::Condition ARMCondition(RelationalCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    // Same numeric identity for ResultCondition.
    ARMAssembler::Condition ARMCondition(ResultCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    // Reserve buffer space so a multi-instruction sequence (and its
    // constant-pool entries) is not split by a pool flush.
    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    // Current size of the pending constant pool, in bytes.
    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

    // Indirect call through [base + offset]: load the target into S1,
    // then branch-and-link.
    void call32(RegisterID base, int32_t offset)
    {
        load32(Address(base, offset), ARMRegisters::S1);
        m_assembler.blx(ARMRegisters::S1);
    }
+
private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Compare a register with an immediate, preferring cmn (compare with the
    // negated value) when -imm has an op2 encoding. The negation of
    // 0x80000000 overflows to itself, so that value is forced down the
    // plain cmp path.
    void internalCompare32(RegisterID left, TrustedImm32 right)
    {
        ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
        if (tmp != ARMAssembler::InvalidImmediate)
            m_assembler.cmn(left, tmp);
        else
            m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
    }

    // Link a recorded call to its final target inside generated code.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_label, function.value());
    }

    // Repatch an already-linked call to a new destination (label form).
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Repatch an already-linked call to a new destination (function form).
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Set at startup: whether a VFP floating-point unit was detected.
    static const bool s_isVFPPresent;
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // MacroAssemblerARM_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
new file mode 100644
index 0000000000..81c1d7e08a
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
@@ -0,0 +1,1914 @@
+/*
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerARMv7_h
+#define MacroAssemblerARMv7_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ARMv7Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
+ // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
+ // - dTR is likely used more than aTR, and we'll get better instruction
+ // encoding if it's in the low 8 registers.
+ static const RegisterID dataTempRegister = ARMRegisters::ip;
+ static const RegisterID addressTempRegister = ARMRegisters::r3;
+
+ static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
+ inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
+
+public:
+ MacroAssemblerARMv7()
+ : m_makeJumpPatchable(false)
+ {
+ }
+
+ typedef ARMv7Assembler::LinkRecord LinkRecord;
+ typedef ARMv7Assembler::JumpType JumpType;
+ typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -255 && value <= 255;
+ }
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+ void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+ bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+
+ struct ArmAddress {
+ enum AddressType {
+ HasOffset,
+ HasIndex,
+ } type;
+ RegisterID base;
+ union {
+ int32_t offset;
+ struct {
+ RegisterID index;
+ Scale scale;
+ };
+ } u;
+
+ explicit ArmAddress(RegisterID base, int32_t offset = 0)
+ : type(HasOffset)
+ , base(base)
+ {
+ u.offset = offset;
+ }
+
+ explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
+ : type(HasIndex)
+ , base(base)
+ {
+ u.index = index;
+ u.scale = scale;
+ }
+ };
+
+public:
+ typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;
+
+ static const Scale ScalePtr = TimesFour;
+
+ enum RelationalCondition {
+ Equal = ARMv7Assembler::ConditionEQ,
+ NotEqual = ARMv7Assembler::ConditionNE,
+ Above = ARMv7Assembler::ConditionHI,
+ AboveOrEqual = ARMv7Assembler::ConditionHS,
+ Below = ARMv7Assembler::ConditionLO,
+ BelowOrEqual = ARMv7Assembler::ConditionLS,
+ GreaterThan = ARMv7Assembler::ConditionGT,
+ GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ LessThan = ARMv7Assembler::ConditionLT,
+ LessThanOrEqual = ARMv7Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
+ Overflow = ARMv7Assembler::ConditionVS,
+ Signed = ARMv7Assembler::ConditionMI,
+ Zero = ARMv7Assembler::ConditionEQ,
+ NonZero = ARMv7Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = ARMv7Assembler::ConditionEQ,
+ DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
+ DoubleGreaterThan = ARMv7Assembler::ConditionGT,
+ DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
+ DoubleLessThan = ARMv7Assembler::ConditionLO,
+ DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
+ DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
+ DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
+ DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
+ DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
+ DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
+ };
+
+ static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID linkRegister = ARMRegisters::lr;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+ // For many operations the source may be a TrustedImm32, the srcDst operand
+ // may often be a memory location (explicitly described using an Address
+ // object).
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.add(dest, dest, src);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add(dest, src, dataTempRegister);
+ }
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ }
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.ARM_and(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.ARM_and(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.ARM_and(dest, src, dataTempRegister);
+ }
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ and32(dest, src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ and32(imm, dest, dest);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
+ void countLeadingZeros32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.clz(dest, src);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.lsl(dest, src, dataTempRegister);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl(dest, src, imm.m_value & 0x1f);
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift32(dest, shiftAmount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ lshift32(dest, imm, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.smull(dest, dataTempRegister, dest, src);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, dataTempRegister);
+ m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.neg(srcDest, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr(dest, dest, src);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ load32(addressTempRegister, dataTempRegister);
+ or32(src, dataTempRegister);
+ store32(dataTempRegister, addressTempRegister);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest, dest);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orr(dest, op1, op2);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.orr(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.orr(dest, src, dataTempRegister);
+ }
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.asr(dest, src, dataTempRegister);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.asr(dest, src, imm.m_value & 0x1f);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift32(dest, shiftAmount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift32(dest, imm, dest);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ // Clamp the shift to the range 0..31
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
+ ASSERT(armImm.isValid());
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
+
+ m_assembler.lsr(dest, src, dataTempRegister);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsr(dest, src, imm.m_value & 0x1f);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift32(dest, shiftAmount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ urshift32(dest, imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sub(dest, dest, src);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dest, dest, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub(dest, dest, dataTempRegister);
+ }
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ load32(address, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ load32(address.m_ptr, dataTempRegister);
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // Hrrrm, since dataTempRegister holds the data loaded,
+ // use addressTempRegister to hold the immediate.
+ move(imm, addressTempRegister);
+ m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
+ }
+
+ store32(dataTempRegister, address.m_ptr);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.eor(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.mvn(dest, src);
+ return;
+ }
+
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.eor(dest, src, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.eor(dest, src, dataTempRegister);
+ }
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ xor32(dest, src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn(dest, dest);
+ else
+ xor32(imm, dest, dest);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be a TrustedImm32. Address
+ // operand objects to loads and stores will be implicitly constructed if a
+ // register is passed.
+
+private:
+ void load32(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldr(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldr(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrh(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load16Signed(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
+ }
+
+ void load8(ArmAddress address, RegisterID dest)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.ldrb(dest, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void load8Signed(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
+ }
+
+protected:
+ void store32(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.str(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.str(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.str(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+private:
+ void store8(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strb(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strb(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strb(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+ void store16(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strh(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strh(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strh(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+public:
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(setupArmAddress(address), dest);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(setupArmAddress(address), dest);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ ASSERT(address.offset >= 0 && address.offset <= 255);
+ m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
+ return result;
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
+ void load8Signed(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ load8Signed(setupArmAddress(address), dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+ load32(ArmAddress(address.base, dataTempRegister), dest);
+ return label;
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+
+ RegisterID base = address.base;
+
+ DataLabelCompact label(this);
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+
+ m_assembler.ldr(dest, base, address.offset, true, false);
+ return label;
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ load16Signed(setupArmAddress(address), dest);
+ }
+
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
+ if (armImm.isValid())
+ m_assembler.ldrh(dest, address.base, armImm);
+ else {
+ move(TrustedImm32(address.offset), dataTempRegister);
+ m_assembler.ldrh(dest, address.base, dataTempRegister);
+ }
+ }
+
+ void load16Signed(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
+ store32(src, ArmAddress(address.base, dataTempRegister));
+ return label;
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ store32(src, setupArmAddress(address));
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
+ void store32(RegisterID src, const void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ store8(src, setupArmAddress(address));
+ }
+
+ void store8(RegisterID src, void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ store8(src, ArmAddress(addressTempRegister, 0));
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store8(dataTempRegister, address);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ store16(src, setupArmAddress(address));
+ }
+
+ // Possibly clobbers src, but not on this architecture.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool shouldBlindForSpecificArch(uint32_t value)
+ {
+ ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
+
+ // Couldn't be encoded as an immediate, so assume it's untrusted.
+ if (!immediate.isValid())
+ return true;
+
+ // If we can encode the immediate, we have less than 16 attacker
+ // controlled bits.
+ if (immediate.isEncodedImm())
+ return false;
+
+ // Don't let any more than 12 bits of an instruction word
+ // be controlled by an attacker.
+ return !immediate.isUInt12();
+ }
+#endif
+
+ // Floating-point operations:
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vldr(dest, base, offset);
+ }
+
+ void loadFloat(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadDouble(Address(addressTempRegister, address.offset), dest);
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadFloat(Address(addressTempRegister, address.offset), dest);
+ }
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.vmov(dest, src);
+ }
+
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ m_assembler.vldr(dest, addressTempRegister, 0);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.vstr(src, base, offset);
+ }
+
+ void storeFloat(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
+ }
+
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ storeDouble(src, addressTempRegister);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeDouble(src, Address(addressTempRegister, address.offset));
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeFloat(src, Address(addressTempRegister, address.offset));
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vadd(dest, dest, src);
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ addDouble(fpTempRegister, dest);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vadd(dest, op1, op2);
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(address.m_ptr, fpTempRegister);
+ m_assembler.vadd(dest, dest, fpTempRegister);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vdiv(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vdiv(dest, op1, op2);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsub(dest, dest, src);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ subDouble(fpTempRegister, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vsub(dest, op1, op2);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmul(dest, dest, src);
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ loadDouble(src, fpTempRegister);
+ mulDouble(fpTempRegister, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vmul(dest, op1, op2);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsqrt(dest, src);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vabs(dest, src);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vneg(dest, src);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vmov(fpTempRegister, src, src);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertInt32ToDouble(Address address, FPRegisterID dest)
+ {
+ // Fixme: load directly into the fpr!
+ load32(address, dataTempRegister);
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ // Fixme: load directly into the fpr!
+ load32(address.m_ptr, dataTempRegister);
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.vcmp(left, right);
+ m_assembler.vmrs();
+
+ if (cond == DoubleNotEqual) {
+ // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+ return makeBranch(cond);
+ }
+
+    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+    // Truncate double 'src' to a signed int32 in 'dest'. Returns a jump taken
+    // on failure (default) or on success (BranchIfTruncateSuccessful).
+    // Clobbers fpTempRegister and dataTempRegister.
+    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        // Convert into dest.
+        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+        m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+        // Calculate 2x dest. If the value potentially underflowed, it will have
+        // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
+        // overflow the result will be equal to -2.
+        Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
+        Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
+
+        // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
+        underflow.link(this);
+        if (branchType == BranchIfTruncateSuccessful)
+            return noOverflow;
+
+        // We'll reach the current point in the code on failure, so plant a
+        // jump here & link the success case.
+        Jump failure = jump();
+        noOverflow.link(this);
+        return failure;
+    }
+
+    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        // NOTE(review): this variant also uses the *signed* conversion, so only
+        // results in [0, 0x7fffffff) pass the checks below; 0x7fffffff is treated
+        // as saturation/overflow and negatives as failure - confirm callers of
+        // the Uint32 form expect this restricted range.
+        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+        m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+        Jump overflow = branch32(Equal, dest, TrustedImm32(0x7fffffff));
+        Jump success = branch32(GreaterThanOrEqual, dest, TrustedImm32(0));
+        overflow.link(this);
+
+        if (branchType == BranchIfTruncateSuccessful)
+            return success;
+
+        Jump failure = jump();
+        success.link(this);
+        return failure;
+    }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+ }
+
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+ }
+
+    // Convert 'src' to an integer, and places the resulting 'dest'.
+    // If the result is not representable as a 32 bit value, branch.
+    // May also branch for some values that are representable in 32 bits
+    // (specifically, in this case, 0).
+    // Clobbers fpTempRegister. The trailing FPRegisterID is unused on this port.
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
+    {
+        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+        m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+        m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
+        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
+
+        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+        failureCases.append(branchTest32(Zero, dest));
+    }
+
+    // Branch if 'reg' is an ordered, non-zero double. NaN compares unordered
+    // (V flag set) and is forced to fall through. Second parameter is an
+    // unused scratch register on this port.
+    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.vcmpz(reg);
+        m_assembler.vmrs();
+        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
+        unordered.link(this);
+        return result;
+    }
+
+    // Branch if 'reg' is zero or NaN (unordered).
+    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
+    {
+        m_assembler.vcmpz(reg);
+        m_assembler.vmrs();
+        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
+        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
+        unordered.link(this);
+        // We get here if either unordered or equal.
+        Jump result = jump();
+        notEqual.link(this);
+        return result;
+    }
+
+    // Stack manipulation operations:
+    //
+    // The ABI is assumed to provide a stack abstraction to memory,
+    // containing machine word sized units of data. Push and pop
+    // operations add and remove a single register sized unit of data
+    // to or from the stack. Peek and poke operations read or write
+    // values on the stack, without moving the current stack position.
+
+    void pop(RegisterID dest)
+    {
+        // load postindexed with writeback
+        m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+    }
+
+    void push(RegisterID src)
+    {
+        // store preindexed with writeback
+        // NOTE(review): -sizeof(void*) negates an unsigned value; relies on the
+        // implicit conversion at str()'s offset parameter - confirm intended.
+        m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+    }
+
+    void push(Address address)
+    {
+        // Push a value loaded from memory; clobbers dataTempRegister.
+        load32(address, dataTempRegister);
+        push(dataTempRegister);
+    }
+
+    void push(TrustedImm32 imm)
+    {
+        // Push an immediate; clobbers dataTempRegister.
+        move(imm, dataTempRegister);
+        push(dataTempRegister);
+    }
+
+    // Register move operations:
+    //
+    // Move values in registers.
+
+    void move(TrustedImm32 imm, RegisterID dest)
+    {
+        uint32_t value = imm.m_value;
+
+        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+
+        // Prefer a single MOV of an encodable immediate; failing that, try MVN
+        // of the bitwise complement; otherwise fall back to loading the value
+        // 16 bits at a time with a MOV/MOVT pair.
+        if (armImm.isValid())
+            m_assembler.mov(dest, armImm);
+        else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+            m_assembler.mvn(dest, armImm);
+        else {
+            m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+            if (value & 0xffff0000)
+                m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
+        }
+    }
+
+    void move(RegisterID src, RegisterID dest)
+    {
+        // Elide no-op moves.
+        if (src != dest)
+            m_assembler.mov(dest, src);
+    }
+
+    void move(TrustedImmPtr imm, RegisterID dest)
+    {
+        move(TrustedImm32(imm), dest);
+    }
+
+    void swap(RegisterID reg1, RegisterID reg2)
+    {
+        // Exchange reg1 and reg2 via dataTempRegister (clobbered).
+        move(reg1, dataTempRegister);
+        move(reg2, reg1);
+        move(dataTempRegister, reg2);
+    }
+
+    void signExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        // Pointers are 32-bit on this target, so extension is a plain move.
+        move(src, dest);
+    }
+
+    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+    }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(cond ^ 1);
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return ARMv7Assembler::maxJumpReplacementSize();
+ }
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations return a Jump
+ // object which may linked at a later point, allow forwards jump,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+    // respectively, for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+    // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
+private:
+
+    // Should we be using TEQ for equal/not-equal?
+    // Set the condition flags for 'left' compared against immediate 'right'.
+    // Clobbers dataTempRegister when the immediate cannot be encoded.
+    void compare32(RegisterID left, TrustedImm32 right)
+    {
+        int32_t imm = right.m_value;
+        if (!imm)
+            // Comparing against zero: TST reg, reg sets N/Z without needing an immediate.
+            m_assembler.tst(left, left);
+        else {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+            if (armImm.isValid())
+                m_assembler.cmp(left, armImm);
+            else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+                // CMN (compare negative) lets us encode the negated immediate.
+                m_assembler.cmn(left, armImm);
+            else {
+                move(TrustedImm32(imm), dataTempRegister);
+                m_assembler.cmp(left, dataTempRegister);
+            }
+        }
+    }
+
+    // Set the condition flags for 'reg' ANDed with 'mask'.
+    // Clobbers dataTempRegister when the mask cannot be encoded.
+    void test32(RegisterID reg, TrustedImm32 mask)
+    {
+        int32_t imm = mask.m_value;
+
+        if (imm == -1)
+            // An all-ones mask degenerates to testing the register against itself.
+            m_assembler.tst(reg, reg);
+        else {
+            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+            if (armImm.isValid())
+                m_assembler.tst(reg, armImm);
+            else {
+                move(mask, dataTempRegister);
+                m_assembler.tst(reg, dataTempRegister);
+            }
+        }
+    }
+
+public:
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmp(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+        load32(left, addressTempRegister);
+        return branch32(cond, addressTempRegister, right);
+    }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
+ load32WithUnalignedHalfWords(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
+ load32(left.m_ptr, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ compare32(left, right);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+ // use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch8(cond, addressTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+ // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.tst(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ test32(reg, mask);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
+ load32(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
+ load8(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.bx(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ load32(address, dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+ load32(Address(dataTempRegister), dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.add_S(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dest, op1, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.add_S(dest, op1, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, src, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, imm, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ // Move the high bits of the address into addressTempRegister,
+ // and load the value into dataTempRegister.
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ // Do the add.
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // If the operand does not fit into an immediate then load it temporarily
+ // into addressTempRegister; since we're overwriting addressTempRegister
+ // we'll need to reload it with the high bits of the address afterwards.
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ }
+
+ // Store the result.
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ return Jump(makeBranch(cond));
+ }
+
+    // Multiply and branch on 'cond'. Overflow detection uses SMULL to obtain
+    // the full 64-bit product: the 32-bit multiply overflowed iff the high word
+    // (dataTempRegister) differs from the sign-extension of the low word.
+    // Clobbers dataTempRegister, and addressTempRegister for the Overflow case.
+    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+    {
+        m_assembler.smull(dest, dataTempRegister, src1, src2);
+
+        if (cond == Overflow) {
+            // addressTempRegister = all sign bits of the 32-bit result.
+            m_assembler.asr(addressTempRegister, dest, 31);
+            return branch32(NotEqual, addressTempRegister, dataTempRegister);
+        }
+
+        return branchTest32(cond, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        return branchMul32(cond, src, dest, dest);
+    }
+
+    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        // Materialize the immediate; clobbers dataTempRegister.
+        move(imm, dataTempRegister);
+        return branchMul32(cond, dataTempRegister, src, dest);
+    }
+
+    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+    {
+        // Negate as (0 - srcDest), setting flags for the branch.
+        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+        m_assembler.sub_S(srcDest, zero, srcDest);
+        return Jump(makeBranch(cond));
+    }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ m_assembler.orr_S(dest, dest, src);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.sub_S(dest, op1, op2);
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
+ {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.sub_S(dest, op1, armImm);
+ else {
+ move(imm, dataTempRegister);
+ m_assembler.sub_S(dest, op1, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, dest, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, dest, imm, dest);
+ }
+
+ void relativeTableJump(RegisterID index, int scale)
+ {
+ ASSERT(scale >= 0 && scale <= 31);
+
+ // dataTempRegister will point after the jump if index register contains zero
+ move(ARMRegisters::pc, dataTempRegister);
+ m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));
+
+ ShiftTypeAndAmount shift(SRType_LSL, scale);
+ m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
+ jump(dataTempRegister);
+ }
+
+ // Miscellaneous operations:
+
+ void breakpoint(uint8_t imm = 0)
+ {
+ m_assembler.bkpt(imm);
+ }
+
+ ALWAYS_INLINE Call nearCall()
+ {
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ }
+
+ ALWAYS_INLINE Call call()
+ {
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ }
+
+ ALWAYS_INLINE Call call(RegisterID target)
+ {
+ return Call(m_assembler.blx(target), Call::None);
+ }
+
+ ALWAYS_INLINE Call call(Address address)
+ {
+ load32(address, dataTempRegister);
+ return Call(m_assembler.blx(dataTempRegister), Call::None);
+ }
+
+ ALWAYS_INLINE void ret()
+ {
+ m_assembler.bx(linkRegister);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmp(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+ void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
+ {
+ load32(left, dataTempRegister);
+ compare32(cond, dataTempRegister, right, dest);
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ load8(left, addressTempRegister);
+ compare32(cond, addressTempRegister, right, dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(left, right);
+ m_assembler.it(armV7Condition(cond), false);
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+ }
+
+    // FIXME:
+    // The mask should be optional... perhaps the argument order should be
+    // dest-src, operations always have a dest? ... possibly not true, considering
+    // asm ops like test, or pseudo ops like pop().
+    // Materialize the boolean result of testing (memory & mask) under 'cond'
+    // into 'dest' via an IT block: 1 if the condition holds, 0 otherwise.
+    // Clobbers dataTempRegister.
+    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        load32(address, dataTempRegister);
+        test32(dataTempRegister, mask);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+    // Byte-sized variant of the boolean-materializing test32 above.
+    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+    {
+        load8(address, dataTempRegister);
+        test32(dataTempRegister, mask);
+        m_assembler.it(armV7Condition(cond), false);
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
+        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
+    }
+
+ ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
+ {
+ padBeforePatch();
+ moveFixedWidthEncoding(imm, dst);
+ return DataLabel32(this);
+ }
+
+ ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
+ {
+ padBeforePatch();
+ moveFixedWidthEncoding(TrustedImm32(imm), dst);
+ return DataLabelPtr(this);
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, left, TrustedImm32(right));
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchTest32(cond, reg, mask);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableJump()
+ {
+ padBeforePatch();
+ m_makeJumpPatchable = true;
+ Jump result = jump();
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
+ store32(dataTempRegister, address);
+ return label;
+ }
+ ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+
+ ALWAYS_INLINE Call tailRecursiveCall()
+ {
+ // Like a normal call, but don't link.
+ moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+ return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ }
+
+ ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+
+ int executableOffsetFor(int location)
+ {
+ return m_assembler.executableOffsetFor(location);
+ }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const unsigned twoWordOpSize = 4;
+ return label.labelAtOffset(-twoWordOpSize * 2);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
+ {
+#if OS(LINUX) || OS(QNX)
+ ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
+#else
+ UNUSED_PARAM(rd);
+ ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
+#endif
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+protected:
+    // Emit an unconditional jump through dataTempRegister. The fixed-width
+    // MOV/MOVT encoding keeps the sequence a constant size so it can be
+    // relinked/patched after emission.
+    ALWAYS_INLINE Jump jump()
+    {
+        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
+    }
+
+    // Emit a conditional jump: an IT block predicating the MOV/MOVT/BX sequence.
+    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
+    {
+        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
+        m_assembler.it(cond, true, true);
+        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
+        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
+    }
+    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
+    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
+
+ ArmAddress setupArmAddress(BaseIndex address)
+ {
+ if (address.offset) {
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(TrustedImm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return ArmAddress(addressTempRegister, address.index, address.scale);
+ } else
+ return ArmAddress(address.base, address.index, address.scale);
+ }
+
+    // Lower an Address to an ArmAddress, keeping the offset as an immediate
+    // when it fits the encodable range [-0xff, 0xfff]; otherwise the offset is
+    // materialized in addressTempRegister (clobbered).
+    ArmAddress setupArmAddress(Address address)
+    {
+        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+            return ArmAddress(address.base, address.offset);
+
+        move(TrustedImm32(address.offset), addressTempRegister);
+        return ArmAddress(address.base, addressTempRegister);
+    }
+
+    // NOTE(review): byte-for-byte identical to the Address overload above;
+    // presumably kept separate for ImplicitAddress overload resolution - confirm.
+    ArmAddress setupArmAddress(ImplicitAddress address)
+    {
+        if ((address.offset >= -0xff) && (address.offset <= 0xfff))
+            return ArmAddress(address.base, address.offset);
+
+        move(TrustedImm32(address.offset), addressTempRegister);
+        return ArmAddress(address.base, addressTempRegister);
+    }
+
+ RegisterID makeBaseIndexBase(BaseIndex address)
+ {
+ if (!address.offset)
+ return address.base;
+
+ ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
+ if (imm.isValid())
+ m_assembler.add(addressTempRegister, address.base, imm);
+ else {
+ move(TrustedImm32(address.offset), addressTempRegister);
+ m_assembler.add(addressTempRegister, addressTempRegister, address.base);
+ }
+
+ return addressTempRegister;
+ }
+
+    // Load 'imm' into 'dst' always using the two-instruction MOV(T3)/MOVT pair,
+    // even when a shorter encoding exists, so the emitted sequence has a fixed
+    // width - this is what the *WithPatch operations rely on to repatch the
+    // constant in place.
+    void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst)
+    {
+        uint32_t value = imm.m_value;
+        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
+        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
+    }
+
+ ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+ ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
+ {
+ return static_cast<ARMv7Assembler::Condition>(cond);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ ARMv7Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ bool m_makeJumpPatchable;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerARMv7_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h b/src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h
new file mode 100644
index 0000000000..89cffb1278
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerCodeRef_h
+#define MacroAssemblerCodeRef_h
+
+#include "Disassembler.h"
+#include "ExecutableAllocator.h"
+#include "LLIntData.h"
+#include <wtf/DataLog.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefPtr.h>
+#include <wtf/UnusedParam.h>
+
+// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
+// instruction address on the platform (for example, check any alignment requirements).
+#if CPU(ARM_THUMB2)
+// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
+// into the processor are decorated with the bottom bit set, indicating that this is
+// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
+// decorated and undecorated null, and the second test ensures that the pointer is
+// decorated.
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
+ ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
+#define ASSERT_VALID_CODE_OFFSET(offset) \
+ ASSERT(!(offset & 1)) // Must be multiple of 2.
+#else
+#define ASSERT_VALID_CODE_POINTER(ptr) \
+ ASSERT(ptr)
+#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
+#endif
+
+#if CPU(X86) && OS(WINDOWS)
+#define CALLING_CONVENTION_IS_STDCALL 1
+#ifndef CDECL
+#if COMPILER(MSVC)
+#define CDECL __cdecl
+#else
+#define CDECL __attribute__ ((__cdecl))
+#endif // COMPILER(MSVC)
+#endif // CDECL
+#else
+#define CALLING_CONVENTION_IS_STDCALL 0
+#endif
+
+#if CPU(X86)
+#define HAS_FASTCALL_CALLING_CONVENTION 1
+#ifndef FASTCALL
+#if COMPILER(MSVC)
+#define FASTCALL __fastcall
+#else
+#define FASTCALL __attribute__ ((fastcall))
+#endif // COMPILER(MSVC)
+#endif // FASTCALL
+#else
+#define HAS_FASTCALL_CALLING_CONVENTION 0
+#endif // CPU(X86)
+
+namespace JSC {
+
+// FunctionPtr:
+//
+// FunctionPtr should be used to wrap pointers to C/C++ functions in JSC
+// (particularly, the stub functions).
+class FunctionPtr {
+public:
+ FunctionPtr()
+ : m_value(0)
+ {
+ }
+
+ template<typename returnType>
+ FunctionPtr(returnType(*value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType(*value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType(*value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+// MSVC doesn't seem to treat functions with different calling conventions as
+// different types; these methods already defined for fastcall, below.
+#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
+
+ template<typename returnType>
+ FunctionPtr(returnType (CDECL *value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType (CDECL *value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+#endif
+
+#if HAS_FASTCALL_CALLING_CONVENTION
+
+ template<typename returnType>
+ FunctionPtr(returnType (FASTCALL *value)())
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1>
+ FunctionPtr(returnType (FASTCALL *value)(argType1))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4>
+ FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+#endif
+
+ template<typename FunctionType>
+ explicit FunctionPtr(FunctionType* value)
+ // Using a C-style cast here to avoid compiler error on RVTC:
+ // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
+ // (I guess on RVTC function pointers have a different constness to GCC/MSVC?)
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+ void* executableAddress() const { return m_value; }
+
+
+private:
+ void* m_value;
+};
+
+// ReturnAddressPtr:
+//
+// ReturnAddressPtr should be used to wrap return addresses generated by processor
+// 'call' instructions executed in JIT code. We use return addresses to look up
+// exception and optimization information, and to repatch the call instruction
+// that is the source of the return address.
+class ReturnAddressPtr {
+public:
+ ReturnAddressPtr()
+ : m_value(0)
+ {
+ }
+
+ explicit ReturnAddressPtr(void* value)
+ : m_value(value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ explicit ReturnAddressPtr(FunctionPtr function)
+ : m_value(function.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* value() const { return m_value; }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodePtr:
+//
+// MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code.
+class MacroAssemblerCodePtr {
+public:
+ MacroAssemblerCodePtr()
+ : m_value(0)
+ {
+ }
+
+ explicit MacroAssemblerCodePtr(void* value)
+#if CPU(ARM_THUMB2)
+ // Decorate the pointer as a thumb code pointer.
+ : m_value(reinterpret_cast<char*>(value) + 1)
+#else
+ : m_value(value)
+#endif
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
+ {
+ ASSERT_VALID_CODE_POINTER(value);
+ MacroAssemblerCodePtr result;
+ result.m_value = value;
+ return result;
+ }
+
+#if ENABLE(LLINT)
+ static MacroAssemblerCodePtr createLLIntCodePtr(LLIntCode codeId)
+ {
+ return createFromExecutableAddress(LLInt::getCodePtr(codeId));
+ }
+#endif
+
+ explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
+ : m_value(ra.value())
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
+ void* executableAddress() const { return m_value; }
+#if CPU(ARM_THUMB2)
+ // To use this pointer as a data address remove the decoration.
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
+#else
+ void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
+#endif
+
+ bool operator!() const
+ {
+ return !m_value;
+ }
+
+private:
+ void* m_value;
+};
+
+// MacroAssemblerCodeRef:
+//
+// A reference to a section of JIT generated code. A CodeRef consists of a
+// pointer to the code, and a ref pointer to the pool from within which it
+// was allocated.
+class MacroAssemblerCodeRef {
+private:
+ // This is private because it's dangerous enough that we want uses of it
+ // to be easy to find - hence the static create method below.
+ explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
+ : m_codePtr(codePtr)
+ {
+ ASSERT(m_codePtr);
+ }
+
+public:
+ MacroAssemblerCodeRef()
+ {
+ }
+
+ MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
+ : m_codePtr(executableMemory->start())
+ , m_executableMemory(executableMemory)
+ {
+ ASSERT(m_executableMemory->isManaged());
+ ASSERT(m_executableMemory->start());
+ ASSERT(m_codePtr);
+ }
+
+ // Use this only when you know that the codePtr refers to code that is
+ // already being kept alive through some other means. Typically this means
+ // that codePtr is immortal.
+ static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
+ {
+ return MacroAssemblerCodeRef(codePtr);
+ }
+
+#if ENABLE(LLINT)
+ // Helper for creating self-managed code refs from LLInt.
+ static MacroAssemblerCodeRef createLLIntCodeRef(LLIntCode codeId)
+ {
+ return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId)));
+ }
+#endif
+
+ ExecutableMemoryHandle* executableMemory() const
+ {
+ return m_executableMemory.get();
+ }
+
+ MacroAssemblerCodePtr code() const
+ {
+ return m_codePtr;
+ }
+
+ size_t size() const
+ {
+ if (!m_executableMemory)
+ return 0;
+ return m_executableMemory->sizeInBytes();
+ }
+
+ bool tryToDisassemble(const char* prefix) const
+ {
+ return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile());
+ }
+
+ bool operator!() const { return !m_codePtr; }
+
+private:
+ MacroAssemblerCodePtr m_codePtr;
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+};
+
+} // namespace JSC
+
+#endif // MacroAssemblerCodeRef_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h b/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h
new file mode 100644
index 0000000000..e18d86c5b3
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h
@@ -0,0 +1,2751 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerMIPS_h
+#define MacroAssemblerMIPS_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AbstractMacroAssembler.h"
+#include "MIPSAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler> {
+public:
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+
+ MacroAssemblerMIPS()
+ : m_fixedWidth(false)
+ {
+ }
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -2147483647 - 1 && value <= 2147483647;
+ }
+
+ static const Scale ScalePtr = TimesFour;
+
+ // For storing immediate number
+ static const RegisterID immTempRegister = MIPSRegisters::t0;
+ // For storing data loaded from the memory
+ static const RegisterID dataTempRegister = MIPSRegisters::t1;
+ // For storing address base
+ static const RegisterID addrTempRegister = MIPSRegisters::t2;
+ // For storing compare result
+ static const RegisterID cmpTempRegister = MIPSRegisters::t3;
+
+ // FP temp register
+ static const FPRegisterID fpTempRegister = MIPSRegisters::f16;
+
+ static const int MaximumCompactPtrAlignedAddressOffset = 0x7FFFFFFF;
+
+ enum RelationalCondition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual
+ };
+
+ enum ResultCondition {
+ Overflow,
+ Signed,
+ Zero,
+ NonZero
+ };
+
+ enum DoubleCondition {
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ static const RegisterID stackPointerRegister = MIPSRegisters::sp;
+ static const RegisterID returnAddressRegister = MIPSRegisters::ra;
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+ // For many operations the source may be a TrustedImm32, the srcDst operand
+ // may often be a memory location (explicitly described using an Address
+ // object).
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addu(dest, dest, src);
+ }
+
+ void add32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.addu(dest, op1, op2);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest, dest);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, imm
+ */
+ m_assembler.addiu(dest, src, imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ addu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.addu(dest, src, immTempRegister);
+ }
+ }
+
+ void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (imm.m_value >= -32768 && imm.m_value <= 32767
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ li immtemp, imm
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ if (dest.offset >= -32768 && dest.offset <= 32767 && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, dest.base, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, dest.base, dest.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ addu dataTemp, dataTemp, src
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (dest.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, dest.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, dest.offset);
+ m_assembler.addu(dataTempRegister, dataTempRegister, src);
+ m_assembler.sw(dataTempRegister, addrTempRegister, dest.offset);
+ }
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw cmpTemp, 0(addrTemp)
+ addu dataTemp, cmpTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(cmpTempRegister, addrTempRegister, 0);
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, cmpTempRegister, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.addu(dataTempRegister, cmpTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ add32(imm, address)
+ sltu immTemp, dataTemp, cmpTemp # set carry-in bit
+ lw dataTemp, 4(addrTemp)
+ addiu dataTemp, imm.m_value >> 31 ? -1 : 0
+ addu dataTemp, dataTemp, immTemp
+ sw dataTemp, 4(addrTemp)
+ */
+ add32(imm, address);
+ m_assembler.sltu(immTempRegister, dataTempRegister, cmpTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 4);
+ if (imm.m_value >> 31)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -1);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.sw(dataTempRegister, addrTempRegister, 4);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andInsn(dest, dest, src);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.andInsn(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+ m_assembler.andi(dest, dest, imm.m_value);
+ else {
+ /*
+ li immTemp, imm
+ and dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.andInsn(dest, dest, immTempRegister);
+ }
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
+ m_assembler.andi(dest, src, imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.andInsn(dest, src, immTempRegister);
+ }
+ }
+
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.sllv(dest, dest, shiftAmount);
+ }
+
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.sllv(dest, src, shiftAmount);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ m_assembler.sllv(dest, dest, immTempRegister);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ m_assembler.sllv(dest, src, immTempRegister);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mul(dest, dest, src);
+ }
+
+ void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.mul(dest, op1, op2);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (imm.m_value == 1 && !m_fixedWidth)
+ move(src, dest);
+ else {
+ /*
+ li dataTemp, imm
+ mul dest, src, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.mul(dest, src, dataTempRegister);
+ }
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.subu(srcDest, MIPSRegisters::zero, srcDest);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orInsn(dest, dest, src);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orInsn(dest, op1, op2);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ return;
+
+ if (imm.m_value > 0 && imm.m_value < 65535
+ && !m_fixedWidth) {
+ m_assembler.ori(dest, dest, imm.m_value);
+ return;
+ }
+
+ /*
+ li dataTemp, imm
+ or dest, dest, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.orInsn(dest, dest, dataTempRegister);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ return;
+
+ if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) {
+ m_assembler.ori(dest, src, imm.m_value);
+ return;
+ }
+
+ /*
+ li dataTemp, imm
+ or dest, src, dataTemp
+ */
+ move(imm, dataTempRegister);
+ m_assembler.orInsn(dest, src, dataTempRegister);
+ }
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ load32(dest.m_ptr, dataTempRegister);
+ m_assembler.orInsn(dataTempRegister, dataTempRegister, src);
+ store32(dataTempRegister, dest.m_ptr);
+ }
+
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srav(dest, dest, shiftAmount);
+ }
+
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srav(dest, src, shiftAmount);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sra(dest, dest, imm.m_value);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sra(dest, src, imm.m_value);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srlv(dest, dest, shiftAmount);
+ }
+
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.srlv(dest, src, shiftAmount);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.srl(dest, dest, imm.m_value);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.srl(dest, src, imm.m_value);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subu(dest, dest, src);
+ }
+
+ void sub32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.subu(dest, op1, op2);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, imm
+ */
+ m_assembler.addiu(dest, dest, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ subu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, dest, immTempRegister);
+ }
+ }
+
+ void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth) {
+ /*
+ addiu dest, src, imm
+ */
+ m_assembler.addiu(dest, src, -imm.m_value);
+ } else {
+ /*
+ li immTemp, imm
+ subu dest, src, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.subu(dest, src, immTempRegister);
+ }
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ lw dataTemp, offset(base)
+ li immTemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, offset(base)
+ */
+ m_assembler.lw(dataTempRegister, address.base, address.offset);
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, address.base, address.offset);
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dataTemp, (offset & 0xffff)(addrTemp)
+ li immtemp, imm
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
+
+ if (imm.m_value >= -32767 && imm.m_value <= 32768
+ && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
+ }
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ sub32(dataTempRegister, dest);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ /*
+ li addrTemp, address
+ li immTemp, imm
+ lw dataTemp, 0(addrTemp)
+ subu dataTemp, dataTemp, immTemp
+ sw dataTemp, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address.m_ptr), addrTempRegister);
+ m_assembler.lw(dataTempRegister, addrTempRegister, 0);
+
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
+ }
+ m_assembler.sw(dataTempRegister, addrTempRegister, 0);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorInsn(dest, dest, src);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.xorInsn(dest, op1, op2);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.nor(dest, dest, MIPSRegisters::zero);
+ return;
+ }
+
+ /*
+ li immTemp, imm
+ xor dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(dest, dest, immTempRegister);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.nor(dest, src, MIPSRegisters::zero);
+ return;
+ }
+
+ /*
+ li immTemp, imm
+ xor dest, dest, immTemp
+ */
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(dest, src, immTempRegister);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtd(dst, src);
+ }
+
+ void absDouble(FPRegisterID, FPRegisterID)
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ return result;
+ }
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+ // store(source, address). The source for a store may be a TrustedImm32. Address
+ // operand objects to loads and store will be implicitly constructed if a
+ // register is passed.
+
+ /* Need to use zero-extended load byte for load8. */
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lbu(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lbu dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ // Load a byte, zero-extended (MIPS lbu), from base + (index << scale) + offset.
+ // Fast path: when the offset fits a signed 16-bit immediate and we are not in
+ // fixed-width (patchable) mode, three instructions suffice; otherwise the
+ // upper half of the offset is folded into the address first via lui/addu
+ // (the +0x8000 bias compensates for sign-extension of the low 16 bits).
+ // Clobbers addrTempRegister, and immTempRegister on the slow path.
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lbu dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lbu dest, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lbu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ // Same addressing as load8(), but sign-extends the byte (MIPS lb).
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lb dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lb dest, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ // Load a 32-bit word from base + offset. One lw when the offset fits a
+ // signed 16-bit immediate (and not fixed-width); otherwise lui/addu fold
+ // the offset's upper half into addrTempRegister first.
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lw(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lw dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ // Load a 32-bit word from base + (index << scale) + offset.
+ // Clobbers addrTempRegister, and immTempRegister on the slow path.
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lw dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lw dest, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lw(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ // Halfword loads on MIPS tolerate the alignment produced here, so the
+ // aligned load16() sequence is reused. NOTE(review): this assumes load16's
+ // lhu is acceptable for the callers' "unaligned" cases - confirm against
+ // the trap handler / alignment guarantees of the target.
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ // Load a 32-bit word that may only be 2-byte aligned, using the classic
+ // MIPS lwl/lwr pair (each fills one "half" of dest from the unaligned
+ // word). The pair order is swapped between big- and little-endian targets.
+ // Note the fast-path bound is 32764, not 32767, because the second access
+ // uses offset + 3 and must still fit the 16-bit displacement.
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32764
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ (Big-Endian)
+ lwl dest, address.offset(addrTemp)
+ lwr dest, address.offset+3(addrTemp)
+ (Little-Endian)
+ lwl dest, address.offset+3(addrTemp)
+ lwr dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, address.offset);
+ m_assembler.lwr(dest, addrTempRegister, address.offset + 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, address.offset + 3);
+ m_assembler.lwr(dest, addrTempRegister, address.offset);
+
+#endif
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, address.offset >> 16
+ ori immTemp, immTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, immTemp
+ (Big-Endian)
+ lwl dest, 0(at)
+ lwr dest, 3(at)
+ (Little-Endian)
+ lwl dest, 3(at)
+ lwr dest, 0(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, address.offset >> 16);
+ m_assembler.ori(immTempRegister, immTempRegister, address.offset);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+#if CPU(BIG_ENDIAN)
+ m_assembler.lwl(dest, addrTempRegister, 0);
+ m_assembler.lwr(dest, addrTempRegister, 3);
+#else
+ m_assembler.lwl(dest, addrTempRegister, 3);
+ m_assembler.lwr(dest, addrTempRegister, 0);
+#endif
+ }
+ }
+
+ // Load a 32-bit word from an absolute address: materialize the pointer in
+ // addrTempRegister, then lw with zero displacement.
+ void load32(const void* address, RegisterID dest)
+ {
+ /*
+ li addrTemp, address
+ lw dest, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ }
+
+ // Emit a load whose 32-bit offset can be patched later. m_fixedWidth is
+ // forced on so move() always emits the full lui/ori pair - the patcher
+ // relies on a fixed instruction layout at the returned DataLabel32.
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ lw dest, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lw(dest, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ // "Compact" variant: MIPS has no shorter patchable form, so this simply
+ // wraps the full-width sequence under a DataLabelCompact.
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ load32WithAddressOffsetPatch(address, dest);
+ return dataLabel;
+ }
+
+ /* Need to use zero-extended load half-word (lhu) for load16. */
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.lhu(dest, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ lhu dest, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ /* Need to use zero-extended load half-word (lhu) for load16. */
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lhu dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lhu dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lhu(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ // Sign-extending halfword load (MIPS lh); same addressing as load16().
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lh dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lh dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ }
+ }
+
+ // Store with a patchable 32-bit offset; mirrors load32WithAddressOffsetPatch
+ // (fixed-width lui/ori so the patcher finds a stable instruction layout).
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ m_fixedWidth = true;
+ /*
+ lui addrTemp, address.offset >> 16
+ ori addrTemp, addrTemp, address.offset & 0xffff
+ addu addrTemp, addrTemp, address.base
+ sw src, 0(addrTemp)
+ */
+ DataLabel32 dataLabel(this);
+ move(TrustedImm32(address.offset), addrTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, 0);
+ m_fixedWidth = false;
+ return dataLabel;
+ }
+
+ // Store a byte to base + (index << scale) + offset; fast 16-bit-offset
+ // path vs. lui/addu slow path, as in the loads above.
+ void store8(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sb src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sb src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ }
+ }
+
+ // Store an immediate byte to an absolute address. A zero immediate is
+ // stored straight from the hard-wired $zero register, saving the li.
+ void store8(TrustedImm32 imm, void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sb src, 0(addrTemp)
+ */
+ if (!imm.m_value && !m_fixedWidth) {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ // Store a halfword to base + (index << scale) + offset.
+ void store16(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sh src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sh src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ }
+ }
+
+ // Store a 32-bit word to base + offset (single sw on the fast path).
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth)
+ m_assembler.sw(src, address.base, address.offset);
+ else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw src, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ // Store a 32-bit word to base + (index << scale) + offset.
+ void store32(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sw src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sw src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sw(src, addrTempRegister, address.offset);
+ }
+ }
+
+ // Store an immediate to base + offset; zero goes straight from $zero.
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ if (!imm.m_value)
+ m_assembler.sw(MIPSRegisters::zero, address.base, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, address.base, address.offset);
+ }
+ } else {
+ /*
+ lui addrTemp, (offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, base
+ sw immTemp, (offset & 0xffff)(addrTemp)
+ */
+ m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ if (!imm.m_value && !m_fixedWidth)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ }
+ }
+
+ // Store an immediate to base + (index << scale) + offset.
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sw src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ if (!imm.m_value)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sw src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ if (!imm.m_value && !m_fixedWidth)
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
+ else {
+ move(imm, immTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
+ }
+ }
+ }
+
+
+ // Store a register to an absolute address (li pointer, then sw).
+ void store32(RegisterID src, const void* address)
+ {
+ /*
+ li addrTemp, address
+ sw src, 0(addrTemp)
+ */
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(src, addrTempRegister, 0);
+ }
+
+ // Store an immediate to an absolute address; zero goes straight from $zero.
+ void store32(TrustedImm32 imm, const void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sw src, 0(addrTemp)
+ */
+ if (!imm.m_value && !m_fixedWidth) {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sw(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ // Floating-point operations:
+
+ // Compile-time capability queries; the JIT uses these to decide whether
+ // to emit FP code paths or fall back to software routines.
+ static bool supportsFloatingPoint()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ // Truncation (double -> int) requires a 64-bit FPU and MIPS ISA II+.
+ static bool supportsFloatingPointTruncate()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ // sqrt.d likewise needs double-float support and ISA II+.
+ static bool supportsFloatingPointSqrt()
+ {
+#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2)
+ return true;
+#else
+ return false;
+#endif
+ }
+ // No abs.d fast path is wired up on this port.
+ static bool supportsFloatingPointAbs() { return false; }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
+ // Pop one 4-byte word: load from $sp, then bump $sp up.
+ void pop(RegisterID dest)
+ {
+ m_assembler.lw(dest, MIPSRegisters::sp, 0);
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4);
+ }
+
+ // Push one 4-byte word: drop $sp, then store at the new top.
+ void push(RegisterID src)
+ {
+ m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4);
+ m_assembler.sw(src, MIPSRegisters::sp, 0);
+ }
+
+ // Push a word loaded from memory (clobbers dataTempRegister).
+ void push(Address address)
+ {
+ load32(address, dataTempRegister);
+ push(dataTempRegister);
+ }
+
+ // Push an immediate (clobbers immTempRegister).
+ void push(TrustedImm32 imm)
+ {
+ move(imm, immTempRegister);
+ push(immTempRegister);
+ }
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ // Materialize an immediate. In fixed-width mode always emit the two
+ // instruction lui/ori pair so patchable sequences have constant length;
+ // otherwise let li pick the shortest encoding, and use $zero for 0.
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value && !m_fixedWidth)
+ move(MIPSRegisters::zero, dest);
+ else if (m_fixedWidth) {
+ m_assembler.lui(dest, imm.m_value >> 16);
+ m_assembler.ori(dest, dest, imm.m_value);
+ } else
+ m_assembler.li(dest, imm.m_value);
+ }
+
+ // Register-to-register move; elided when src == dest unless fixed-width
+ // mode requires a constant instruction count.
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ m_assembler.move(dest, src);
+ }
+
+ // Pointers are 32-bit here, so a pointer move is an immediate move.
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ move(TrustedImm32(imm), dest);
+ }
+
+ // Exchange two registers via immTempRegister (which is clobbered).
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ move(reg1, immTempRegister);
+ move(reg2, reg1);
+ move(immTempRegister, reg2);
+ }
+
+ // On a 32-bit port sign/zero extension to pointer width is a plain move.
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest || m_fixedWidth)
+ move(src, dest);
+ }
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations return a Jump
+ // object which may linked at a later point, allow forwards jump,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively, for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
+
+ // Compare a zero-extended byte in memory against an 8-bit immediate.
+ // Clobbers dataTempRegister and immTempRegister.
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ // Set dest to the boolean result of comparing a byte in memory with imm.
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ // Make sure the immediate value is unsigned 8 bits.
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ move(right, immTempRegister);
+ compare32(cond, dataTempRegister, immTempRegister, dest);
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ load8(left, dataTempRegister);
+ // Be careful that the previous load8() uses immTempRegister.
+ // So, we need to put move() after load8().
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ // Core relational branch. MIPS has no condition codes: each relation is
+ // synthesized from slt/sltu (set-on-less-than, signed/unsigned) into
+ // cmpTempRegister, then branched on equal/not-equal against $zero.
+ // a > b  <=>  sltu/slt(tmp, b, a); tmp != 0, and the other relations
+ // follow by swapping operands and/or inverting the branch sense.
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ if (cond == Equal)
+ return branchEqual(left, right);
+ if (cond == NotEqual)
+ return branchNotEqual(left, right);
+ if (cond == Above) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == AboveOrEqual) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Below) {
+ m_assembler.sltu(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == BelowOrEqual) {
+ m_assembler.sltu(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThan) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThan) {
+ m_assembler.slt(cmpTempRegister, left, right);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == LessThanOrEqual) {
+ m_assembler.slt(cmpTempRegister, right, left);
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+
+ // Unreachable for valid conditions; keeps the compiler happy.
+ return Jump();
+ }
+
+ // The remaining branch32 overloads lower their memory/immediate operands
+ // into dataTempRegister / immTempRegister and defer to the register-
+ // register form above. All of them clobber those temporaries.
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ move(right, immTempRegister);
+ return branch32(cond, left, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ load32(right, dataTempRegister);
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ load32(left, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32(left, dataTempRegister);
+ // Be careful that the previous load32() uses immTempRegister.
+ // So, we need to put move() after load32().
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ load32WithUnalignedHalfWords(left, dataTempRegister);
+ // Be careful that the previous load32WithUnalignedHalfWords()
+ // uses immTempRegister.
+ // So, we need to put move() after load32WithUnalignedHalfWords().
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ load32(left.m_ptr, dataTempRegister);
+ move(right, immTempRegister);
+ return branch32(cond, dataTempRegister, immTempRegister);
+ }
+
+ // Branch on (reg & mask) == 0 / != 0.
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ m_assembler.andInsn(cmpTempRegister, reg, mask);
+ if (cond == Zero)
+ return branchEqual(cmpTempRegister, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+
+ // Immediate-mask form; mask == -1 means "test the whole register", which
+ // skips the and entirely (unless fixed-width mode forbids shortcuts).
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+ if (mask.m_value == -1 && !m_fixedWidth) {
+ if (cond == Zero)
+ return branchEqual(reg, MIPSRegisters::zero);
+ return branchNotEqual(reg, MIPSRegisters::zero);
+ }
+ move(mask, immTempRegister);
+ return branchTest32(cond, reg, immTempRegister);
+ }
+
+ // Memory forms: load the operand into dataTempRegister, then test.
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load32(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+ load8(Address(dataTempRegister), dataTempRegister);
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
+ // Unconditional jump, expressed as an always-taken beq $zero,$zero so it
+ // yields a linkable Jump like the conditional branches.
+ Jump jump()
+ {
+ return branchEqual(MIPSRegisters::zero, MIPSRegisters::zero);
+ }
+
+ // Indirect jumps go through $t9: the MIPS o32 PIC convention expects the
+ // callee's address in t9. The nop fills the jr delay slot.
+ void jump(RegisterID target)
+ {
+ move(target, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ }
+
+ // Memory-indirect jump; fixed-width mode keeps the load sequence at a
+ // constant length so the emitted code size is predictable.
+ void jump(Address address)
+ {
+ m_fixedWidth = true;
+ load32(address, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ }
+
+ void jump(AbsoluteAddress address)
+ {
+ m_fixedWidth = true;
+ load32(address.m_ptr, MIPSRegisters::t9);
+ m_assembler.jr(MIPSRegisters::t9);
+ m_assembler.nop();
+ m_fixedWidth = false;
+ }
+
+ // Split a double FP register into two GP registers (low/high words).
+ // NOTE(review): implemented via the assembler's vmov helper; confirm the
+ // dest1/dest2 word ordering against MIPSAssembler::vmov.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ // Inverse of moveDoubleToInts; scratch is unused on this port.
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ // dest += src, branching on the requested result condition. The Overflow
+ // case uses the sign-bit trick: overflow is only possible when the
+ // operands have the same sign and the result's sign differs. The bltz/bgez
+ // offsets (10 and 7) are hand-counted instruction skips over the
+ // fixed-length "b Overflow" trampoline below them - keep them in sync if
+ // this sequence ever changes.
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dest, dataTemp
+ xor cmpTemp, dataTemp, src
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addu dest, dataTemp, src
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(dest, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dest, dataTempRegister, src);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ add32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ add32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ add32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ // Three-operand form: dest = op1 + op2, branching on cond. Overflow uses
+ // the same sign-bit scheme (and hand-counted branch offsets) as above.
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, op1
+ xor cmpTemp, dataTemp, op2
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addu dest, dataTemp, op2
+ xor cmpTemp, dest, dataTemp
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ move(op1, dataTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dest, dataTempRegister, op2);
+ m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ add32(op1, op2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ add32(op1, op2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ add32(op1, op2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ // Immediate forms lower the constant into immTempRegister and reuse the
+ // register-register implementation (clobbering immTempRegister).
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, immTempRegister);
+ move(src, dest);
+ return branchAdd32(cond, immTempRegister, dest);
+ }
+
+ // Add an immediate to a word in memory (read-modify-write through
+ // dataTempRegister), branching on the requested result condition.
+ // The Overflow path uses the sign-bit scheme: a differing operand sign
+ // (bltz skip) can never overflow; a result sign matching the operand
+ // sign (bgez skip) did not overflow. The bltz/bgez offsets (10 and 7)
+ // are hand-counted instruction skips over the "b Overflow" trampoline;
+ // NOTE(review): they assume fixed lengths for the interleaved
+ // store32()/li sequences - keep them in sync with any change here.
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ move dataTemp, dest
+ xori cmpTemp, dataTemp, imm
+ bltz cmpTemp, No_overflow # diff sign bit -> no overflow
+ addiu dataTemp, dataTemp, imm
+ move dest, dataTemp
+ xori cmpTemp, dataTemp, imm
+ bgez cmpTemp, No_overflow # same sign bit -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth) {
+ // Immediate fits the 16-bit fields of addiu/xori.
+ load32(dest.m_ptr, dataTempRegister);
+ m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
+ store32(dataTempRegister, dest.m_ptr);
+ m_assembler.xori(cmpTempRegister, dataTempRegister, imm.m_value);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ } else {
+ // Immediate does not fit 16 bits: materialize it in a register
+ // and use the register-register forms. BUG FIX: this path
+ // previously passed immTempRegister to addiu()/xori(), whose
+ // third operand is a 16-bit immediate - that encoded the
+ // register *number* as the addend/xor mask. addu/xorInsn are
+ // the correct register-operand forms and emit the same number
+ // of instructions, so the branch offsets above are unchanged.
+ load32(dest.m_ptr, dataTempRegister);
+ move(imm, immTempRegister);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.bltz(cmpTempRegister, 10);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
+ store32(dataTempRegister, dest.m_ptr);
+ m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
+ m_assembler.bgez(cmpTempRegister, 7);
+ m_assembler.nop();
+ }
+ return jump();
+ }
+ // Non-overflow conditions: perform the add, write back, then test.
+ move(imm, immTempRegister);
+ load32(dest.m_ptr, dataTempRegister);
+ add32(immTempRegister, dataTempRegister);
+ store32(dataTempRegister, dest.m_ptr);
+ if (cond == Signed) {
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero)
+ return branchEqual(dataTempRegister, MIPSRegisters::zero);
+ if (cond == NonZero)
+ return branchNotEqual(dataTempRegister, MIPSRegisters::zero);
+ ASSERT(0);
+ return Jump();
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Overflow) {
+ /*
+ mult src, dest
+ mfhi dataTemp
+ mflo dest
+ sra addrTemp, dest, 31
+ beq dataTemp, addrTemp, No_overflow # all sign bits (bit 63 to bit 31) are the same -> no overflow
+ nop
+ b Overflow
+ nop
+ nop
+ nop
+ nop
+ nop
+ No_overflow:
+ */
+ m_assembler.mult(src1, src2);
+ m_assembler.mfhi(dataTempRegister);
+ m_assembler.mflo(dest);
+ m_assembler.sra(addrTempRegister, dest, 31);
+ m_assembler.beq(dataTempRegister, addrTempRegister, 7);
+ m_assembler.nop();
+ return jump();
+ }
+ if (cond == Signed) {
+ mul32(src1, src2, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ mul32(src1, src2, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ mul32(src1, src2, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
    // Two-operand form: dest = src * dest; return a Jump taken when 'cond' holds.
    // Overflow detection mirrors the three-operand overload: the 64-bit product
    // overflows 32 bits iff its high word differs from the low word's sign-extension.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            /*
                mult    src, dest
                mfhi    dataTemp
                mflo    dest
                sra     addrTemp, dest, 31
                beq     dataTemp, addrTemp, No_overflow    # all sign bits (bit 63 to bit 31) are the same -> no overflow
                nop
                b       Overflow
                nop
                nop
                nop
                nop
                nop
            No_overflow:
            */
            m_assembler.mult(src, dest);
            m_assembler.mfhi(dataTempRegister); // high 32 bits of the 64-bit product
            m_assembler.mflo(dest); // low 32 bits are the 32-bit result
            m_assembler.sra(addrTempRegister, dest, 31); // sign-extension of the low word
            // Offset 7 skips over the unconditional overflow jump emitted by jump().
            m_assembler.beq(dataTempRegister, addrTempRegister, 7);
            m_assembler.nop(); // branch delay slot
            return jump();
        }
        if (cond == Signed) {
            mul32(src, dest);
            // Check if dest is negative.
            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
        }
        if (cond == Zero) {
            mul32(src, dest);
            return branchEqual(dest, MIPSRegisters::zero);
        }
        if (cond == NonZero) {
            mul32(src, dest);
            return branchNotEqual(dest, MIPSRegisters::zero);
        }
        ASSERT(0); // unreachable for valid conditions
        return Jump();
    }
+
    // Immediate-operand form: materialize 'imm' into immTempRegister and
    // delegate to the register-register implementation.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, immTempRegister);
        return branchMul32(cond, immTempRegister, src, dest);
    }
+
    // dest = dest - src; return a Jump taken when 'cond' holds.
    // Signed subtraction a - b can only overflow when a and b have different
    // signs; it did overflow iff the result's sign then differs from a's.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            /*
                move    dest, dataTemp
                xor     cmpTemp, dataTemp, src
                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
                subu    dest, dataTemp, src
                xor     cmpTemp, dest, dataTemp
                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
                nop
                b       Overflow
                nop
                nop
                nop
                nop
                nop
            No_overflow:
            */
            move(dest, dataTempRegister); // keep the original value of dest
            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, src);
            // Offset 10 / 7 skip to past the unconditional jump emitted by jump().
            m_assembler.bgez(cmpTempRegister, 10);
            m_assembler.subu(dest, dataTempRegister, src); // executes in the delay slot either way
            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
            m_assembler.bgez(cmpTempRegister, 7);
            m_assembler.nop(); // branch delay slot
            return jump();
        }
        if (cond == Signed) {
            sub32(src, dest);
            // Check if dest is negative.
            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
        }
        if (cond == Zero) {
            sub32(src, dest);
            return branchEqual(dest, MIPSRegisters::zero);
        }
        if (cond == NonZero) {
            sub32(src, dest);
            return branchNotEqual(dest, MIPSRegisters::zero);
        }
        ASSERT(0); // unreachable for valid conditions
        return Jump();
    }
+
    // Immediate-operand form: dest = dest - imm, via immTempRegister.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        move(imm, immTempRegister);
        return branchSub32(cond, immTempRegister, dest);
    }

    // Three-operand immediate form: dest = src - imm, via immTempRegister.
    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(imm, immTempRegister);
        return branchSub32(cond, src, immTempRegister, dest);
    }
+
    // dest = op1 - op2; return a Jump taken when 'cond' holds.
    // Overflow test is identical in structure to the two-operand overload:
    // overflow is only possible for mixed-sign operands, and occurred iff the
    // result's sign differs from op1's.
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            /*
                move    dataTemp, op1
                xor     cmpTemp, dataTemp, op2
                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
                subu    dest, dataTemp, op2
                xor     cmpTemp, dest, dataTemp
                bgez    cmpTemp, No_overflow    # same sign bit -> no overflow
                nop
                b       Overflow
                nop
                nop
                nop
                nop
                nop
            No_overflow:
            */
            move(op1, dataTempRegister); // keep op1 safe even if it aliases dest
            m_assembler.xorInsn(cmpTempRegister, dataTempRegister, op2);
            // Offsets 10 / 7 skip past the unconditional jump emitted by jump().
            m_assembler.bgez(cmpTempRegister, 10);
            m_assembler.subu(dest, dataTempRegister, op2); // executes in the delay slot either way
            m_assembler.xorInsn(cmpTempRegister, dest, dataTempRegister);
            m_assembler.bgez(cmpTempRegister, 7);
            m_assembler.nop(); // branch delay slot
            return jump();
        }
        if (cond == Signed) {
            sub32(op1, op2, dest);
            // Check if dest is negative.
            m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
        }
        if (cond == Zero) {
            sub32(op1, op2, dest);
            return branchEqual(dest, MIPSRegisters::zero);
        }
        if (cond == NonZero) {
            sub32(op1, op2, dest);
            return branchNotEqual(dest, MIPSRegisters::zero);
        }
        ASSERT(0); // unreachable for valid conditions
        return Jump();
    }
+
    // srcDest = -srcDest; implemented as multiplication by -1 so that the
    // multiply overload's overflow detection catches negating INT_MIN.
    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        m_assembler.li(dataTempRegister, -1);
        return branchMul32(cond, dataTempRegister, srcDest);
    }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+ if (cond == Signed) {
+ or32(src, dest);
+ // Check if dest is negative.
+ m_assembler.slt(cmpTempRegister, dest, MIPSRegisters::zero);
+ return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
+ }
+ if (cond == Zero) {
+ or32(src, dest);
+ return branchEqual(dest, MIPSRegisters::zero);
+ }
+ if (cond == NonZero) {
+ or32(src, dest);
+ return branchNotEqual(dest, MIPSRegisters::zero);
+ }
+ ASSERT(0);
+ return Jump();
+ }
+
+ // Miscellaneous operations:
+
    // Emit a software breakpoint instruction.
    void breakpoint()
    {
        m_assembler.bkpt();
    }
+
    // Emit a PC-region call (jal) that can be linked later. The label returned
    // points just past the delay slot.
    Call nearCall()
    {
        /* We need two words for relaxation. */
        m_assembler.nop();
        m_assembler.nop();
        m_assembler.jal();
        m_assembler.nop(); // branch delay slot
        return Call(m_assembler.label(), Call::LinkableNear);
    }
+
    // Emit a linkable far call: lui/ori load a (to-be-patched) 32-bit target
    // into t9, then jalr through it. The zero immediates are placeholders
    // filled in at link time.
    Call call()
    {
        m_assembler.lui(MIPSRegisters::t9, 0);
        m_assembler.ori(MIPSRegisters::t9, MIPSRegisters::t9, 0);
        m_assembler.jalr(MIPSRegisters::t9);
        m_assembler.nop(); // branch delay slot
        return Call(m_assembler.label(), Call::Linkable);
    }
+
    // Indirect call through 'target'. t9 is used because the MIPS o32 ABI
    // expects the callee's address there.
    Call call(RegisterID target)
    {
        move(target, MIPSRegisters::t9);
        m_assembler.jalr(MIPSRegisters::t9);
        m_assembler.nop(); // branch delay slot
        return Call(m_assembler.label(), Call::None);
    }
+
    // Call through a function pointer loaded from memory. m_fixedWidth forces
    // load32 to emit its full-length form so the call site has a deterministic
    // size.
    Call call(Address address)
    {
        m_fixedWidth = true;
        load32(address, MIPSRegisters::t9);
        m_assembler.jalr(MIPSRegisters::t9);
        m_assembler.nop(); // branch delay slot
        m_fixedWidth = false;
        return Call(m_assembler.label(), Call::None);
    }
+
    // Return to the caller via the link register.
    void ret()
    {
        m_assembler.jr(MIPSRegisters::ra);
        m_assembler.nop(); // branch delay slot
    }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ if (cond == Equal) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltiu(dest, dest, 1);
+ } else if (cond == NotEqual) {
+ m_assembler.xorInsn(dest, left, right);
+ m_assembler.sltu(dest, MIPSRegisters::zero, dest);
+ } else if (cond == Above)
+ m_assembler.sltu(dest, right, left);
+ else if (cond == AboveOrEqual) {
+ m_assembler.sltu(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == Below)
+ m_assembler.sltu(dest, left, right);
+ else if (cond == BelowOrEqual) {
+ m_assembler.sltu(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == GreaterThan)
+ m_assembler.slt(dest, right, left);
+ else if (cond == GreaterThanOrEqual) {
+ m_assembler.slt(dest, left, right);
+ m_assembler.xori(dest, dest, 1);
+ } else if (cond == LessThan)
+ m_assembler.slt(dest, left, right);
+ else if (cond == LessThanOrEqual) {
+ m_assembler.slt(dest, right, left);
+ m_assembler.xori(dest, dest, 1);
+ }
+ }
+
    // Immediate-operand form: materialize 'right' and reuse the register form.
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        move(right, immTempRegister);
        compare32(cond, left, immTempRegister, dest);
    }
+
    // Set dest to 1 if ((byte at address) & mask) is zero (Zero) or non-zero
    // (NonZero), else 0. A mask of -1 skips the redundant and.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        load8(address, dataTempRegister);
        if (mask.m_value == -1 && !m_fixedWidth) {
            if (cond == Zero)
                m_assembler.sltiu(dest, dataTempRegister, 1); // dest = (value == 0)
            else
                m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister); // dest = (value != 0)
        } else {
            move(mask, immTempRegister);
            m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
            if (cond == Zero)
                m_assembler.sltiu(dest, cmpTempRegister, 1);
            else
                m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
        }
    }
+
    // 32-bit counterpart of test8: set dest to 1 if ((word at address) & mask)
    // is zero (Zero) or non-zero (NonZero), else 0.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        load32(address, dataTempRegister);
        if (mask.m_value == -1 && !m_fixedWidth) {
            if (cond == Zero)
                m_assembler.sltiu(dest, dataTempRegister, 1); // dest = (value == 0)
            else
                m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister); // dest = (value != 0)
        } else {
            move(mask, immTempRegister);
            m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
            if (cond == Zero)
                m_assembler.sltiu(dest, cmpTempRegister, 1);
            else
                m_assembler.sltu(dest, MIPSRegisters::zero, cmpTempRegister);
        }
    }
+
    // Load 'imm' into dest with a fixed-width (hence patchable) sequence and
    // return a label locating the embedded immediate.
    DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
    {
        m_fixedWidth = true;
        DataLabel32 label(this);
        move(imm, dest);
        m_fixedWidth = false;
        return label;
    }
+
    // Pointer variant of moveWithPatch: fixed-width load of 'initialValue'
    // plus a label for later repatching.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        m_fixedWidth = true;
        DataLabelPtr label(this);
        move(initialValue, dest);
        m_fixedWidth = false;
        return label;
    }
+
    // Compare 'left' against a patchable pointer constant and branch on 'cond'.
    // 'dataLabel' receives the location of the embedded constant.
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_fixedWidth = true;
        dataLabel = moveWithPatch(initialRightValue, immTempRegister);
        Jump temp = branch32(cond, left, immTempRegister);
        m_fixedWidth = false;
        return temp;
    }
+
    // Memory-operand variant: load the word at 'left', then compare it against
    // a patchable pointer constant and branch on 'cond'.
    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_fixedWidth = true;
        load32(left, dataTempRegister);
        dataLabel = moveWithPatch(initialRightValue, immTempRegister);
        Jump temp = branch32(cond, dataTempRegister, immTempRegister);
        m_fixedWidth = false;
        return temp;
    }
+
    // Store a patchable pointer constant to 'address'; the returned label
    // locates the constant for later repatching.
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        m_fixedWidth = true;
        DataLabelPtr dataLabel = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        m_fixedWidth = false;
        return dataLabel;
    }
+
    // Convenience overload: store a null placeholder to be patched later.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
+
    Call tailRecursiveCall()
    {
        // Like a normal call, but don't update the returned address register:
        // load a placeholder target (patched at link time) into t9 and jr —
        // not jalr — through it, leaving ra untouched.
        m_fixedWidth = true;
        move(TrustedImm32(0), MIPSRegisters::t9);
        m_assembler.jr(MIPSRegisters::t9);
        m_assembler.nop(); // branch delay slot
        m_fixedWidth = false;
        return Call(m_assembler.label(), Call::Linkable);
    }
+
    // Bind 'oldJump' to the current location and emit a tail call there.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
+
    // Load a single-precision float from base + (index << scale) + offset.
    // Small offsets fit lwc1's signed 16-bit displacement; otherwise the high
    // half of the offset (rounded via +0x8000) is folded into the address with
    // lui/addu and lwc1 supplies the low half.
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth) {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lwc1    dest, address.offset(addrTemp)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lwc1(dest, addrTempRegister, address.offset);
        } else {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lui     immTemp, (address.offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, immTemp
                lwc1    dest, (address.offset & 0xffff)(at)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
            // NOTE(review): the full offset is passed here although only its low
            // 16 bits are needed — presumably the assembler truncates; confirm.
            m_assembler.lwc1(dest, addrTempRegister, address.offset);
        }
    }
+
    // Load a double from base + offset. MIPS I has no ldc1, so the double is
    // loaded as two adjacent single-precision halves into the register pair
    // (dest, dest+1); later ISAs use ldc1 directly.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
#if WTF_MIPS_ISA(1)
        /*
            li      addrTemp, address.offset
            addu    addrTemp, addrTemp, base
            lwc1    dest, 0(addrTemp)
            lwc1    dest+1, 4(addrTemp)
        */
        move(TrustedImm32(address.offset), addrTempRegister);
        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
        m_assembler.lwc1(dest, addrTempRegister, 0);
        m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
#else
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth) {
            // Offset fits ldc1's signed 16-bit displacement.
            m_assembler.ldc1(dest, address.base, address.offset);
        } else {
            /*
                lui     addrTemp, (offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, base
                ldc1    dest, (offset & 0xffff)(addrTemp)
            */
            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.ldc1(dest, addrTempRegister, address.offset);
        }
#endif
    }
+
    // Load a double from base + (index << scale) + offset. On MIPS I the value
    // is loaded as two lwc1 word halves into (dest, dest+1); otherwise ldc1 is
    // used. Large offsets fold their high half into the address via lui/addu.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
#if WTF_MIPS_ISA(1)
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth) {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lwc1    dest, address.offset(addrTemp)
                lwc1    dest+1, (address.offset+4)(addrTemp)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lwc1(dest, addrTempRegister, address.offset);
            m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
        } else {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lui     immTemp, (address.offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, immTemp
                lwc1    dest, (address.offset & 0xffff)(at)
                lwc1    dest+1, (address.offset & 0xffff + 4)(at)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
            m_assembler.lwc1(dest, addrTempRegister, address.offset);
            m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
        }
#else
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth) {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                ldc1    dest, address.offset(addrTemp)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.ldc1(dest, addrTempRegister, address.offset);
        } else {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lui     immTemp, (address.offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, immTemp
                ldc1    dest, (address.offset & 0xffff)(at)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
            m_assembler.ldc1(dest, addrTempRegister, address.offset);
        }
#endif
    }
+
    // Load a double from an absolute address, materialized into addrTemp.
    // MIPS I splits the load into two lwc1 word halves.
    void loadDouble(const void* address, FPRegisterID dest)
    {
#if WTF_MIPS_ISA(1)
        /*
            li      addrTemp, address
            lwc1    dest, 0(addrTemp)
            lwc1    dest+1, 4(addrTemp)
        */
        move(TrustedImmPtr(address), addrTempRegister);
        m_assembler.lwc1(dest, addrTempRegister, 0);
        m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4);
#else
        /*
            li      addrTemp, address
            ldc1    dest, 0(addrTemp)
        */
        move(TrustedImmPtr(address), addrTempRegister);
        m_assembler.ldc1(dest, addrTempRegister, 0);
#endif
    }
+
    // Store a single-precision float to base + (index << scale) + offset,
    // mirroring the addressing strategy of loadFloat.
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth) {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                swc1    src, address.offset(addrTemp)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.swc1(src, addrTempRegister, address.offset);
        } else {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lui     immTemp, (address.offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, immTemp
                swc1    src, (address.offset & 0xffff)(at)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
            m_assembler.swc1(src, addrTempRegister, address.offset);
        }
    }
+
    // Store a double to base + offset. MIPS I has no sdc1, so the register pair
    // (src, src+1) is stored as two swc1 word halves; later ISAs use sdc1.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
#if WTF_MIPS_ISA(1)
        /*
            li      addrTemp, address.offset
            addu    addrTemp, addrTemp, base
            swc1    dest, 0(addrTemp)
            swc1    dest+1, 4(addrTemp)
        */
        move(TrustedImm32(address.offset), addrTempRegister);
        m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
        m_assembler.swc1(src, addrTempRegister, 0);
        m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
#else
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth)
            m_assembler.sdc1(src, address.base, address.offset); // offset fits the 16-bit displacement
        else {
            /*
                lui     addrTemp, (offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, base
                sdc1    src, (offset & 0xffff)(addrTemp)
            */
            m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.sdc1(src, addrTempRegister, address.offset);
        }
#endif
    }
+
    // Store a double to base + (index << scale) + offset; the BaseIndex
    // counterpart of the ImplicitAddress overload above. MIPS I stores the
    // (src, src+1) pair as two swc1 word halves; later ISAs use sdc1.
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
#if WTF_MIPS_ISA(1)
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth) {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                swc1    src, address.offset(addrTemp)
                swc1    src+1, (address.offset + 4)(addrTemp)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.swc1(src, addrTempRegister, address.offset);
            m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
        } else {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lui     immTemp, (address.offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, immTemp
                swc1    src, (address.offset & 0xffff)(at)
                swc1    src+1, (address.offset & 0xffff + 4)(at)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
            m_assembler.swc1(src, addrTempRegister, address.offset);
            m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
        }
#else
        if (address.offset >= -32768 && address.offset <= 32767
            && !m_fixedWidth) {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                sdc1    src, address.offset(addrTemp)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.sdc1(src, addrTempRegister, address.offset);
        } else {
            /*
                sll     addrTemp, address.index, address.scale
                addu    addrTemp, addrTemp, address.base
                lui     immTemp, (address.offset + 0x8000) >> 16
                addu    addrTemp, addrTemp, immTemp
                sdc1    src, (address.offset & 0xffff)(at)
            */
            m_assembler.sll(addrTempRegister, address.index, address.scale);
            m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
            m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
            m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
            m_assembler.sdc1(src, addrTempRegister, address.offset);
        }
#endif
    }
+
    // Store a double to an absolute address materialized into addrTemp.
    // MIPS I splits the store into two swc1 word halves.
    void storeDouble(FPRegisterID src, const void* address)
    {
#if WTF_MIPS_ISA(1)
        move(TrustedImmPtr(address), addrTempRegister);
        m_assembler.swc1(src, addrTempRegister, 0);
        m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
#else
        move(TrustedImmPtr(address), addrTempRegister);
        m_assembler.sdc1(src, addrTempRegister, 0);
#endif
    }
+
    // Register-to-register double move. Elided when src == dest, unless
    // fixed-width code generation requires a deterministic instruction count.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest || m_fixedWidth)
            m_assembler.movd(dest, src);
    }

    // Exchange two double registers using fpTempRegister as scratch.
    void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
    {
        moveDouble(fr1, fpTempRegister);
        moveDouble(fr2, fr1);
        moveDouble(fpTempRegister, fr2);
    }
+
    // Double-precision arithmetic. Two-operand forms accumulate into dest;
    // memory-operand forms stage the operand in fpTempRegister first.

    // dest += src.
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.addd(dest, dest, src);
    }

    // dest = op1 + op2.
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.addd(dest, op1, op2);
    }

    // dest += *src.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        m_assembler.addd(dest, dest, fpTempRegister);
    }

    // dest += *address.
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, fpTempRegister);
        m_assembler.addd(dest, dest, fpTempRegister);
    }

    // dest -= src.
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.subd(dest, dest, src);
    }

    // dest = op1 - op2.
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.subd(dest, op1, op2);
    }

    // dest -= *src.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        m_assembler.subd(dest, dest, fpTempRegister);
    }

    // dest *= src.
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.muld(dest, dest, src);
    }

    // dest *= *src.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        m_assembler.muld(dest, dest, fpTempRegister);
    }

    // dest = op1 * op2.
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.muld(dest, op1, op2);
    }

    // dest /= src.
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.divd(dest, dest, src);
    }

    // dest = op1 / op2.
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.divd(dest, op1, op2);
    }

    // dest /= *src.
    void divDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        m_assembler.divd(dest, dest, fpTempRegister);
    }

    // dest = -src.
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.negd(dest, src);
    }
+
    // Convert a signed 32-bit integer to double: move the GPR into the FPU
    // (mtc1) and convert word -> double (cvt.d.w).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.mtc1(src, fpTempRegister);
        m_assembler.cvtdw(dest, fpTempRegister);
    }

    // Same conversion, with the integer loaded from memory first.
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        load32(src, dataTempRegister);
        m_assembler.mtc1(dataTempRegister, fpTempRegister);
        m_assembler.cvtdw(dest, fpTempRegister);
    }

    // Same conversion, with the integer loaded from an absolute address.
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        load32(src.m_ptr, dataTempRegister);
        m_assembler.mtc1(dataTempRegister, fpTempRegister);
        m_assembler.cvtdw(dest, fpTempRegister);
    }

    // Widen single -> double (cvt.d.s).
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.cvtds(dst, src);
    }

    // Narrow double -> single (cvt.s.d).
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.cvtsd(dst, src);
    }
+
    // Reserve space after a short branch so the linker can later widen it into
    // a longer sequence; the always-taken beq hops over the padding at runtime.
    void insertRelaxationWords()
    {
        /* We need four words for relaxation. */
        m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 3); // Jump over nops;
        m_assembler.nop();
        m_assembler.nop();
        m_assembler.nop();
    }
+
    // Jump taken when the FPU condition flag is set (bc1t), with a delay-slot
    // nop and relaxation padding.
    Jump branchTrue()
    {
        m_assembler.appendJump();
        m_assembler.bc1t();
        m_assembler.nop(); // branch delay slot
        insertRelaxationWords();
        return Jump(m_assembler.label());
    }

    // Jump taken when the FPU condition flag is clear (bc1f).
    Jump branchFalse()
    {
        m_assembler.appendJump();
        m_assembler.bc1f();
        m_assembler.nop(); // branch delay slot
        insertRelaxationWords();
        return Jump(m_assembler.label());
    }
+
    // Jump taken when rs == rt. The branch target (offset 0) is filled in when
    // the Jump is linked.
    Jump branchEqual(RegisterID rs, RegisterID rt)
    {
        // NOTE(review): the two leading nops appear to reserve extra words for
        // branch relaxation/patching ahead of the recorded jump — confirm.
        m_assembler.nop();
        m_assembler.nop();
        m_assembler.appendJump();
        m_assembler.beq(rs, rt, 0);
        m_assembler.nop(); // branch delay slot
        insertRelaxationWords();
        return Jump(m_assembler.label());
    }

    // Jump taken when rs != rt; mirrors branchEqual with bne.
    Jump branchNotEqual(RegisterID rs, RegisterID rt)
    {
        m_assembler.nop();
        m_assembler.nop();
        m_assembler.appendJump();
        m_assembler.bne(rs, rt, 0);
        m_assembler.nop(); // branch delay slot
        insertRelaxationWords();
        return Jump(m_assembler.label());
    }
+
    // Compare two doubles and return a Jump taken when 'cond' holds. Each case
    // picks the c.cond.d predicate (or its complement) whose ordered/unordered
    // behavior matches the requested condition, then branches on the FPU flag:
    // branchTrue() when the predicate equals the condition, branchFalse() when
    // the complement was tested.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        if (cond == DoubleEqual) {
            m_assembler.ceqd(left, right);
            return branchTrue();
        }
        if (cond == DoubleNotEqual) {
            // c.ueq is true for equal-or-unordered; its negation is ordered-not-equal.
            m_assembler.cueqd(left, right);
            return branchFalse(); // false
        }
        if (cond == DoubleGreaterThan) {
            m_assembler.cngtd(left, right);
            return branchFalse(); // false
        }
        if (cond == DoubleGreaterThanOrEqual) {
            m_assembler.cnged(left, right);
            return branchFalse(); // false
        }
        if (cond == DoubleLessThan) {
            m_assembler.cltd(left, right);
            return branchTrue();
        }
        if (cond == DoubleLessThanOrEqual) {
            m_assembler.cled(left, right);
            return branchTrue();
        }
        if (cond == DoubleEqualOrUnordered) {
            m_assembler.cueqd(left, right);
            return branchTrue();
        }
        if (cond == DoubleNotEqualOrUnordered) {
            m_assembler.ceqd(left, right);
            return branchFalse(); // false
        }
        if (cond == DoubleGreaterThanOrUnordered) {
            m_assembler.coled(left, right);
            return branchFalse(); // false
        }
        if (cond == DoubleGreaterThanOrEqualOrUnordered) {
            m_assembler.coltd(left, right);
            return branchFalse(); // false
        }
        if (cond == DoubleLessThanOrUnordered) {
            m_assembler.cultd(left, right);
            return branchTrue();
        }
        if (cond == DoubleLessThanOrEqualOrUnordered) {
            m_assembler.culed(left, right);
            return branchTrue();
        }
        ASSERT(0); // unreachable for valid conditions

        return Jump();
    }
+
    // Truncates 'src' to an integer, placing the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MAX 0x7fffffff, which trunc.w.d also
    // produces on out-of-range input, so it doubles as the failure sentinel).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        m_assembler.truncwd(fpTempRegister, src);
        m_assembler.mfc1(dest, fpTempRegister);
        return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0x7fffffff));
    }

    // Unsigned variant; uses 0 as the failure sentinel value.
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        m_assembler.truncwd(fpTempRegister, src);
        m_assembler.mfc1(dest, fpTempRegister);
        return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0));
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.truncwd(fpTempRegister, src);
        m_assembler.mfc1(dest, fpTempRegister);
    }

    // Result is undefined if src > 2^31.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.truncwd(fpTempRegister, src);
        m_assembler.mfc1(dest, fpTempRegister);
    }
+
    // Convert 'src' to an integer, placing the result in 'dest'.
    // If the result is not representable as a 32 bit value, append to
    // failureCases. May also fail for some values that are representable in
    // 32 bits (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        m_assembler.cvtwd(fpTempRegister, src);
        m_assembler.mfc1(dest, fpTempRegister);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branch32(Equal, dest, MIPSRegisters::zero));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fpTemp, src));
    }
+
    // Branch when 'reg' is neither +/-0.0 nor NaN. The vmov from two zero GPRs
    // materializes +0.0 into 'scratch' for the comparison.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    // Branch when 'reg' is +/-0.0 or NaN (unordered compare against +0.0).
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.vmov(scratch, MIPSRegisters::zero, MIPSRegisters::zero);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ RelationalCondition r;
+ if (cond == Equal)
+ r = NotEqual;
+ else if (cond == NotEqual)
+ r = Equal;
+ else if (cond == Above)
+ r = BelowOrEqual;
+ else if (cond == AboveOrEqual)
+ r = Below;
+ else if (cond == Below)
+ r = AboveOrEqual;
+ else if (cond == BelowOrEqual)
+ r = Above;
+ else if (cond == GreaterThan)
+ r = LessThanOrEqual;
+ else if (cond == GreaterThanOrEqual)
+ r = LessThan;
+ else if (cond == LessThan)
+ r = GreaterThanOrEqual;
+ else if (cond == LessThanOrEqual)
+ r = GreaterThan;
+ return r;
+ }
+
    // Emit a single no-op instruction.
    void nop()
    {
        m_assembler.nop();
    }
+
    // Decode the destination of a previously linked call instruction.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation())));
    }
+
    // Overwrite the code at 'instructionStart' with a jump to 'destination'.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        MIPSAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ MIPSAssembler::maxJumpReplacementSize();
+ return 0;
+ }
+
    // This port does not support replacing a patchable branch-ptr with a jump.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }

    // The patchable move begins at the label itself (no preamble to skip).
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }
+
    // Undo a jump replacement by restoring the original patchable move.
    // NOTE(review): only the low 16 bits of initialValue are passed — presumably
    // revertJumpToMove restores the immediate's low half (the high half lives in
    // a separate instruction); confirm against MIPSAssembler.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        MIPSAssembler::revertJumpToMove(instructionStart.dataLocation(), immTempRegister, reinterpret_cast<int>(initialValue) & 0xffff);
    }

    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch).
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch).
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
+
+
private:
    // If m_fixedWidth is true, we will generate a fixed number of instructions.
    // Otherwise, we can emit any number of instructions.
    bool m_fixedWidth;

    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Bind a not-yet-linked call in freshly generated code to 'function'.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        MIPSAssembler::linkCall(code, call.m_label, function.value());
    }

    // Retarget an already-linked call in executable code to a code label.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Retarget an already-linked call in executable code to a C function.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
+
+};
+
+}
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MacroAssemblerMIPS_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp b/src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp
new file mode 100644
index 0000000000..59de3ff48c
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "MacroAssemblerSH4.h"
+
+namespace JSC {
+
// Bind a not-yet-linked call in freshly generated code to 'function'.
void MacroAssemblerSH4::linkCall(void* code, Call call, FunctionPtr function)
{
    SH4Assembler::linkCall(code, call.m_label, function.value());
}

// Retarget an already-linked call in executable code to a code label.
void MacroAssemblerSH4::repatchCall(CodeLocationCall call, CodeLocationLabel destination)
{
    SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}

// Retarget an already-linked call in executable code to a C function.
void MacroAssemblerSH4::repatchCall(CodeLocationCall call, FunctionPtr destination)
{
    SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
+
+} // namespace JSC
+
#endif // ENABLE(ASSEMBLER) && CPU(SH4)
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerSH4.h b/src/3rdparty/masm/assembler/MacroAssemblerSH4.h
new file mode 100644
index 0000000000..56fb74d45b
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerSH4.h
@@ -0,0 +1,2293 @@
+/*
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef MacroAssemblerSH4_h
+#define MacroAssemblerSH4_h
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "SH4Assembler.h"
+#include "AbstractMacroAssembler.h"
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> {
+public:
+ // Floating-point register type is taken directly from the backend assembler.
+ typedef SH4Assembler::FPRegisterID FPRegisterID;
+
+ // Pointers are 4 bytes wide, so scaled pointer indexing uses TimesFour.
+ static const Scale ScalePtr = TimesFour;
+ // fr10 is reserved as the FP scratch used by the double-arithmetic helpers.
+ static const FPRegisterID fscratch = SH4Registers::fr10;
+ static const RegisterID stackPointerRegister = SH4Registers::sp;
+ static const RegisterID linkRegister = SH4Registers::pr;
+ // r13 is a dedicated extra integer scratch (used by the AbsoluteAddress
+ // add32/sub32 forms when both ordinary scratches are already claimed).
+ static const RegisterID scratchReg3 = SH4Registers::r13;
+
+ // Largest offset accepted by load32WithCompactAddressOffsetPatch below.
+ static const int MaximumCompactPtrAlignedAddressOffset = 60;
+
+ // True when 'value' can be encoded by the compact patched load.
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return (value >= 0) && (value <= MaximumCompactPtrAlignedAddressOffset);
+ }
+
+ // Integer comparison conditions, mapped onto SH4Assembler condition codes.
+ enum RelationalCondition {
+ Equal = SH4Assembler::EQ,
+ NotEqual = SH4Assembler::NE,
+ Above = SH4Assembler::HI,
+ AboveOrEqual = SH4Assembler::HS,
+ Below = SH4Assembler::LI,
+ BelowOrEqual = SH4Assembler::LS,
+ GreaterThan = SH4Assembler::GT,
+ GreaterThanOrEqual = SH4Assembler::GE,
+ LessThan = SH4Assembler::LT,
+ LessThanOrEqual = SH4Assembler::LE
+ };
+
+ // Conditions derived from the result of an arithmetic operation.
+ enum ResultCondition {
+ Overflow = SH4Assembler::OF,
+ Signed = SH4Assembler::SI,
+ Zero = SH4Assembler::EQ,
+ NonZero = SH4Assembler::NE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = SH4Assembler::EQ,
+ DoubleNotEqual = SH4Assembler::NE,
+ DoubleGreaterThan = SH4Assembler::GT,
+ DoubleGreaterThanOrEqual = SH4Assembler::GE,
+ DoubleLessThan = SH4Assembler::LT,
+ DoubleLessThanOrEqual = SH4Assembler::LE,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = SH4Assembler::EQU,
+ DoubleNotEqualOrUnordered = SH4Assembler::NEU,
+ DoubleGreaterThanOrUnordered = SH4Assembler::GTU,
+ DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU,
+ DoubleLessThanOrUnordered = SH4Assembler::LTU,
+ // NOTE(review): trailing comma after the last enumerator is a C++11-ism;
+ // drop it if this must build as pedantic C++03.
+ DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU,
+ };
+
+ // Borrow a temporary register from the assembler's scratch pool. Every
+ // claimScratch() must be paired with a releaseScratch() on all paths.
+ RegisterID claimScratch()
+ {
+ return m_assembler.claimScratch();
+ }
+
+ // Return a register previously obtained via claimScratch().
+ void releaseScratch(RegisterID reg)
+ {
+ m_assembler.releaseScratch(reg);
+ }
+
+ // Integer arithmetic operations
+
+ // dest += src.
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addlRegReg(src, dest);
+ }
+
+ // dest += imm. Uses the short add-immediate encoding when the constant
+ // fits; otherwise materializes imm in a scratch register first.
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (m_assembler.isImmediate(imm.m_value)) {
+ m_assembler.addlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = src + imm.
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movlRegReg(src, dest);
+ add32(imm, dest);
+ }
+
+ // *address += imm (read-modify-write through a scratch register).
+ void add32(TrustedImm32 imm, Address address)
+ {
+ RegisterID scr = claimScratch();
+ load32(address, scr);
+ add32(imm, scr);
+ store32(scr, address);
+ releaseScratch(scr);
+ }
+
+ // dest += *src.
+ void add32(Address src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest += *(src.m_ptr) for an absolute (link-time constant) address.
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src.m_ptr, scr);
+ m_assembler.addlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest &= src.
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andlRegReg(src, dest);
+ }
+
+ // dest &= imm. The and-immediate encoding is only available for r0 with
+ // an unsigned 8-bit constant; otherwise go through a scratch register.
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+ m_assembler.andlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ m_assembler.andlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = src & imm (AND is commutative, so imm can be materialized in
+ // dest first when src and dest differ).
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src != dest) {
+ move(imm, dest);
+ and32(src, dest);
+ return;
+ }
+
+ and32(imm, dest);
+ }
+
+ // dest <<= (shiftamount & 0x1f).
+ // NOTE(review): this clobbers 'shiftamount' (the 0x1f mask is applied in
+ // place) — confirm callers never reuse the shift-amount register afterwards.
+ void lshift32(RegisterID shiftamount, RegisterID dest)
+ {
+ if (shiftamount == SH4Registers::r0)
+ m_assembler.andlImm8r(0x1f, shiftamount);
+ else {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(0x1f, scr);
+ m_assembler.andlRegReg(scr, shiftamount);
+ releaseScratch(scr);
+ }
+ m_assembler.shllRegReg(dest, shiftamount);
+ }
+
+ // Arithmetic right shift by a constant: implemented as a dynamic shift by
+ // the negated amount (shaRegReg shifts right for negative counts).
+ void rshift32(int imm, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(-imm, scr);
+ m_assembler.shaRegReg(dest, scr);
+ releaseScratch(scr);
+ }
+
+ // dest <<= (imm & 0x1f). Shifts of 1, 2, 8 and 16 have dedicated one-
+ // instruction encodings; other amounts use a register shift.
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (!imm.m_value)
+ return;
+
+ if ((imm.m_value == 1) || (imm.m_value == 2) || (imm.m_value == 8) || (imm.m_value == 16)) {
+ m_assembler.shllImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((imm.m_value & 0x1f) , scr);
+ m_assembler.shllRegReg(dest, scr);
+ releaseScratch(scr);
+ }
+
+ // dest = src << shiftamount.
+ void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+
+ lshift32(shiftamount, dest);
+ }
+
+ // dest = dest * src (low 32 bits). The multiply leaves its result in the
+ // MACL system register; stsmacl copies it back into dest.
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.imullRegReg(src, dest);
+ m_assembler.stsmacl(dest);
+ }
+
+ // dest = src * imm, with imm staged through a scratch register.
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(imm, scr);
+ if (src != dest)
+ move(src, dest);
+ mul32(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest |= src.
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orlRegReg(src, dest);
+ }
+
+ // dest |= imm. The or-immediate encoding requires r0 and an unsigned
+ // 8-bit constant; otherwise go through a scratch register.
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
+ m_assembler.orlImm8r(imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.orlRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = op1 | op2, handling every aliasing combination (OR is
+ // commutative, so the non-dest operand is OR'd into dest).
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(op1, dest);
+ else if (op1 == dest)
+ or32(op2, dest);
+ else {
+ move(op2, dest);
+ or32(op1, dest);
+ }
+ }
+
+
+ // dest = src | imm. OR is commutative, so when src and dest differ the
+ // constant is materialized in dest first and src is OR'd into it.
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src == dest) {
+ or32(imm, dest);
+ return;
+ }
+
+ move(imm, dest);
+ or32(src, dest);
+ }
+
+ // dest = src ^ imm, mirroring the three-operand or32/and32 helpers:
+ // XOR is commutative, so imm is staged in dest when the operands differ.
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src == dest) {
+ xor32(imm, dest);
+ return;
+ }
+
+ move(imm, dest);
+ xor32(src, dest);
+ }
+
+ // dest >>= (shiftamount & 0x1f), arithmetic shift.
+ // NOTE(review): clobbers 'shiftamount' (masked, then negated in place so
+ // shaRegReg shifts right) — confirm callers never reuse it afterwards.
+ void rshift32(RegisterID shiftamount, RegisterID dest)
+ {
+ if (shiftamount == SH4Registers::r0)
+ m_assembler.andlImm8r(0x1f, shiftamount);
+ else {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(0x1f, scr);
+ m_assembler.andlRegReg(scr, shiftamount);
+ releaseScratch(scr);
+ }
+ m_assembler.neg(shiftamount, shiftamount);
+ m_assembler.shaRegReg(dest, shiftamount);
+ }
+
+ // dest >>= (imm & 0x1f), arithmetic shift. A masked amount of zero
+ // (e.g. imm == 32) deliberately emits nothing.
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value & 0x1f)
+ rshift32(imm.m_value & 0x1f, dest);
+ }
+
+ // dest = src >> imm, arithmetic shift.
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ rshift32(imm, dest);
+ }
+
+ // dest -= src.
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sublRegReg(src, dest);
+ }
+
+ // *(address.m_ptr) -= imm. The caller supplies 'scratchReg', which ends up
+ // holding the absolute address and is used as the store base; the constant
+ // (when not encodable) goes through the dedicated scratchReg3 (r13).
+ void sub32(TrustedImm32 imm, AbsoluteAddress address, RegisterID scratchReg)
+ {
+ RegisterID result = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(-imm.m_value))
+ m_assembler.addlImm8r(-imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.sublRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ }
+
+ // *(address.m_ptr) -= imm, claiming its own address scratch.
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ RegisterID result = claimScratch();
+ RegisterID scratchReg = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(-imm.m_value))
+ m_assembler.addlImm8r(-imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.sublRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ releaseScratch(scratchReg);
+ }
+
+ // *(address.m_ptr) += imm, caller-supplied address scratch (see sub32 above).
+ void add32(TrustedImm32 imm, AbsoluteAddress address, RegisterID scratchReg)
+ {
+ RegisterID result = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(imm.m_value))
+ m_assembler.addlImm8r(imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.addlRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ }
+
+ // *(address.m_ptr) += imm, claiming its own address scratch.
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ RegisterID result = claimScratch();
+ RegisterID scratchReg = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
+ m_assembler.movlMemReg(scratchReg, result);
+
+ if (m_assembler.isImmediate(imm.m_value))
+ m_assembler.addlImm8r(imm.m_value, result);
+ else {
+ m_assembler.loadConstant(imm.m_value, scratchReg3);
+ m_assembler.addlRegReg(scratchReg3, result);
+ }
+
+ store32(result, scratchReg);
+ releaseScratch(result);
+ releaseScratch(scratchReg);
+ }
+
+ // 64-bit in-memory add: adds the sign-extended 'imm' to the int64 stored
+ // little-word-first at 'address'. Low word is added with carry capture
+ // (clrt + addcl), the carry is recovered via movt and folded into the high
+ // word; -1 is added when imm < 0 to complete the sign extension.
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ RegisterID scr1 = claimScratch();
+ RegisterID scr2 = claimScratch();
+
+ // Add 32-bit LSB first.
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scr1);
+ m_assembler.movlMemReg(scr1, scr1); // scr1 = 32-bit LSB of int64 @ address
+ m_assembler.loadConstant(imm.m_value, scr2);
+ m_assembler.clrt();
+ m_assembler.addclRegReg(scr1, scr2);
+ // Reload the address: the load above overwrote scr1 with the LSB value.
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scr1);
+ m_assembler.movlRegMem(scr2, scr1); // Update address with 32-bit LSB result.
+
+ // Then add 32-bit MSB.
+ m_assembler.addlImm8r(4, scr1);
+ m_assembler.movlMemReg(scr1, scr1); // scr1 = 32-bit MSB of int64 @ address
+ m_assembler.movt(scr2);
+ if (imm.m_value < 0)
+ m_assembler.addlImm8r(-1, scr2); // Sign extend imm value if needed.
+ m_assembler.addvlRegReg(scr2, scr1);
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr) + 4, scr2);
+ m_assembler.movlRegMem(scr1, scr2); // Update (address + 4) with 32-bit MSB result.
+
+ releaseScratch(scr2);
+ releaseScratch(scr1);
+ }
+
+ // dest -= imm, preferring an add of the negated constant when encodable.
+ // NOTE(review): -imm.m_value overflows (UB) for imm == INT_MIN — confirm
+ // callers never pass that value.
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (m_assembler.isImmediate(-imm.m_value)) {
+ m_assembler.addlImm8r(-imm.m_value, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ m_assembler.sublRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest -= *src.
+ void sub32(Address src, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ m_assembler.sublRegReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest ^= src.
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorlRegReg(src, dest);
+ }
+
+ // srcDest ^= imm. XOR with -1 is a bitwise NOT; the xor-immediate
+ // encoding requires r0 and an unsigned 8-bit constant (note the guard is
+ // written in the inverse sense compared to or32/and32 above).
+ void xor32(TrustedImm32 imm, RegisterID srcDest)
+ {
+ if (imm.m_value == -1) {
+ m_assembler.notlReg(srcDest, srcDest);
+ return;
+ }
+
+ if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ m_assembler.xorlRegReg(scr, srcDest);
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.xorlImm8r(imm.m_value, srcDest);
+ }
+
+ // Compare dst against a constant, setting the condition for 'cond'.
+ // cmp/eq #imm8,r0 is only valid for (in)equality tests on r0.
+ void compare32(int imm, RegisterID dst, RelationalCondition cond)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) {
+ m_assembler.cmpEqImmR0(imm, dst);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm, scr);
+ m_assembler.cmplRegReg(scr, dst, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ // Compare 'left' against the 32-bit word at base+offset. Offsets in
+ // [0, 64) use the scaled-displacement load (offset >> 2).
+ // NOTE(review): the >> 2 drops the low two bits — presumably offsets are
+ // always word-aligned here; confirm at call sites.
+ void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond)
+ {
+ RegisterID scr = claimScratch();
+ if (!offset) {
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ // Bit-test the word at base+offset against 'imm' (tst: sets T from the AND).
+ void testImm(int imm, int offset, RegisterID base)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+
+ if ((offset < 0) || (offset >= 64)) {
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ } else if (offset)
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ else
+ m_assembler.movlMemReg(base, scr);
+ if (m_assembler.isImmediate(imm))
+ m_assembler.movImm8(imm, scr1);
+ else
+ m_assembler.loadConstant(imm, scr1);
+
+ m_assembler.testlRegReg(scr, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ // Bit-test dst against a constant (tst: condition derives from dst & imm).
+ // The tst #imm8,r0 encoding is usable only when dst is r0 and imm fits an
+ // unsigned byte; every other case tests against a materialized constant.
+ void testlImm(int imm, RegisterID dst)
+ {
+ if (dst != SH4Registers::r0 || imm < 0 || imm > 255) {
+ RegisterID scratch = claimScratch();
+ m_assembler.loadConstant(imm, scratch);
+ m_assembler.testlRegReg(scratch, dst);
+ releaseScratch(scratch);
+ return;
+ }
+
+ m_assembler.testlImm8r(imm, dst);
+ }
+
+ // Compare 'right' against the word at base+offset (memory is the second
+ // operand). Same displacement strategy as the other compare32 overloads.
+ void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond)
+ {
+ if (!offset) {
+ RegisterID scr = claimScratch();
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ }
+
+ // Compare an immediate against the word at base+offset: loads both the
+ // memory operand and the constant into scratches, then compares.
+ void compare32(int imm, int offset, RegisterID base, RelationalCondition cond)
+ {
+ if (!offset) {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.movlMemReg(base, scr);
+ m_assembler.loadConstant(imm, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ return;
+ }
+
+ if ((offset < 0) || (offset >= 64)) {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant(offset, scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ m_assembler.loadConstant(imm, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.movlMemReg(offset >> 2, base, scr);
+ m_assembler.loadConstant(imm, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ }
+
+ // Memory access operation
+
+ // dest = *(base + offset), 32-bit.
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address.base, address.offset, dest);
+ }
+
+ // dest = zero-extended byte at base + offset.
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ load8(address.base, address.offset, dest);
+ }
+
+ // dest = zero-extended byte at base + (index << scale) + offset.
+ // The effective address is computed in a scratch register first.
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load8(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = sign-extended byte at base + (index << scale) + offset.
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load8Signed(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = 32-bit word at base + (index << scale) + offset.
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load32(scr, address.offset, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = 32-bit word at an absolute address (dest doubles as the pointer).
+ void load32(const void* address, RegisterID dest)
+ {
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)), dest);
+ m_assembler.movlMemReg(dest, dest);
+ }
+
+ // dest = *(base + offset), 32-bit. Fast paths: zero offset, scaled
+ // 4-bit displacement for offsets in [0, 64), and the r0-indexed form
+ // when dest is r0. Falls back to computing the address in dest itself
+ // (or a scratch when dest aliases base).
+ void load32(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movlMemReg(base, dest);
+ return;
+ }
+
+ if ((offset >= 0) && (offset < 64)) {
+ m_assembler.movlMemReg(offset >> 2, base, dest);
+ return;
+ }
+
+ if ((dest == SH4Registers::r0) && (dest != base)) {
+ m_assembler.loadConstant((offset), dest);
+ m_assembler.movlR0mr(base, dest);
+ return;
+ }
+
+ RegisterID scr;
+ if (dest == base)
+ scr = claimScratch();
+ else
+ scr = dest;
+ m_assembler.loadConstant((offset), scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlMemReg(scr, dest);
+
+ if (dest == base)
+ releaseScratch(scr);
+ }
+
+ // dest = sign-extended byte at base + offset (mov.b sign-extends).
+ void load8Signed(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movbMemReg(base, dest);
+ return;
+ }
+
+ if ((offset > 0) && (offset < 64) && (dest == SH4Registers::r0)) {
+ m_assembler.movbMemReg(offset, base, dest);
+ return;
+ }
+
+ if (base != dest) {
+ m_assembler.loadConstant((offset), dest);
+ m_assembler.addlRegReg(base, dest);
+ m_assembler.movbMemReg(dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((offset), scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movbMemReg(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = zero-extended byte at base + offset: same paths as load8Signed,
+ // with extub appended to clear the sign extension.
+ void load8(RegisterID base, int offset, RegisterID dest)
+ {
+ if (!offset) {
+ m_assembler.movbMemReg(base, dest);
+ m_assembler.extub(dest, dest);
+ return;
+ }
+
+ if ((offset > 0) && (offset < 64) && (dest == SH4Registers::r0)) {
+ m_assembler.movbMemReg(offset, base, dest);
+ m_assembler.extub(dest, dest);
+ return;
+ }
+
+ if (base != dest) {
+ m_assembler.loadConstant((offset), dest);
+ m_assembler.addlRegReg(base, dest);
+ m_assembler.movbMemReg(dest, dest);
+ m_assembler.extub(dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((offset), scr);
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movbMemReg(scr, dest);
+ m_assembler.extub(dest, dest);
+ releaseScratch(scr);
+ }
+
+ // dst = *(r0 + src): r0-indexed load; the first argument exists only to
+ // assert the calling convention (it must be r0).
+ void load32(RegisterID r0, RegisterID src, RegisterID dst)
+ {
+ ASSERT(r0 == SH4Registers::r0);
+ m_assembler.movlR0mr(src, dst);
+ }
+
+ // dst = *src, 32-bit.
+ void load32(RegisterID src, RegisterID dst)
+ {
+ m_assembler.movlMemReg(src, dst);
+ }
+
+ // dest = zero-extended halfword at base + offset (extuw clears the
+ // sign extension performed by mov.w).
+ void load16(ImplicitAddress address, RegisterID dest)
+ {
+ if (!address.offset) {
+ m_assembler.movwMemReg(address.base, dest);
+ extuw(dest, dest);
+ return;
+ }
+
+ if ((address.offset > 0) && (address.offset < 64) && (dest == SH4Registers::r0)) {
+ m_assembler.movwMemReg(address.offset, address.base, dest);
+ extuw(dest, dest);
+ return;
+ }
+
+ if (address.base != dest) {
+ m_assembler.loadConstant((address.offset), dest);
+ m_assembler.addlRegReg(address.base, dest);
+ m_assembler.movwMemReg(dest, dest);
+ extuw(dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant((address.offset), scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movwMemReg(scr, dest);
+ extuw(dest, dest);
+ releaseScratch(scr);
+ }
+
+ // Load a 16-bit value from a possibly unaligned address as two byte loads:
+ // low byte first, then the next byte shifted into bits 8..15 (little-endian
+ // byte order — presumably this backend targets little-endian SH4; confirm).
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ add32(address.base, scr);
+ load8(scr, scr1);
+ add32(TrustedImm32(1), scr);
+ load8(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ or32(scr1, dest);
+
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ // dest = zero-extended halfword at *src.
+ void load16(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movwMemReg(src, dest);
+ extuw(dest, dest);
+ }
+
+ // dest = sign-extended halfword at *src (mov.w sign-extends; no extuw).
+ void load16Signed(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movwMemReg(src, dest);
+ }
+
+ // dest = zero-extended halfword at *(r0 + src); first arg must be r0.
+ void load16(RegisterID r0, RegisterID src, RegisterID dest)
+ {
+ ASSERT(r0 == SH4Registers::r0);
+ m_assembler.movwR0mr(src, dest);
+ extuw(dest, dest);
+ }
+
+ // dest = sign-extended halfword at *(r0 + src); first arg must be r0.
+ void load16Signed(RegisterID r0, RegisterID src, RegisterID dest)
+ {
+ ASSERT(r0 == SH4Registers::r0);
+ m_assembler.movwR0mr(src, dest);
+ }
+
+ // dest = zero-extended halfword at base + (index << scale) + offset.
+ // When base is r0, the r0-indexed addressing form is used directly.
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+ if (address.base == SH4Registers::r0)
+ load16(address.base, scr, dest);
+ else {
+ add32(address.base, scr);
+ load16(scr, dest);
+ }
+
+ releaseScratch(scr);
+ }
+
+ // Sign-extending variant of the above.
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+ if (address.base == SH4Registers::r0)
+ load16Signed(address.base, scr, dest);
+ else {
+ add32(address.base, scr);
+ load16Signed(scr, dest);
+ }
+
+ releaseScratch(scr);
+ }
+
+ // Store the low byte of src at base + (index << scale).
+ // NOTE(review): unlike the loads above, address.offset is ignored here —
+ // presumably callers only use zero offsets; confirm.
+ void store8(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+
+ m_assembler.movbRegMem(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ // Store the low halfword of src at base + (index << scale).
+ // NOTE(review): address.offset is ignored here too — confirm callers.
+ void store16(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+
+ m_assembler.movwRegMem(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ // *(base + offset) = src, 32-bit.
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ store32(src, address.offset, address.base, scr);
+ releaseScratch(scr);
+ }
+
+ // Core 32-bit store: zero-offset and scaled-displacement fast paths;
+ // otherwise the caller-provided scratch holds the offset (r0-indexed
+ // store) or the fully computed address.
+ void store32(RegisterID src, int offset, RegisterID base, RegisterID scr)
+ {
+ if (!offset) {
+ m_assembler.movlRegMem(src, base);
+ return;
+ }
+
+ if ((offset >=0) && (offset < 64)) {
+ m_assembler.movlRegMem(src, offset >> 2, base);
+ return;
+ }
+
+ m_assembler.loadConstant((offset), scr);
+ if (scr == SH4Registers::r0) {
+ m_assembler.movlRegMemr0(src, base);
+ return;
+ }
+
+ m_assembler.addlRegReg(base, scr);
+ m_assembler.movlRegMem(src, scr);
+ }
+
+ // *(r0 + base) = src; 'offset' must be r0 (r0-indexed store form).
+ void store32(RegisterID src, RegisterID offset, RegisterID base)
+ {
+ ASSERT(offset == SH4Registers::r0);
+ m_assembler.movlRegMemr0(src, base);
+ }
+
+ // *dst = src, 32-bit.
+ void store32(RegisterID src, RegisterID dst)
+ {
+ m_assembler.movlRegMem(src, dst);
+ }
+
+ // *(base + offset) = imm: constant staged in one scratch, the second
+ // scratch serves the core store above.
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ store32(scr, address.offset, address.base, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ // *(base + (index << scale) + offset) = src.
+ void store32(RegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ store32(src, Address(scr, address.offset));
+
+ releaseScratch(scr);
+ }
+
+ // *(absolute address) = imm.
+ void store32(TrustedImm32 imm, void* address)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant((imm.m_value), scr);
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr1);
+ m_assembler.movlRegMem(scr, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ // *(absolute address) = src.
+ void store32(RegisterID src, void* address)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
+ m_assembler.movlRegMem(src, scr);
+ releaseScratch(scr);
+ }
+
+ // Load with a patchable 32-bit offset: the label marks the constant
+ // emitted by loadConstantUnReusable so it can be rewritten later (the
+ // constant must not be shared/reused, hence the UnReusable variant).
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ DataLabel32 label(this);
+ m_assembler.loadConstantUnReusable(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, dest);
+ releaseScratch(scr);
+ return label;
+ }
+
+ // Store counterpart of the patchable-offset load above.
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ RegisterID scr = claimScratch();
+ DataLabel32 label(this);
+ m_assembler.loadConstantUnReusable(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlRegMem(src, scr);
+ releaseScratch(scr);
+ return label;
+ }
+
+ // Compact patchable load: single scaled-displacement instruction, so the
+ // offset must be within [0, MaximumCompactPtrAlignedAddressOffset].
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ DataLabelCompact dataLabel(this);
+ ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
+ ASSERT(address.offset >= 0);
+ m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest);
+ return dataLabel;
+ }
+
+ // Load that can later be converted (e.g. to an address computation).
+ // NOTE(review): movImm8 is used for the offset — assumes address.offset
+ // fits the 8-bit immediate range; there is no ASSERT here, unlike the
+ // compact load above. Confirm against SH4Assembler::movImm8.
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+
+ RegisterID scr = claimScratch();
+ m_assembler.movImm8(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, dest);
+ releaseScratch(scr);
+
+ return result;
+ }
+
+ // Floating-point operations
+
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return false; }
+
+ // Split a double register pair into two GPRs via FPUL: dest1 receives the
+ // contents of (src + 1), dest2 those of src. Doubles occupy the register
+ // pair (src, src + 1) throughout this file.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.fldsfpul((FPRegisterID)(src + 1));
+ m_assembler.stsfpulReg(dest1);
+ m_assembler.fldsfpul(src);
+ m_assembler.stsfpulReg(dest2);
+ }
+
+ // Inverse of moveDoubleToInts; 'scratch' is unused on this backend.
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.ldsrmfpul(src1);
+ m_assembler.fstsfpul((FPRegisterID)(dest + 1));
+ m_assembler.ldsrmfpul(src2);
+ m_assembler.fstsfpul(dest);
+ }
+
+ // Load a single-precision float from base + (index << scale) + offset.
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // Load a double into the (dest, dest + 1) pair: the post-increment read
+ // fills dest + 1 first, then the plain read fills dest.
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // Load a double from base + offset; r0 bases use the r0-indexed FP read
+ // with a manual +4 between the two halves.
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstant(address.offset, scr);
+ if (address.base == SH4Registers::r0) {
+ m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1));
+ m_assembler.addlImm8r(4, scr);
+ m_assembler.fmovsReadr0r(scr, dest);
+ releaseScratch(scr);
+ return;
+ }
+
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // Load a double from an absolute address.
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
+ m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
+ m_assembler.fmovsReadrm(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // Store a single-precision float at base + (index << scale) + offset.
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsWriterm(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ // Store the double held in (src, src + 1): src + 1 goes to the lower
+ // address, src to address + 4 (mirrors the loadDouble ordering).
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.fmovsWriterm((FPRegisterID)(src + 1), scr);
+ m_assembler.addlImm8r(4, scr);
+ m_assembler.fmovsWriterm(src, scr);
+ releaseScratch(scr);
+ }
+
+ // BaseIndex variant of the double store above.
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.fmovsWriterm((FPRegisterID)(src + 1), scr);
+ m_assembler.addlImm8r(4, scr);
+ m_assembler.fmovsWriterm(src, scr);
+
+ releaseScratch(scr);
+ }
+
+ // dest = op1 + op2 (double precision), correct for every aliasing case.
+ // Bug fix: the original handled only op1 == dest; when op2 == dest (and
+ // op1 != dest), dmovRegReg(op1, dest) overwrote op2 before the add,
+ // producing op1 + op1. Addition is commutative, so when either operand
+ // aliases dest we simply add the other operand in place.
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ if (op1 == dest)
+ m_assembler.daddRegReg(op2, dest);
+ else if (op2 == dest)
+ m_assembler.daddRegReg(op1, dest);
+ else {
+ m_assembler.dmovRegReg(op1, dest);
+ m_assembler.daddRegReg(op2, dest);
+ }
+ }
+
+ // dest += src (double).
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.daddRegReg(src, dest);
+ }
+
+ // dest += double at an absolute address; clobbers the fscratch pair.
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(address.m_ptr, fscratch);
+ addDouble(fscratch, dest);
+ }
+
+ // dest += double at base + offset; clobbers the fscratch pair.
+ void addDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ addDouble(fscratch, dest);
+ }
+
+ // dest -= src (double).
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.dsubRegReg(src, dest);
+ }
+
+ // dest -= double at base + offset; clobbers the fscratch pair.
+ void subDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ subDouble(fscratch, dest);
+ }
+
+ // dest *= src (double).
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.dmulRegReg(src, dest);
+ }
+
+ // dest *= double at base + offset; clobbers the fscratch pair.
+ void mulDouble(Address address, FPRegisterID dest)
+ {
+ loadDouble(address, fscratch);
+ mulDouble(fscratch, dest);
+ }
+
+ // dest /= src (double).
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ddivRegReg(src, dest);
+ }
+
+ // Widen a single to a double: src goes through FPUL, dcnvsd converts
+ // FPUL into the dst double pair.
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.fldsfpul(src);
+ m_assembler.dcnvsd(dst);
+ }
+
+ // Narrow a double to a single via FPUL (inverse of the above).
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.dcnvds(src);
+ m_assembler.fstsfpul(dst);
+ }
+
+ // dest = (double)src for a 32-bit integer register, via FPUL.
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ldsrmfpul(src);
+ m_assembler.floatfpulDreg(dest);
+ }
+
+ // dest = (double) of the 32-bit integer stored AT src.m_ptr.
+ // Bug fix: the original loaded the address itself into the scratch
+ // (loadConstant of src.m_ptr) and converted that pointer value, never
+ // reading memory. load32 performs the required dereference, matching the
+ // Address overload below.
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src.m_ptr, scr);
+ convertInt32ToDouble(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = (double) of the 32-bit integer at base + offset.
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ load32(src, scr);
+ convertInt32ToDouble(scr, dest);
+ releaseScratch(scr);
+ }
+
+ // Load a 32-bit value from a possibly unaligned BaseIndex address.
+ // Dispatches at runtime on (effective address & 3):
+ // 0 -> plain aligned load32;
+ // 2 -> two 16-bit loads combined (low half | high half << 16);
+ // 1/3 -> byte + halfword + byte assembled into dest.
+ // Uses r0 for the alignment test (saved/restored through scr1 when dest
+ // isn't r0), two scratch registers, and near branches stitched with a
+ // JumpList. ensureSpace reserves 68 bytes so the whole ladder stays within
+ // near-branch range — do not reorder or insert instructions here.
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ Jump m_jump;
+ JumpList end;
+
+ if (dest != SH4Registers::r0)
+ move(SH4Registers::r0, scr1);
+
+ // scr = base + (index << scale) + offset (effective address).
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+
+ if (address.offset)
+ add32(TrustedImm32(address.offset), scr);
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 68, sizeof(uint32_t));
+ move(scr, SH4Registers::r0);
+ m_assembler.andlImm8r(0x3, SH4Registers::r0);
+ m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);
+ m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+ // Aligned path.
+ load32(scr, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+ m_jump.link(this);
+ m_assembler.andlImm8r(0x1, SH4Registers::r0);
+ m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);
+
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+ m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+ // Halfword-aligned path: two 16-bit loads.
+ load16(scr, scr1);
+ add32(TrustedImm32(2), scr);
+ load16(scr, dest);
+ m_assembler.shllImm8r(16, dest);
+ or32(scr1, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+ m_jump.link(this);
+ // Odd-address path: byte, halfword, byte.
+ load8(scr, scr1);
+ add32(TrustedImm32(1), scr);
+ load16(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ or32(dest, scr1);
+ add32(TrustedImm32(2), scr);
+ load8(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ m_assembler.shllImm8r(16, dest);
+ or32(scr1, dest);
+ end.link(this);
+
+ releaseScratch(scr);
+ releaseScratch(scr1);
+ }
+
+ // Compare a possibly unaligned 32-bit load against an immediate and branch.
+ // Uses the dedicated scratchReg3 (not claimScratch) as the load target.
+ // A zero immediate with Equal/NotEqual is fast-pathed through tst.
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ RegisterID scr = scratchReg3;
+ load32WithUnalignedHalfWords(left, scr);
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testlRegReg(scr, scr);
+ else
+ compare32(right.m_value, scr, cond);
+
+ // SH4 conditions set the T bit; NotEqual inverts the sense of the branch.
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Branch if reg != 0.0. Materializes double 0.0 into scratch (clobbering
+ // scratchReg3) and delegates to branchDouble.
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.movImm8(0, scratchReg3);
+ convertInt32ToDouble(scratchReg3, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ // Branch if reg == 0.0 or reg is NaN. Same 0.0 materialization as above.
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ m_assembler.movImm8(0, scratchReg3);
+ convertInt32ToDouble(scratchReg3, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ // Double-precision compare-and-branch for every DoubleCondition.
+ // Ordered conditions map directly onto dcmppeq/dcmppgt (operand order and
+ // branchTrue/branchFalse choice encode <, <=, >, >=).
+ // Unordered conditions need an explicit NaN check, since SH4 FP compares do
+ // not trap NaN here: each operand is narrowed with dcnvds, the resulting
+ // FPUL bit pattern is compared against 0x7fbfffff (presumably the single-
+ // precision pattern produced for NaN — TODO confirm against the SH-4
+ // manual), and a near-jump to "end" takes the unordered outcome.
+ // The ensureSpace(+22) calls keep each NaN ladder within near-branch range;
+ // do not reorder or insert instructions inside those sequences.
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ if (cond == DoubleEqual) {
+ m_assembler.dcmppeq(right, left);
+ return branchTrue();
+ }
+
+ if (cond == DoubleNotEqual) {
+ // NaN compares unequal, but dcmppeq would report "not equal" via the
+ // T bit anyway only for ordered operands — route NaN to the taken side.
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, left);
+ releaseScratch(scr);
+ Jump m_jump = branchFalse();
+ end.link(this);
+ return m_jump;
+ }
+
+ if (cond == DoubleGreaterThan) {
+ m_assembler.dcmppgt(right, left);
+ return branchTrue();
+ }
+
+ if (cond == DoubleGreaterThanOrEqual) {
+ // left >= right <=> !(right > left).
+ m_assembler.dcmppgt(left, right);
+ return branchFalse();
+ }
+
+ if (cond == DoubleLessThan) {
+ m_assembler.dcmppgt(left, right);
+ return branchTrue();
+ }
+
+ if (cond == DoubleLessThanOrEqual) {
+ // left <= right <=> !(left > right).
+ m_assembler.dcmppgt(right, left);
+ return branchFalse();
+ }
+
+ if (cond == DoubleEqualOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(left, right);
+ // NaN jumps land after je, so the unordered case also takes the branch.
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleGreaterThanOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(right, left);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleGreaterThanOrEqualOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ // >= is the negation of (right > left), hence jne here.
+ m_assembler.dcmppgt(left, right);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleLessThanOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(left, right);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ if (cond == DoubleLessThanOrEqualOrUnordered) {
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppgt(right, left);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ // Only remaining condition: not-equal-or-unordered.
+ ASSERT(cond == DoubleNotEqualOrUnordered);
+ RegisterID scr = claimScratch();
+ JumpList end;
+ m_assembler.loadConstant(0x7fbfffff, scratchReg3);
+ m_assembler.dcnvds(right);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcnvds(left);
+ m_assembler.stsfpulReg(scr);
+ m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
+ m_assembler.dcmppeq(right, left);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
+ releaseScratch(scr);
+ return m_jump;
+ }
+
+ // Emit a far-capable branch taken when the T bit is set. extraInstrForBranch
+ // emits the trampoline instructions (using scratchReg3) that let the short
+ // conditional jump reach any destination; ensureSpace(+6) keeps the pair
+ // contiguous.
+ Jump branchTrue()
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+ Jump m_jump = Jump(m_assembler.je());
+ m_assembler.extraInstrForBranch(scratchReg3);
+ return m_jump;
+ }
+
+ // Same as branchTrue but taken when the T bit is clear.
+ Jump branchFalse()
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
+ Jump m_jump = Jump(m_assembler.jne());
+ m_assembler.extraInstrForBranch(scratchReg3);
+ return m_jump;
+ }
+
+ // Compare *(base + (index << scale) + offset) against an immediate, branch.
+ // The effective address is built in one scratch register, which then holds
+ // the loaded value for the compare.
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ RegisterID scr = claimScratch();
+ move(left.index, scr);
+ lshift32(TrustedImm32(left.scale), scr);
+ add32(left.base, scr);
+ load32(scr, left.offset, scr);
+ compare32(right.m_value, scr, cond);
+ releaseScratch(scr);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // dest = sqrt(src); dsqrt operates in place, so src is copied first if needed.
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (dest != src)
+ m_assembler.dmovRegReg(src, dest);
+ m_assembler.dsqrt(dest);
+ }
+
+ // Not implemented for SH4 — callers must not reach this.
+ void absDouble(FPRegisterID, FPRegisterID)
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ // Byte-sized test/compare helpers: each loads the byte into a scratch
+ // register and defers to the corresponding 32-bit operation.
+
+ // Branch on (byte at address & mask); mask -1 tests the whole byte.
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(address, addressTempRegister);
+ Jump jmp = branchTest32(cond, addressTempRegister, mask);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ // Same, for an absolute address.
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID addressTempRegister = claimScratch();
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ Jump jmp = branchTest32(cond, addressTempRegister, mask);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ // Registers are already 32-bit == pointer-sized on SH4; plain move.
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ }
+
+ // Branch on comparison of a byte at address with an immediate.
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(left, addressTempRegister);
+ Jump jmp = branch32(cond, addressTempRegister, right);
+ releaseScratch(addressTempRegister);
+ return jmp;
+ }
+
+ // dest = (byte at left) <cond> right ? 1 : 0.
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ RegisterID addressTempRegister = claimScratch();
+ load8(left, addressTempRegister);
+ compare32(cond, addressTempRegister, right, dest);
+ releaseScratch(addressTempRegister);
+ }
+
+ // Truncate src to int32 in dest; branch on overflow. ftrc saturates to
+ // 0x7fffffff / 0x80000000 on out-of-range input, so dest is compared against
+ // both saturation values (0x7fffffff, then +1 giving 0x80000000) and the
+ // branch is taken when either matches. ensureSpace(+14) keeps the short
+ // skip-branch sequence contiguous. Clobbers scratchReg3.
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.ftrcdrmfpul(src);
+ m_assembler.stsfpulReg(dest);
+ m_assembler.loadConstant(0x7fffffff, scratchReg3);
+ m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 14, sizeof(uint32_t));
+ m_assembler.branch(BT_OPCODE, 2);
+ m_assembler.addlImm8r(1, scratchReg3);
+ m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
+ return branchTrue();
+ }
+
+ // Stack manipulation operations
+
+ // Pop the top of stack into dest.
+ void pop(RegisterID dest)
+ {
+ m_assembler.popReg(dest);
+ }
+
+ // Push src onto the stack.
+ void push(RegisterID src)
+ {
+ m_assembler.pushReg(src);
+ }
+
+ // Push the 32-bit value at address onto the stack.
+ // Offsets outside [0, 64) don't fit the displacement form of mov.l, so the
+ // effective address is built in a scratch register first.
+ // NOTE(review): the movlMemReg(..., sp) / addlImm8r(-4, sp) pairs here
+ // appear to write at sp before decrementing it, and to target the sp
+ // register itself — verify against SH4Assembler's movlMemReg semantics
+ // before touching this.
+ void push(Address address)
+ {
+ if (!address.offset) {
+ push(address.base);
+ return;
+ }
+
+ if ((address.offset < 0) || (address.offset >= 64)) {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, SH4Registers::sp);
+ m_assembler.addlImm8r(-4, SH4Registers::sp);
+ releaseScratch(scr);
+ return;
+ }
+
+ // In-range offset: displacement is encoded in longwords (offset >> 2).
+ m_assembler.movlMemReg(address.offset >> 2, address.base, SH4Registers::sp);
+ m_assembler.addlImm8r(-4, SH4Registers::sp);
+ }
+
+ // Push a constant: materialize it in a scratch register, then push.
+ void push(TrustedImm32 imm)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(imm.m_value, scr);
+ push(scr);
+ releaseScratch(scr);
+ }
+
+ // Register move operations
+
+ // dest = imm (via the constant pool / immediate loader).
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.loadConstant(imm.m_value, dest);
+ }
+
+ // Load a pointer constant in a patchable (non-pooled, non-reused) slot and
+ // return a label so the value can be repatched later.
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
+ DataLabelPtr dataLabel(this);
+ m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
+ return dataLabel;
+ }
+
+ // dest = src; no-op when identical.
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movlRegReg(src, dest);
+ }
+
+ // dest = pointer constant.
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.loadConstant(imm.asIntptr(), dest);
+ }
+
+ // dst = zero-extended low 16 bits of src (extu.w).
+ void extuw(RegisterID src, RegisterID dst)
+ {
+ m_assembler.extuw(src, dst);
+ }
+
+ // dest = (left <cond> right) ? 1 : 0.
+ // For conditions SH4 can express directly, movt copies the T bit. NotEqual
+ // has no direct encoding, so the result is built with a mov/bt/mov skip
+ // sequence (ensureSpace keeps it contiguous).
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+ if (cond != NotEqual) {
+ m_assembler.movt(dest);
+ return;
+ }
+
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+ m_assembler.movImm8(0, dest);
+ m_assembler.branch(BT_OPCODE, 0);
+ m_assembler.movImm8(1, dest);
+ }
+
+ // Immediate variant: materialize right into dest (when dest is free) or a
+ // scratch register, then compare register-register.
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (left != dest) {
+ move(right, dest);
+ compare32(cond, left, dest, dest);
+ return;
+ }
+
+ RegisterID scr = claimScratch();
+ move(right, scr);
+ compare32(cond, left, scr, dest);
+ releaseScratch(scr);
+ }
+
+ // dest = ((byte at address) & mask) <cond>-matches ? 1 : 0.
+ // Only Zero/NonZero are meaningful; mask -1 means "test the whole byte".
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ load8(address, dest);
+ if (mask.m_value == -1)
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+ else
+ testlImm(mask.m_value, dest);
+ if (cond != NonZero) {
+ m_assembler.movt(dest);
+ return;
+ }
+
+ // NonZero: invert the T bit via the same skip sequence as compare32.
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
+ m_assembler.movImm8(0, dest);
+ m_assembler.branch(BT_OPCODE, 0);
+ m_assembler.movImm8(1, dest);
+ }
+
+ // Load a pointer from memory into the PR (link/return) register.
+ void loadPtrLinkReg(ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ load32(address, scr);
+ m_assembler.ldspr(scr);
+ releaseScratch(scr);
+ }
+
+ // branch32 overloads: compare then branch via branchTrue/branchFalse.
+ // NotEqual always routes through branchFalse because SH4 compares set the
+ // T bit for the *positive* sense of the condition.
+
+ // Register-register compare and branch.
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmplRegReg(right, left, SH4Condition(cond));
+ /* BT label => BF off
+ nop LDR reg
+ nop braf @reg
+ nop nop
+ */
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Register-immediate; zero with Equal/NotEqual uses tst as a fast path.
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testlRegReg(left, left);
+ else
+ compare32(right.m_value, left, cond);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Register vs. memory operand.
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ compare32(right.offset, right.base, left, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Memory operand vs. register.
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ compare32(right, left.offset, left.base, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Memory operand vs. immediate.
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ compare32(right.m_value, left.offset, left.base, cond);
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Absolute-address form comparing the *pointer value* in scr against right.
+ // NOTE(review): unlike the TrustedImm32 form below, this does not
+ // dereference left.m_ptr before comparing — confirm intended semantics.
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ RegisterID scr = claimScratch();
+
+ move(TrustedImm32(reinterpret_cast<uint32_t>(left.m_ptr)), scr);
+ m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
+ releaseScratch(scr);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Absolute-address vs. immediate: loads *left.m_ptr, then compares.
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ RegisterID addressTempRegister = claimScratch();
+
+ m_assembler.loadConstant(reinterpret_cast<uint32_t>(left.m_ptr), addressTempRegister);
+ m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
+ compare32(right.m_value, addressTempRegister, cond);
+ releaseScratch(addressTempRegister);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Compare the byte at base + (index << scale) + offset with an 8-bit
+ // immediate (asserted to fit a byte) and branch.
+ // NOTE(review): both scratches are released *before* the final branch32
+ // consumes them; this only works because branch32(reg, reg) claims no new
+ // scratch registers that could clobber scr/scr1 — confirm before changing
+ // either function.
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+ RegisterID scr = claimScratch();
+
+ move(left.index, scr);
+ lshift32(TrustedImm32(left.scale), scr);
+
+ if (left.offset)
+ add32(TrustedImm32(left.offset), scr);
+ add32(left.base, scr);
+ load8(scr, scr);
+ RegisterID scr1 = claimScratch();
+ m_assembler.loadConstant(right.m_value, scr1);
+ releaseScratch(scr);
+ releaseScratch(scr1);
+
+ return branch32(cond, scr, scr1);
+ }
+
+ // branchTest32 overloads: AND-test (tst) then branch on Zero/NonZero.
+ // A mask of -1 degenerates to testing the value against itself/zero.
+
+ // Branch on (reg & mask-register).
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ m_assembler.testlRegReg(reg, mask);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Branch on (reg & immediate mask).
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ if (mask.m_value == -1)
+ m_assembler.testlRegReg(reg, reg);
+ else
+ testlImm(mask.m_value, reg);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Branch on (*(base+offset) & immediate mask).
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT((cond == Zero) || (cond == NonZero));
+
+ if (mask.m_value == -1)
+ compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond))
+ else
+ testImm(mask.m_value, address.offset, address.base);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Branch on (*(base + (index << scale) + offset) & immediate mask).
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ RegisterID scr = claimScratch();
+
+ move(address.index, scr);
+ lshift32(TrustedImm32(address.scale), scr);
+ add32(address.base, scr);
+ load32(scr, address.offset, scr);
+
+ if (mask.m_value == -1)
+ m_assembler.testlRegReg(scr, scr);
+ else
+ testlImm(mask.m_value, scr);
+
+ releaseScratch(scr);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Unconditional jump to a to-be-linked label.
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ // Indirect jump through a register.
+ void jump(RegisterID target)
+ {
+ m_assembler.jmpReg(target);
+ }
+
+ // Indirect jump through a pointer stored in memory. Offsets outside the
+ // 4-bit scaled displacement range [0, 64) are added explicitly.
+ void jump(Address address)
+ {
+ RegisterID scr = claimScratch();
+
+ if ((address.offset < 0) || (address.offset >= 64)) {
+ m_assembler.loadConstant(address.offset, scr);
+ m_assembler.addlRegReg(address.base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ } else if (address.offset)
+ m_assembler.movlMemReg(address.offset >> 2, address.base, scr);
+ else
+ m_assembler.movlMemReg(address.base, scr);
+ m_assembler.jmpReg(scr);
+
+ releaseScratch(scr);
+ }
+
+ // Arithmetic control flow operations
+
+ // dest += src, branching on the requested result condition.
+ // Overflow uses addv (sets T on signed overflow); Signed adds then tests
+ // sign via cmp/pz (T set when dest >= 0, hence branchFalse); Zero/NonZero
+ // add then compare against 0.
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ m_assembler.addvlRegReg(src, dest);
+ return branchTrue();
+ }
+
+ if (cond == Signed) {
+ m_assembler.addlRegReg(src, dest);
+ // Check if dest is negative
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ m_assembler.addlRegReg(src, dest);
+ compare32(0, dest, Equal);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // dest += imm; immediate staged through scratchReg3.
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ move(imm, scratchReg3);
+ return branchAdd32(cond, scratchReg3, dest);
+ }
+
+ // dest = src + imm, branching on cond. Overflow needs addv, so the
+ // immediate goes through scratchReg3 in that case.
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (src != dest)
+ move(src, dest);
+
+ if (cond == Overflow) {
+ move(imm, scratchReg3);
+ m_assembler.addvlRegReg(scratchReg3, dest);
+ return branchTrue();
+ }
+
+ add32(imm, dest);
+
+ if (cond == Signed) {
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, Equal);
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // dest *= src, branching on cond.
+ // Overflow: full 64-bit dmuls into MACH:MACL; the product overflows iff
+ // MACH differs from the sign-extension of MACL (MACL >> 31 via the -31
+ // arithmetic shift), so branch when they are NOT equal (branchFalse).
+ // Other conditions use the 32-bit signed multiply and test the low result.
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ RegisterID scr1 = claimScratch();
+ RegisterID scr = claimScratch();
+ m_assembler.dmullRegReg(src, dest);
+ m_assembler.stsmacl(dest);
+ m_assembler.movImm8(-31, scr);
+ m_assembler.movlRegReg(dest, scr1);
+ m_assembler.shaRegReg(scr1, scr);
+ m_assembler.stsmach(scr);
+ m_assembler.cmplRegReg(scr, scr1, SH4Condition(Equal));
+ releaseScratch(scr1);
+ releaseScratch(scr);
+ return branchFalse();
+ }
+
+ m_assembler.imullRegReg(src, dest);
+ m_assembler.stsmacl(dest);
+ if (cond == Signed) {
+ // Check if dest is negative
+ m_assembler.cmppz(dest);
+ return branchFalse();
+ }
+
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // dest = src * imm, branching on cond; immediate staged in scratchReg3.
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ move(imm, scratchReg3);
+ if (src != dest)
+ move(src, dest);
+
+ return branchMul32(cond, scratchReg3, dest);
+ }
+
+ // dest -= src, branching on cond. Overflow uses subv (T on signed
+ // overflow); Signed subtracts then branches when the result is negative;
+ // Zero/NonZero subtract then compare against 0.
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Overflow) {
+ m_assembler.subvlRegReg(src, dest);
+ return branchTrue();
+ }
+
+ if (cond == Signed) {
+ // Check if dest is negative
+ m_assembler.sublRegReg(src, dest);
+ compare32(0, dest, LessThan);
+ return branchTrue();
+ }
+
+ sub32(src, dest);
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // dest -= imm; immediate staged through scratchReg3.
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ move(imm, scratchReg3);
+ return branchSub32(cond, scratchReg3, dest);
+ }
+
+ // dest = src - imm, branching on cond.
+ Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(imm, scratchReg3);
+ if (src != dest)
+ move(src, dest);
+ return branchSub32(cond, scratchReg3, dest);
+ }
+
+ // dest = src1 - src2, branching on cond.
+ Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 != dest)
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ // dest |= src, branching on cond (Signed: result negative; Zero/NonZero:
+ // result compared against 0).
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+
+ if (cond == Signed) {
+ or32(src, dest);
+ compare32(0, dest, static_cast<RelationalCondition>(LessThan));
+ return branchTrue();
+ }
+
+ or32(src, dest);
+ compare32(0, dest, static_cast<RelationalCondition>(cond));
+
+ if (cond == NonZero) // NotEqual
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Convert src(double) to int32 in dest; append failure jumps to
+ // failureCases when the conversion is lossy (round-trip through fscratch
+ // does not compare equal, or the result is 0 — which can hide -0.0).
+ // Clobbers fscratch and, when dest != r0, scratchReg3.
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ m_assembler.ftrcdrmfpul(src);
+ m_assembler.stsfpulReg(dest);
+ convertInt32ToDouble(dest, fscratch);
+ failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));
+
+ if (dest == SH4Registers::r0)
+ m_assembler.cmpEqImmR0(0, dest);
+ else {
+ m_assembler.movImm8(0, scratchReg3);
+ m_assembler.cmplRegReg(scratchReg3, dest, SH4Condition(Equal));
+ }
+ failureCases.append(branchTrue());
+ }
+
+ // dst = -dst (two's complement).
+ void neg32(RegisterID dst)
+ {
+ m_assembler.neg(dst, dst);
+ }
+
+ // dest >>= (shiftamount & 0x1f), logical (unsigned) shift.
+ // SH4's shld shifts left for positive counts, so the masked amount is
+ // negated first. Note: this mutates shiftamount in place.
+ void urshift32(RegisterID shiftamount, RegisterID dest)
+ {
+ if (shiftamount == SH4Registers::r0)
+ m_assembler.andlImm8r(0x1f, shiftamount);
+ else {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(0x1f, scr);
+ m_assembler.andlRegReg(scr, shiftamount);
+ releaseScratch(scr);
+ }
+ m_assembler.neg(shiftamount, shiftamount);
+ m_assembler.shllRegReg(dest, shiftamount);
+ }
+
+ // dest >>= (imm & 0x1f).
+ // NOTE(review): this uses shaRegReg (arithmetic shift) rather than the
+ // logical shift used by the register form above — for negative dest values
+ // that would sign-extend instead of zero-fill; verify against the
+ // SH4Assembler encodings before relying on it.
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ RegisterID scr = claimScratch();
+ m_assembler.loadConstant(-(imm.m_value & 0x1f), scr);
+ m_assembler.shaRegReg(dest, scr);
+ releaseScratch(scr);
+ }
+
+ // dest = src >> (shiftamount & 0x1f), logical.
+ void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+
+ urshift32(shiftamount, dest);
+ }
+
+ // Emit a linkable call (target patched in at link time).
+ Call call()
+ {
+ return Call(m_assembler.call(), Call::Linkable);
+ }
+
+ // Emit a near-linkable call.
+ Call nearCall()
+ {
+ return Call(m_assembler.call(), Call::LinkableNear);
+ }
+
+ // Call through a register; nothing to link.
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.call(target), Call::None);
+ }
+
+ // Load a function pointer from memory into target and jsr through it.
+ // ensureSpace keeps the jsr and its delay-slot nop contiguous.
+ void call(Address address, RegisterID target)
+ {
+ load32(address.base, address.offset, target);
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
+ m_assembler.branch(JSR_OPCODE, target);
+ m_assembler.nop();
+ }
+
+ // Emit a trap instruction (debugger breakpoint) plus delay-slot nop.
+ void breakpoint()
+ {
+ m_assembler.bkpt();
+ m_assembler.nop();
+ }
+
+ // Compare left against a patchable pointer constant and branch; dataLabel
+ // is set so the constant can be repatched after code generation.
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ RegisterID dataTempRegister = claimScratch();
+
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
+ releaseScratch(dataTempRegister);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Memory-operand variant: dereference left (offset built explicitly, so any
+ // offset is accepted), then compare against the patchable constant.
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstant(left.offset, scr);
+ m_assembler.addlRegReg(left.base, scr);
+ m_assembler.movlMemReg(scr, scr);
+ RegisterID scr1 = claimScratch();
+ dataLabel = moveWithPatch(initialRightValue, scr1);
+ m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
+ releaseScratch(scr);
+ releaseScratch(scr1);
+
+ if (cond == NotEqual)
+ return branchFalse();
+ return branchTrue();
+ }
+
+ // Return (rts) with its mandatory delay-slot nop.
+ void ret()
+ {
+ m_assembler.ret();
+ m_assembler.nop();
+ }
+
+ // Store a patchable pointer constant to memory; returns the label used to
+ // repatch the constant later.
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ RegisterID scr = claimScratch();
+ DataLabelPtr label = moveWithPatch(initialValue, scr);
+ store32(scr, address);
+ releaseScratch(scr);
+ return label;
+ }
+
+ // Convenience: patchable store with a null initial value.
+ DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
+
+ // Current size of the assembler's constant pool, in bytes (per SH4Assembler).
+ int sizeOfConstantPool()
+ {
+ return m_assembler.sizeOfConstantPool();
+ }
+
+ // Emit a tail call: load a patchable target (initially 0, unreusable so it
+ // can be repatched) and jump through it; wrapped as a Call for linking.
+ Call tailRecursiveCall()
+ {
+ RegisterID scr = claimScratch();
+
+ m_assembler.loadConstantUnReusable(0x0, scr, true);
+ Jump m_jump = Jump(m_assembler.jmp(scr));
+ releaseScratch(scr);
+
+ return Call::fromTailJump(m_jump);
+ }
+
+ // Link oldJump to here, then emit a fresh tail call at this point.
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ return tailRecursiveCall();
+ }
+
+ // Emit a single nop.
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ // Decode the current target of a previously emitted call instruction.
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ // In-place jump replacement is not supported on SH4.
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ // Unsupported; see replaceWithJump.
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+ }
+
+ // Only the register form of branchPtrWithPatch supports jump replacement.
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+
+ // The patchable sequence starts at the data label itself (offset 0).
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return label.labelAtOffset(0);
+ }
+
+ // Restore a replaced jump back to the branchPtrWithPatch sequence; only the
+ // low 16 bits of the initial value are re-encoded (per SH4Assembler::revertJump).
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ SH4Assembler::revertJump(instructionStart.dataLocation(), reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
+ }
+
+ // Address-form patchable branches are unsupported on this platform.
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ // Address-form patchable branches are unsupported on this platform.
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+protected:
+ // Map a MacroAssembler condition onto the assembler's condition encoding.
+ // A plain cast suffices — the enumerators are kept value-compatible with
+ // SH4Assembler::Condition (presumably by construction; confirm when adding
+ // new conditions).
+ SH4Assembler::Condition SH4Condition(RelationalCondition cond)
+ {
+ return static_cast<SH4Assembler::Condition>(cond);
+ }
+
+ // Same mapping for result conditions (Zero/NonZero reuse Equal/NotEqual codes).
+ SH4Assembler::Condition SH4Condition(ResultCondition cond)
+ {
+ return static_cast<SH4Assembler::Condition>(cond);
+ }
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ // Out-of-line linking/repatching entry points used by the buffers above.
+ static void linkCall(void*, Call, FunctionPtr);
+ static void repatchCall(CodeLocationCall, CodeLocationLabel);
+ static void repatchCall(CodeLocationCall, FunctionPtr);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerSH4_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86.h b/src/3rdparty/masm/assembler/MacroAssemblerX86.h
new file mode 100644
index 0000000000..27a030edfd
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_h
+#define MacroAssemblerX86_h
+
+#if ENABLE(ASSEMBLER) && CPU(X86)
+
+#include "MacroAssemblerX86Common.h"
+
+namespace JSC {
+
+class MacroAssemblerX86 : public MacroAssemblerX86Common {
+public:
+ static const Scale ScalePtr = TimesFour;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::branchAdd32;
+ using MacroAssemblerX86Common::branchSub32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
+ using MacroAssemblerX86Common::branch32;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::jump;
+ using MacroAssemblerX86Common::addDouble;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::storeDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+ using MacroAssemblerX86Common::branchTest8;
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+ }
+
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ m_assembler.addl_mr(address.m_ptr, dest);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.addl_im(imm.m_value, address.m_ptr);
+ m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t));
+ }
+
+ void and32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.andl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.orl_im(imm.m_value, address.m_ptr);
+ }
+
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ m_assembler.orl_rm(reg, address.m_ptr);
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ m_assembler.subl_im(imm.m_value, address.m_ptr);
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address, dest);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ return result;
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ m_assembler.addsd_mr(address.m_ptr, dest);
+ }
+
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address);
+ }
+
+ void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+ {
+ m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address);
+ }
+
+ void store32(RegisterID src, void* address)
+ {
+ m_assembler.movl_rm(src, address);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address);
+ }
+
+ // Possibly clobbers src.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ movePackedToInt32(src, dest1);
+ rshiftPacked(TrustedImm32(32), src);
+ movePackedToInt32(src, dest2);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ moveInt32ToPacked(src1, dest);
+ moveInt32ToPacked(src2, scratch);
+ lshiftPacked(TrustedImm32(32), scratch);
+ orPacked(scratch, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ m_assembler.addl_im(imm.m_value, dest.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+ m_assembler.subl_im(imm.m_value, dest.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Call call()
+ {
+ return Call(m_assembler.call(), Call::Linkable);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(AbsoluteAddress address)
+ {
+ m_assembler.jmp_m(address.m_ptr);
+ }
+
+ Call tailRecursiveCall()
+ {
+ return Call::fromTailJump(jump());
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ return Call::fromTailJump(oldJump);
+ }
+
+
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_i32r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.m_ptr);
+ else
+ m_assembler.testb_im(mask.m_value, address.m_ptr);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ padBeforePatch();
+ m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ padBeforePatch();
+ m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
+ dataLabel = DataLabelPtr(this);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ padBeforePatch();
+ m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
+ return DataLabelPtr(this);
+ }
+
+ static bool supportsFloatingPoint() { return isSSE2Present(); }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
+ static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
+ static bool supportsFloatingPointAbs() { return isSSE2Present(); }
+
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
+ return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ const int immediateBytes = 4;
+ const int totalBytes = opcodeBytes + modRMBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ const int offsetBytes = 0;
+ const int immediateBytes = 4;
+ const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg);
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue)
+ {
+ ASSERT(!address.offset);
+ X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ X86Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86Common.h b/src/3rdparty/masm/assembler/MacroAssemblerX86Common.h
new file mode 100644
index 0000000000..53cb80c210
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86Common.h
@@ -0,0 +1,1541 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86Common_h
+#define MacroAssemblerX86Common_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "X86Assembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+protected:
+#if CPU(X86_64)
+ static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
+#endif
+
+ static const int DoubleConditionBitInvert = 0x10;
+ static const int DoubleConditionBitSpecial = 0x20;
+ static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
+
+public:
+ typedef X86Assembler::FPRegisterID FPRegisterID;
+ typedef X86Assembler::XMMRegisterID XMMRegisterID;
+
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+ {
+ return value >= -128 && value <= 127;
+ }
+
+ enum RelationalCondition {
+ Equal = X86Assembler::ConditionE,
+ NotEqual = X86Assembler::ConditionNE,
+ Above = X86Assembler::ConditionA,
+ AboveOrEqual = X86Assembler::ConditionAE,
+ Below = X86Assembler::ConditionB,
+ BelowOrEqual = X86Assembler::ConditionBE,
+ GreaterThan = X86Assembler::ConditionG,
+ GreaterThanOrEqual = X86Assembler::ConditionGE,
+ LessThan = X86Assembler::ConditionL,
+ LessThanOrEqual = X86Assembler::ConditionLE
+ };
+
+ enum ResultCondition {
+ Overflow = X86Assembler::ConditionO,
+ Signed = X86Assembler::ConditionS,
+ Zero = X86Assembler::ConditionE,
+ NonZero = X86Assembler::ConditionNE
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
+ DoubleNotEqual = X86Assembler::ConditionNE,
+ DoubleGreaterThan = X86Assembler::ConditionA,
+ DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
+ DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
+ DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleEqualOrUnordered = X86Assembler::ConditionE,
+ DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
+ DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
+ DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
+ DoubleLessThanOrUnordered = X86Assembler::ConditionB,
+ DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
+ };
+ COMPILE_ASSERT(
+ !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
+ DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
+
+ static const RegisterID stackPointerRegister = X86Registers::esp;
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+#if CPU(X86_64)
+ static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited.
+ static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
+#endif
+#endif
+#endif
+
+ // Integer arithmetic operations:
+ //
+ // Operations are typically two operand - operation(source, srcDst)
+    // For many operations the source may be a TrustedImm32, the srcDst operand
+    // may often be a memory location (explicitly described using an Address
+ // object).
+
+ void add32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addl_rr(src, dest);
+ }
+
+ void add32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.addl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.addl_ir(imm.m_value, dest);
+ }
+
+ void add32(Address src, RegisterID dest)
+ {
+ m_assembler.addl_mr(src.offset, src.base, dest);
+ }
+
+ void add32(RegisterID src, Address dest)
+ {
+ m_assembler.addl_rm(src, dest.offset, dest.base);
+ }
+
+ void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leal_mr(imm.m_value, src, dest);
+ }
+
+ void and32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andl_rr(src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.andl_ir(imm.m_value, dest);
+ }
+
+ void and32(RegisterID src, Address dest)
+ {
+ m_assembler.andl_rm(src, dest.offset, dest.base);
+ }
+
+ void and32(Address src, RegisterID dest)
+ {
+ m_assembler.andl_mr(src.offset, src.base, dest);
+ }
+
+ void and32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.andl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ and32(op2, dest);
+ else {
+ move(op2, dest);
+ and32(op1, dest);
+ }
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ and32(imm, dest);
+ }
+
+ void lshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.shll_CLr(dest);
+ else {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ lshift32(shift_amount, dest);
+ }
+
+ void lshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shll_i8r(imm.m_value, dest);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ lshift32(imm, dest);
+ }
+
+ void mul32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_rr(src, dest);
+ }
+
+ void mul32(Address src, RegisterID dest)
+ {
+ m_assembler.imull_mr(src.offset, src.base, dest);
+ }
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.imull_i32r(src, imm.m_value, dest);
+ }
+
+ void neg32(RegisterID srcDest)
+ {
+ m_assembler.negl_r(srcDest);
+ }
+
+ void neg32(Address srcDest)
+ {
+ m_assembler.negl_m(srcDest.offset, srcDest.base);
+ }
+
+ void or32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orl_rr(src, dest);
+ }
+
+ void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orl_ir(imm.m_value, dest);
+ }
+
+ void or32(RegisterID src, Address dest)
+ {
+ m_assembler.orl_rm(src, dest.offset, dest.base);
+ }
+
+ void or32(Address src, RegisterID dest)
+ {
+ m_assembler.orl_mr(src.offset, src.base, dest);
+ }
+
+ void or32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.orl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ zeroExtend32ToPtr(op1, dest);
+ else if (op1 == dest)
+ or32(op2, dest);
+ else {
+ move(op2, dest);
+ or32(op1, dest);
+ }
+ }
+
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ or32(imm, dest);
+ }
+
+ void rshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.sarl_CLr(dest);
+ else {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ rshift32(shift_amount, dest);
+ }
+
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.sarl_i8r(imm.m_value, dest);
+ }
+
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ rshift32(imm, dest);
+ }
+
+ void urshift32(RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (shift_amount == X86Registers::ecx)
+ m_assembler.shrl_CLr(dest);
+ else {
+ // On x86 we can only shift by ecx; if asked to shift by another register we'll
+            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+            // If dest is ecx, then shift the swapped register!
+ swap(shift_amount, X86Registers::ecx);
+ m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
+ swap(shift_amount, X86Registers::ecx);
+ }
+ }
+
+ void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
+ {
+ ASSERT(shift_amount != dest);
+
+ if (src != dest)
+ move(src, dest);
+ urshift32(shift_amount, dest);
+ }
+
+ void urshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.shrl_i8r(imm.m_value, dest);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ if (src != dest)
+ move(src, dest);
+ urshift32(imm, dest);
+ }
+
+ void sub32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subl_rr(src, dest);
+ }
+
+ void sub32(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subl_ir(imm.m_value, dest);
+ }
+
+ void sub32(TrustedImm32 imm, Address address)
+ {
+ m_assembler.subl_im(imm.m_value, address.offset, address.base);
+ }
+
+ void sub32(Address src, RegisterID dest)
+ {
+ m_assembler.subl_mr(src.offset, src.base, dest);
+ }
+
+ void sub32(RegisterID src, Address dest)
+ {
+ m_assembler.subl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorl_rr(src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, Address dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.notl_m(dest.offset, dest.base);
+ else
+ m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.notl_r(dest);
+ else
+ m_assembler.xorl_ir(imm.m_value, dest);
+ }
+
+ void xor32(RegisterID src, Address dest)
+ {
+ m_assembler.xorl_rm(src, dest.offset, dest.base);
+ }
+
+ void xor32(Address src, RegisterID dest)
+ {
+ m_assembler.xorl_mr(src.offset, src.base, dest);
+ }
+
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(TrustedImm32(0), dest);
+ else if (op1 == dest)
+ xor32(op2, dest);
+ else {
+ move(op2, dest);
+ xor32(op1, dest);
+ }
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ xor32(imm, dest);
+ }
+
+ void sqrtDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.sqrtsd_rr(src, dst);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(&negativeZeroConstant, dst);
+ m_assembler.andnpd_rr(src, dst);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(src != dst);
+ static const double negativeZeroConstant = -0.0;
+ loadDouble(&negativeZeroConstant, dst);
+ m_assembler.xorpd_rr(src, dst);
+ }
+
+
+ // Memory access operations:
+ //
+ // Loads are of the form load(address, destination) and stores of the form
+    // store(source, address). The source for a store may be a TrustedImm32. Address
+ // operand objects to loads and store will be implicitly constructed if a
+ // register is passed.
+
+ void load32(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, dest);
+ }
+
+ void load32(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(address, dest);
+ }
+
+ DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ ASSERT(isCompactPtrAlignedAddressOffset(value));
+ AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movl_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movzbl_mr(address.offset, address.base, dest);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load8Signed(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movsbl_mr(address.offset, address.base, dest);
+ }
+
+ void load16(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load16(Address address, RegisterID dest)
+ {
+ m_assembler.movzwl_mr(address.offset, address.base, dest);
+ }
+
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load16Signed(Address address, RegisterID dest)
+ {
+ m_assembler.movswl_mr(address.offset, address.base, dest);
+ }
+
+ DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ padBeforePatch();
+ m_assembler.movl_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void store32(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base);
+ }
+
+ void store32(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store32(TrustedImm32 imm, ImplicitAddress address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
+ }
+
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
+ }
+
+ void store8(TrustedImm32 imm, BaseIndex address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp;
+ if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+ temp = X86Registers::eax;
+ else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+ temp = X86Registers::ebx;
+ else {
+ ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+ temp = X86Registers::ecx;
+ }
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only store from the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (src >= 4) {
+ // Pick a temporary register.
+ RegisterID temp;
+ if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+ temp = X86Registers::eax;
+ else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+ temp = X86Registers::ebx;
+ else {
+ ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+ temp = X86Registers::ecx;
+ }
+
+ // Swap to the temporary register to perform the store.
+ swap(src, temp);
+ m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
+ swap(src, temp);
+ return;
+ }
+#endif
+ m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+
+ // Floating-point operation:
+ //
+ // Presently only supports SSE, not x87 floating point.
+
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (src != dest)
+ m_assembler.movsd_rr(src, dest);
+ }
+
+ void loadDouble(const void* address, FPRegisterID dest)
+ {
+#if CPU(X86)
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address, dest);
+#else
+ move(TrustedImmPtr(address), scratchRegister);
+ loadDouble(scratchRegister, dest);
+#endif
+ }
+
+ void loadDouble(ImplicitAddress address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, dest);
+ }
+
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void storeDouble(FPRegisterID src, ImplicitAddress address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base);
+ }
+
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsd2ss_rr(src, dst);
+ }
+
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtss2sd_rr(src, dst);
+ }
+
+ void addDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_rr(src, dest);
+ }
+
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ addDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ addDouble(op1, dest);
+ }
+ }
+
+ void addDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.addsd_mr(src.offset, src.base, dest);
+ }
+
+ void divDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_rr(src, dest);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A / B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ divDouble(op2, dest);
+ }
+
+ void divDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.divsd_mr(src.offset, src.base, dest);
+ }
+
+ void subDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_rr(src, dest);
+ }
+
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ // B := A - B is invalid.
+ ASSERT(op1 == dest || op2 != dest);
+
+ moveDouble(op1, dest);
+ subDouble(op2, dest);
+ }
+
+ void subDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.subsd_mr(src.offset, src.base, dest);
+ }
+
+ void mulDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_rr(src, dest);
+ }
+
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ if (op1 == dest)
+ mulDouble(op2, dest);
+ else {
+ moveDouble(op2, dest);
+ mulDouble(op1, dest);
+ }
+ }
+
+ void mulDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.mulsd_mr(src.offset, src.base, dest);
+ }
+
+ void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_rr(src, dest);
+ }
+
+ void convertInt32ToDouble(Address src, FPRegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
+ }
+
+ Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ ASSERT(isSSE2Present());
+
+ if (cond & DoubleConditionBitInvert)
+ m_assembler.ucomisd_rr(left, right);
+ else
+ m_assembler.ucomisd_rr(right, left);
+
+ if (cond == DoubleEqual) {
+ if (left == right)
+ return Jump(m_assembler.jnp());
+ Jump isUnordered(m_assembler.jp());
+ Jump result = Jump(m_assembler.je());
+ isUnordered.link(this);
+ return result;
+ } else if (cond == DoubleNotEqualOrUnordered) {
+ if (left == right)
+ return Jump(m_assembler.jp());
+ Jump isUnordered(m_assembler.jp());
+ Jump isEqual(m_assembler.je());
+ isUnordered.link(this);
+ Jump result = jump();
+ isEqual.link(this);
+ return result;
+ }
+
+ ASSERT(!(cond & DoubleConditionBitSpecial));
+ return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
+ }
+
+ // Truncates 'src' to an integer, and places the result in 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, INT_MIN).
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
+ }
+
+ Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
+ }
+
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+ }
+
+#if CPU(X86_64)
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2siq_rr(src, dest);
+ }
+#endif
+
+ // Convert 'src' to an integer, and places the resulting 'dest'.
+ // If the result is not representable as a 32 bit value, branch.
+ // May also branch for some values that are representable in 32 bits
+ // (specifically, in this case, 0).
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.cvttsd2si_rr(src, dest);
+
+ // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
+ failureCases.append(branchTest32(Zero, dest));
+
+ // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+ convertInt32ToDouble(dest, fpTemp);
+ m_assembler.ucomisd_rr(fpTemp, src);
+ failureCases.append(m_assembler.jp());
+ failureCases.append(m_assembler.jne());
+ }
+
+ Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleNotEqual, reg, scratch);
+ }
+
+ Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.xorpd_rr(scratch, scratch);
+ return branchDouble(DoubleEqualOrUnordered, reg, scratch);
+ }
+
+ void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psllq_i8r(imm.m_value, reg);
+ }
+
+ void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.psrlq_i8r(imm.m_value, reg);
+ }
+
+ void orPacked(XMMRegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.por_rr(src, dst);
+ }
+
+ void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
+
+ void movePackedToInt32(XMMRegisterID src, RegisterID dst)
+ {
+ ASSERT(isSSE2Present());
+ m_assembler.movd_rr(src, dst);
+ }
+
+ // Stack manipulation operations:
+ //
+ // The ABI is assumed to provide a stack abstraction to memory,
+ // containing machine word sized units of data. Push and pop
+ // operations add and remove a single register sized unit of data
+ // to or from the stack. Peek and poke operations read or write
+ // values on the stack, without moving the current stack position.
+
+ void pop(RegisterID dest)
+ {
+ m_assembler.pop_r(dest);
+ }
+
+ void push(RegisterID src)
+ {
+ m_assembler.push_r(src);
+ }
+
+ void push(Address address)
+ {
+ m_assembler.push_m(address.offset, address.base);
+ }
+
+ void push(TrustedImm32 imm)
+ {
+ m_assembler.push_i32(imm.m_value);
+ }
+
+
+ // Register move operations:
+ //
+ // Move values in registers.
+
+ void move(TrustedImm32 imm, RegisterID dest)
+ {
+ // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
+ // may be useful to have a separate version that sign extends the value?
+ if (!imm.m_value)
+ m_assembler.xorl_rr(dest, dest);
+ else
+ m_assembler.movl_i32r(imm.m_value, dest);
+ }
+
+#if CPU(X86_64)
+ void move(RegisterID src, RegisterID dest)
+ {
+ // Note: on 64-bit this is a full register move; perhaps it would be
+ // useful to have separate move32 & movePtr, with move32 zero extending?
+ if (src != dest)
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.asIntptr(), dest);
+ }
+
+ void move(TrustedImm64 imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.m_value, dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2)
+ m_assembler.xchgq_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movsxd_rr(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ m_assembler.movl_rr(src, dest);
+ }
+#else
+ void move(RegisterID src, RegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.movl_rr(src, dest);
+ }
+
+ void move(TrustedImmPtr imm, RegisterID dest)
+ {
+ m_assembler.movl_i32r(imm.asIntptr(), dest);
+ }
+
+ void swap(RegisterID reg1, RegisterID reg2)
+ {
+ if (reg1 != reg2)
+ m_assembler.xchgl_rr(reg1, reg2);
+ }
+
+ void signExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+#endif
+
+
+ // Forwards / external control flow operations:
+ //
+ // This set of jump and conditional branch operations return a Jump
+ // object which may linked at a later point, allow forwards jump,
+ // or jumps that will require external linkage (after the code has been
+ // relocated).
+ //
+ // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
+ // respectively, for unsigned comparisons the names b, a, be, and ae are
+ // used (representing the names 'below' and 'above').
+ //
+ // Operands to the comparison are provided in the expected order, e.g.
+ // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
+ // treated as a signed 32bit value, is less than or equal to 5.
+ //
+ // jz and jnz test whether the first operand is equal to zero, and take
+ // an optional second operand of a mask under which to perform the test.
+
+public:
+ Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpl_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpl_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpl_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.testl_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // if we are only interested in the low seven bits, this can be tested with a testb
+ if (mask.m_value == -1)
+ m_assembler.testl_rr(reg, reg);
+ else
+ m_assembler.testl_i32r(mask.m_value, reg);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
+ ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(right.m_value & 0xFFFFFF00));
+
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump jump()
+ {
+ return Jump(m_assembler.jmp());
+ }
+
+ void jump(RegisterID target)
+ {
+ m_assembler.jmp_r(target);
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(Address address)
+ {
+ m_assembler.jmp_m(address.offset, address.base);
+ }
+
+
+ // Arithmetic control flow operations:
+ //
+ // This set of conditional branch operations branch based
+ // on the result of an arithmetic operation. The operation
+ // is performed as normal, storing the result.
+ //
+ // * jz operations branch if the result is zero.
+ // * jo operations branch if the (signed) arithmetic
+ // operation caused an overflow to occur.
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchAdd32(cond, src2, dest);
+ move(src2, dest);
+ return branchAdd32(cond, src1, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ move(src, dest);
+ return branchAdd32(cond, imm, dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ mul32(src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ mul32(imm, src, dest);
+ if (cond != Overflow)
+ m_assembler.testl_rr(dest, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ if (src1 == dest)
+ return branchMul32(cond, src2, dest);
+ move(src2, dest);
+ return branchMul32(cond, src1, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
+ {
+ sub32(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ sub32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ // B := A - B is invalid.
+ ASSERT(src1 == dest || src2 != dest);
+
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSub32(cond, src2, dest);
+ }
+
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ neg32(srcDest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+
+ // Miscellaneous operations:
+
+ void breakpoint()
+ {
+ m_assembler.int3();
+ }
+
+ Call nearCall()
+ {
+ return Call(m_assembler.call(), Call::LinkableNear);
+ }
+
+ Call call(RegisterID target)
+ {
+ return Call(m_assembler.call(target), Call::None);
+ }
+
+ void call(Address address)
+ {
+ m_assembler.call_m(address.offset, address.base);
+ }
+
+ void ret()
+ {
+ m_assembler.ret();
+ }
+
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpl_rr(right, left);
+ set32(x86Condition(cond), dest);
+ }
+
+ void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testl_rr(left, left);
+ else
+ m_assembler.cmpl_ir(right.m_value, left);
+ set32(x86Condition(cond), dest);
+ }
+
+ // FIXME:
+ // The mask should be optional... perhaps the argument order should be
+ // dest-src, operations always have a dest? ... possibly not true, considering
+ // asm ops like test, or pseudo ops like pop().
+
+ void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpb_im(0, address.offset, address.base);
+ else
+ m_assembler.testb_im(mask.m_value, address.offset, address.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpl_im(0, address.offset, address.base);
+ else
+ m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+ set32(x86Condition(cond), dest);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(cond ^ 1);
+ }
+
+ void nop()
+ {
+ m_assembler.nop();
+ }
+
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return X86Assembler::maxJumpReplacementSize();
+ }
+
+protected:
+ X86Assembler::Condition x86Condition(RelationalCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ X86Assembler::Condition x86Condition(ResultCondition cond)
+ {
+ return static_cast<X86Assembler::Condition>(cond);
+ }
+
+ void set32(X86Assembler::Condition cond, RegisterID dest)
+ {
+#if CPU(X86)
+ // On 32-bit x86 we can only set the first 4 registers;
+ // esp..edi are mapped to the 'h' registers!
+ if (dest >= 4) {
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ m_assembler.setCC_r(cond, X86Registers::eax);
+ m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
+ m_assembler.xchgl_rr(dest, X86Registers::eax);
+ return;
+ }
+#endif
+ m_assembler.setCC_r(cond, dest);
+ m_assembler.movzbl_rr(dest, dest);
+ }
+
+private:
+ // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
+ // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
+ friend class MacroAssemblerX86;
+
+#if CPU(X86)
+#if OS(MAC_OS_X)
+
+ // All X86 Macs are guaranteed to support at least SSE2.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#else // OS(MAC_OS_X)
+
+ enum SSE2CheckState {
+ NotCheckedSSE2,
+ HasSSE2,
+ NoSSE2
+ };
+
+ static bool isSSE2Present()
+ {
+ if (s_sse2CheckState == NotCheckedSSE2) {
+ // Default the flags value to zero; if the compiler is
+ // not MSVC or GCC we will read this as SSE2 not present.
+ int flags = 0;
+#if COMPILER(MSVC)
+ _asm {
+ mov eax, 1 // cpuid function 1 gives us the standard feature set
+ cpuid;
+ mov flags, edx;
+ }
+#elif COMPILER(GCC)
+ asm (
+ "movl $0x1, %%eax;"
+ "pushl %%ebx;"
+ "cpuid;"
+ "popl %%ebx;"
+ "movl %%edx, %0;"
+ : "=g" (flags)
+ :
+ : "%eax", "%ecx", "%edx"
+ );
+#endif
+ static const int SSE2FeatureBit = 1 << 26;
+ s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
+ }
+ // Only check once.
+ ASSERT(s_sse2CheckState != NotCheckedSSE2);
+
+ return s_sse2CheckState == HasSSE2;
+ }
+
+ static SSE2CheckState s_sse2CheckState;
+
+#endif // OS(MAC_OS_X)
+#elif !defined(NDEBUG) // CPU(X86)
+
+ // On x86-64 we should never be checking for SSE2 in a non-debug build,
+ // but in debug builds we add this method to keep the asserts above happy.
+ static bool isSSE2Present()
+ {
+ return true;
+ }
+
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86Common_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h b/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h
new file mode 100644
index 0000000000..c711e6f8da
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h
@@ -0,0 +1,643 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssemblerX86_64_h
+#define MacroAssemblerX86_64_h
+
+#if ENABLE(ASSEMBLER) && CPU(X86_64)
+
+#include "MacroAssemblerX86Common.h"
+
+#define REPTACH_OFFSET_CALL_R11 3
+
+namespace JSC {
+
+class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
+public:
+ static const Scale ScalePtr = TimesEight;
+
+ using MacroAssemblerX86Common::add32;
+ using MacroAssemblerX86Common::and32;
+ using MacroAssemblerX86Common::branchAdd32;
+ using MacroAssemblerX86Common::or32;
+ using MacroAssemblerX86Common::sub32;
+ using MacroAssemblerX86Common::load32;
+ using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
+ using MacroAssemblerX86Common::call;
+ using MacroAssemblerX86Common::jump;
+ using MacroAssemblerX86Common::addDouble;
+ using MacroAssemblerX86Common::loadDouble;
+ using MacroAssemblerX86Common::convertInt32ToDouble;
+
+ void add32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add32(imm, Address(scratchRegister));
+ }
+
+ void and32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ and32(imm, Address(scratchRegister));
+ }
+
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add32(Address(scratchRegister), dest);
+ }
+
+ void or32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ or32(imm, Address(scratchRegister));
+ }
+
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ or32(reg, Address(scratchRegister));
+ }
+
+ void sub32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ sub32(imm, Address(scratchRegister));
+ }
+
+ void load32(const void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movl_mEAX(address);
+ else {
+ move(TrustedImmPtr(address), dest);
+ load32(dest, dest);
+ }
+ }
+
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ m_assembler.addsd_mr(0, scratchRegister, dest);
+ }
+
+ void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
+ {
+ move(imm, scratchRegister);
+ m_assembler.cvtsi2sd_rr(scratchRegister, dest);
+ }
+
+ void store32(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store32(imm, scratchRegister);
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store8(imm, Address(scratchRegister));
+ }
+
+ Call call()
+ {
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
+ ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
+ return result;
+ }
+
+ // Address is a memory location containing the address to jump to
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ jump(Address(scratchRegister));
+ }
+
+ Call tailRecursiveCall()
+ {
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+ ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+ Call makeTailRecursiveCall(Jump oldJump)
+ {
+ oldJump.link(this);
+ DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+ Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
+ ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+ return Call::fromTailJump(newJump);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), scratchRegister);
+ add32(src, Address(scratchRegister));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ void add64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.addq_rr(src, dest);
+ }
+
+ void add64(Address src, RegisterID dest)
+ {
+ m_assembler.addq_mr(src.offset, src.base, dest);
+ }
+
+ void add64(AbsoluteAddress src, RegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), scratchRegister);
+ add64(Address(scratchRegister), dest);
+ }
+
+ void add64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.addq_ir(imm.m_value, srcDest);
+ }
+
+ void add64(TrustedImm64 imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ add64(scratchRegister, dest);
+ }
+
+ void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ m_assembler.leaq_mr(imm.m_value, src, dest);
+ }
+
+ void add64(TrustedImm32 imm, Address address)
+ {
+ m_assembler.addq_im(imm.m_value, address.offset, address.base);
+ }
+
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add64(imm, Address(scratchRegister));
+ }
+
+ void and64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.andq_rr(src, dest);
+ }
+
+ void and64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.andq_ir(imm.m_value, srcDest);
+ }
+
+ void neg64(RegisterID dest)
+ {
+ m_assembler.negq_r(dest);
+ }
+
+ void or64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.orq_rr(src, dest);
+ }
+
+ void or64(TrustedImm64 imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ or64(scratchRegister, dest);
+ }
+
+ void or64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.orq_ir(imm.m_value, dest);
+ }
+
+ void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ if (op1 == op2)
+ move(op1, dest);
+ else if (op1 == dest)
+ or64(op2, dest);
+ else {
+ move(op2, dest);
+ or64(op1, dest);
+ }
+ }
+
+ void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ or64(imm, dest);
+ }
+
+ void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
+ {
+ m_assembler.rorq_i8r(imm.m_value, srcDst);
+ }
+
+ void sub64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.subq_rr(src, dest);
+ }
+
+ void sub64(TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.subq_ir(imm.m_value, dest);
+ }
+
+ void sub64(TrustedImm64 imm, RegisterID dest)
+ {
+ move(imm, scratchRegister);
+ sub64(scratchRegister, dest);
+ }
+
+ void xor64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.xorq_rr(src, dest);
+ }
+
+ void xor64(RegisterID src, Address dest)
+ {
+ m_assembler.xorq_rm(src, dest.offset, dest.base);
+ }
+
+ void xor64(TrustedImm32 imm, RegisterID srcDest)
+ {
+ m_assembler.xorq_ir(imm.m_value, srcDest);
+ }
+
+ void load64(ImplicitAddress address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ }
+
+ void load64(BaseIndex address, RegisterID dest)
+ {
+ m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
+ }
+
+ void load64(const void* address, RegisterID dest)
+ {
+ if (dest == X86Registers::eax)
+ m_assembler.movq_mEAX(address);
+ else {
+ move(TrustedImmPtr(address), dest);
+ load64(dest, dest);
+ }
+ }
+
+ DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_mr_disp32(address.offset, address.base, dest);
+ return DataLabel32(this);
+ }
+
+ DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_mr_disp8(address.offset, address.base, dest);
+ return DataLabelCompact(this);
+ }
+
+ void store64(RegisterID src, ImplicitAddress address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base);
+ }
+
+ void store64(RegisterID src, BaseIndex address)
+ {
+ m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
+ }
+
+ void store64(RegisterID src, void* address)
+ {
+ if (src == X86Registers::eax)
+ m_assembler.movq_EAXm(address);
+ else {
+ move(TrustedImmPtr(address), scratchRegister);
+ store64(src, scratchRegister);
+ }
+ }
+
+ void store64(TrustedImm64 imm, ImplicitAddress address)
+ {
+ move(imm, scratchRegister);
+ store64(scratchRegister, address);
+ }
+
+ void store64(TrustedImm64 imm, BaseIndex address)
+ {
+ move(imm, scratchRegister);
+ m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
+ }
+
+ DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ padBeforePatch();
+ m_assembler.movq_rm_disp32(src, address.offset, address.base);
+ return DataLabel32(this);
+ }
+
+ void move64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ void moveDoubleTo64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.movq_rr(src, dest);
+ }
+
+ // compare64: sets dest to the boolean result of (left cond right).
+ void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ // Equality comparison against zero can use the shorter test encoding.
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
+ m_assembler.testq_rr(left, left);
+ else
+ m_assembler.cmpq_ir(right.m_value, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest); // setCC writes only 8 bits; zero-extend.
+ }
+
+ void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.cmpq_rr(right, left);
+ m_assembler.setCC_r(x86Condition(cond), dest);
+ m_assembler.movzbl_rr(dest, dest); // setCC writes only 8 bits; zero-extend.
+ }
+
+ // branch64: compares 64-bit operands and returns an unlinked conditional
+ // Jump. The TrustedImm64 and AbsoluteAddress overloads clobber
+ // scratchRegister.
+ Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_assembler.cmpq_rr(right, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
+ {
+ // (Not)Equal-to-zero is cheaper as a self-test.
+ if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
+ m_assembler.testq_rr(left, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+ move(right, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branch64(RelationalCondition cond, RegisterID left, Address right)
+ {
+ m_assembler.cmpq_mr(right.offset, right.base, left);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ move(TrustedImmPtr(left.m_ptr), scratchRegister);
+ return branch64(cond, Address(scratchRegister), right);
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, RegisterID right)
+ {
+ m_assembler.cmpq_rm(right, left.offset, left.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+ {
+ move(right, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ // branchTest64 / test64: AND the operand with a mask (default -1 = all
+ // bits) and branch on, or store, the resulting condition. Immediate
+ // masks pick the narrowest usable test encoding.
+ Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ m_assembler.testq_rr(reg, mask);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // if we are only interested in the low seven bits, this can be tested with a testb
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ // Same mask-narrowing logic as above, but materializes the condition
+ // into dest instead of branching.
+ void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ if (mask.m_value == -1)
+ m_assembler.testq_rr(reg, reg);
+ else if ((mask.m_value & ~0x7f) == 0)
+ m_assembler.testb_i8r(mask.m_value, reg);
+ else
+ m_assembler.testq_i32r(mask.m_value, reg);
+ set32(x86Condition(cond), dest);
+ }
+
+ void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ {
+ m_assembler.testq_rr(reg, mask);
+ set32(x86Condition(cond), dest);
+ }
+
+ // Clobbers scratchRegister (holds the loaded value).
+ Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load64(address.m_ptr, scratchRegister);
+ return branchTest64(cond, scratchRegister, mask);
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ // Testing all bits of a memory operand is done as a compare against 0.
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
+ {
+ m_assembler.testq_rm(reg, address.offset, address.base);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
+ else
+ m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+
+ // branchAdd64 / branchSub64: perform the arithmetic (modifying dest in
+ // place) and branch on the flags the operation set.
+ Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ add64(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ // Three-operand form: dest = src1, then dest -= src2 with a branch.
+ Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ move(src1, dest);
+ return branchSub64(cond, src2, dest);
+ }
+
+ // Emits a load whose instruction can later be converted (see
+ // replaceWithLoad / replaceWithAddressComputation); the label marks the
+ // start of the instruction, so it is created before emission.
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ return result;
+ }
+
+ // Emits movabs imm64 -> dest and returns a label to the repatchable
+ // 64-bit immediate.
+ DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
+ {
+ padBeforePatch();
+ m_assembler.movq_i64r(initialValue.asIntptr(), dest);
+ return DataLabelPtr(this);
+ }
+
+ // Compare against a patchable pointer constant (held in scratchRegister)
+ // and branch; dataLabel receives the immediate's location.
+ Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ dataLabel = moveWithPatch(initialRightValue, scratchRegister);
+ return branch64(cond, left, scratchRegister);
+ }
+
+ // Stores a patchable pointer constant to memory; clobbers scratchRegister.
+ DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
+ {
+ DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
+ store64(scratchRegister, address);
+ return label;
+ }
+
+ // 64-bit-only branchTest8 overloads: the absolute offset does not fit an
+ // x86-64 addressing mode directly, so it is loaded into scratchRegister
+ // and addressed indirectly. The using-declaration keeps the base class
+ // overloads visible alongside these.
+ using MacroAssemblerX86Common::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
+ MacroAssemblerX86Common::move(addr, scratchRegister);
+ return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
+ }
+
+ // Capability queries for this port: x86-64 supports all FP operations
+ // the macro assembler may ask about.
+ static bool supportsFloatingPoint() { return true; }
+ // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+ // Reads back the pointer currently installed in a patched call sequence.
+ // NOTE(review): "REPTACH" (sic) is the spelling used throughout this port.
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
+ }
+
+ static RegisterID scratchRegisterForBlinding() { return scratchRegister; }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+
+ // The patchable sequence starts with a movabs imm64 (REX + opcode +
+ // 8-byte immediate = 10 bytes); back up from the data label to its start.
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ const int immediateBytes = 8;
+ const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
+ ASSERT(totalBytes >= maxJumpReplacementSize());
+ return label.labelAtOffset(-totalBytes);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ return startOfBranchPtrWithPatchOnRegister(label);
+ }
+
+ // Restores the original movabs imm64 -> scratchRegister after a jump
+ // replacement; both variants emit the same instruction on this port.
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+ {
+ X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+ }
+
+private:
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ // Links a call at finalization time: near calls are linked directly;
+ // far calls patch the pointer loaded into r11 just before the call.
+ static void linkCall(void* code, Call call, FunctionPtr function)
+ {
+ if (!call.isFlagSet(Call::Near))
+ X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
+ else
+ X86Assembler::linkCall(code, call.m_label, function.value());
+ }
+
+ // Repatches an already-linked (far) call by rewriting its pointer load.
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
+ }
+
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssemblerX86_64_h
diff --git a/src/3rdparty/masm/assembler/RepatchBuffer.h b/src/3rdparty/masm/assembler/RepatchBuffer.h
new file mode 100644
index 0000000000..dbb56f9ad5
--- /dev/null
+++ b/src/3rdparty/masm/assembler/RepatchBuffer.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RepatchBuffer_h
+#define RepatchBuffer_h
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include <MacroAssembler.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+// RepatchBuffer:
+//
+// This class is used to modify code after code generation has been completed,
+// and after the code has potentially already been executed. This mechanism is
+// used to apply optimizations to the code.
+//
+class RepatchBuffer {
+ typedef MacroAssemblerCodePtr CodePtr;
+
+public:
+ // Makes the code block's memory writable for the lifetime of this
+ // object; the destructor flips it back to executable.
+ RepatchBuffer(CodeBlock* codeBlock)
+ {
+ JITCode& code = codeBlock->getJITCode();
+ m_start = code.start();
+ m_size = code.size();
+
+ ExecutableAllocator::makeWritable(m_start, m_size);
+ }
+
+ ~RepatchBuffer()
+ {
+ ExecutableAllocator::makeExecutable(m_start, m_size);
+ }
+
+ // relink: retarget previously-emitted jumps and calls. These are thin
+ // forwarders to the MacroAssembler's static repatching entry points.
+ void relink(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchJump(jump, destination);
+ }
+
+ void relink(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationCall call, FunctionPtr destination)
+ {
+ MacroAssembler::repatchCall(call, destination);
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodePtr destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination));
+ }
+
+ void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ MacroAssembler::repatchNearCall(nearCall, destination);
+ }
+
+ // repatch: rewrite inline constants (32-bit, compact, or pointer-sized)
+ // embedded in already-generated code.
+ void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ MacroAssembler::repatchInt32(dataLabel32, value);
+ }
+
+ void repatch(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ MacroAssembler::repatchCompact(dataLabelCompact, value);
+ }
+
+ void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ MacroAssembler::repatchPointer(dataLabelPtr, value);
+ }
+
+ // Convenience helpers that treat a return address as the call site to
+ // be retargeted.
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function)
+ {
+ relink(CodeLocationCall(CodePtr(returnAddress)), function);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label)
+ {
+ relink(CodeLocationNearCall(CodePtr(returnAddress)), label);
+ }
+
+ void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction)
+ {
+ relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction));
+ }
+
+ // Toggle a convertible load between an actual load and an address
+ // computation (see convertibleLoadPtr in the assemblers).
+ void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ MacroAssembler::replaceWithLoad(label);
+ }
+
+ void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+ {
+ MacroAssembler::replaceWithAddressComputation(label);
+ }
+
+ void setLoadInstructionIsActive(CodeLocationConvertibleLoad label, bool isActive)
+ {
+ if (isActive)
+ replaceWithLoad(label);
+ else
+ replaceWithAddressComputation(label);
+ }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ return MacroAssembler::startOfBranchPtrWithPatchOnRegister(label);
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+ {
+ return MacroAssembler::startOfPatchableBranchPtrWithPatchOnAddress(label);
+ }
+
+ void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ MacroAssembler::replaceWithJump(instructionStart, destination);
+ }
+
+ // This is a *bit* of a silly API, since we currently always also repatch the
+ // immediate after calling this. But I'm fine with that, since this just feels
+ // less yucky.
+ void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::RegisterID reg, void* value)
+ {
+ MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart, reg, value);
+ }
+
+ void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, void* value)
+ {
+ MacroAssembler::revertJumpReplacementToPatchableBranchPtrWithPatch(instructionStart, address, value);
+ }
+
+private:
+ void* m_start; // Start of the (now writable) code region.
+ size_t m_size; // Size of the code region in bytes.
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // RepatchBuffer_h
diff --git a/src/3rdparty/masm/assembler/SH4Assembler.h b/src/3rdparty/masm/assembler/SH4Assembler.h
new file mode 100644
index 0000000000..b7a166ea99
--- /dev/null
+++ b/src/3rdparty/masm/assembler/SH4Assembler.h
@@ -0,0 +1,2152 @@
+/*
+ * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SH4Assembler_h
+#define SH4Assembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(SH4)
+
+#include "AssemblerBuffer.h"
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <wtf/Assertions.h>
+#include <wtf/DataLog.h>
+#include <wtf/Vector.h>
+
+#ifndef NDEBUG
+#define SH4_ASSEMBLER_TRACING
+#endif
+
+namespace JSC {
+typedef uint16_t SH4Word;
+
+enum {
+ INVALID_OPCODE = 0xffff,
+ ADD_OPCODE = 0x300c,
+ ADDIMM_OPCODE = 0x7000,
+ ADDC_OPCODE = 0x300e,
+ ADDV_OPCODE = 0x300f,
+ AND_OPCODE = 0x2009,
+ ANDIMM_OPCODE = 0xc900,
+ DIV0_OPCODE = 0x2007,
+ DIV1_OPCODE = 0x3004,
+ BF_OPCODE = 0x8b00,
+ BFS_OPCODE = 0x8f00,
+ BRA_OPCODE = 0xa000,
+ BRAF_OPCODE = 0x0023,
+ NOP_OPCODE = 0x0009,
+ BSR_OPCODE = 0xb000,
+ RTS_OPCODE = 0x000b,
+ BT_OPCODE = 0x8900,
+ BTS_OPCODE = 0x8d00,
+ BSRF_OPCODE = 0x0003,
+ BRK_OPCODE = 0x003b,
+ FTRC_OPCODE = 0xf03d,
+ CMPEQ_OPCODE = 0x3000,
+ CMPEQIMM_OPCODE = 0x8800,
+ CMPGE_OPCODE = 0x3003,
+ CMPGT_OPCODE = 0x3007,
+ CMPHI_OPCODE = 0x3006,
+ CMPHS_OPCODE = 0x3002,
+ CMPPL_OPCODE = 0x4015,
+ CMPPZ_OPCODE = 0x4011,
+ CMPSTR_OPCODE = 0x200c,
+ DT_OPCODE = 0x4010,
+ FCMPEQ_OPCODE = 0xf004,
+ FCMPGT_OPCODE = 0xf005,
+ FMOV_OPCODE = 0xf00c,
+ FADD_OPCODE = 0xf000,
+ FMUL_OPCODE = 0xf002,
+ FSUB_OPCODE = 0xf001,
+ FDIV_OPCODE = 0xf003,
+ FNEG_OPCODE = 0xf04d,
+ JMP_OPCODE = 0x402b,
+ JSR_OPCODE = 0x400b,
+ LDSPR_OPCODE = 0x402a,
+ LDSLPR_OPCODE = 0x4026,
+ MOV_OPCODE = 0x6003,
+ MOVIMM_OPCODE = 0xe000,
+ MOVB_WRITE_RN_OPCODE = 0x2000,
+ MOVB_WRITE_RNDEC_OPCODE = 0x2004,
+ MOVB_WRITE_R0RN_OPCODE = 0x0004,
+ MOVB_WRITE_OFFGBR_OPCODE = 0xc000,
+ MOVB_WRITE_OFFRN_OPCODE = 0x8000,
+ MOVB_READ_RM_OPCODE = 0x6000,
+ MOVB_READ_RMINC_OPCODE = 0x6004,
+ MOVB_READ_R0RM_OPCODE = 0x000c,
+ MOVB_READ_OFFGBR_OPCODE = 0xc400,
+ MOVB_READ_OFFRM_OPCODE = 0x8400,
+ MOVL_WRITE_RN_OPCODE = 0x2002,
+ MOVL_WRITE_RNDEC_OPCODE = 0x2006,
+ MOVL_WRITE_R0RN_OPCODE = 0x0006,
+ MOVL_WRITE_OFFGBR_OPCODE = 0xc200,
+ MOVL_WRITE_OFFRN_OPCODE = 0x1000,
+ MOVL_READ_RM_OPCODE = 0x6002,
+ MOVL_READ_RMINC_OPCODE = 0x6006,
+ MOVL_READ_R0RM_OPCODE = 0x000e,
+ MOVL_READ_OFFGBR_OPCODE = 0xc600,
+ MOVL_READ_OFFPC_OPCODE = 0xd000,
+ MOVL_READ_OFFRM_OPCODE = 0x5000,
+ MOVW_WRITE_RN_OPCODE = 0x2001,
+ MOVW_READ_RM_OPCODE = 0x6001,
+ MOVW_READ_R0RM_OPCODE = 0x000d,
+ MOVW_READ_OFFRM_OPCODE = 0x8500,
+ MOVW_READ_OFFPC_OPCODE = 0x9000,
+ MOVA_READ_OFFPC_OPCODE = 0xc700,
+ MOVT_OPCODE = 0x0029,
+ MULL_OPCODE = 0x0007,
+ DMULL_L_OPCODE = 0x3005,
+ STSMACL_OPCODE = 0x001a,
+ STSMACH_OPCODE = 0x000a,
+ DMULSL_OPCODE = 0x300d,
+ NEG_OPCODE = 0x600b,
+ NEGC_OPCODE = 0x600a,
+ NOT_OPCODE = 0x6007,
+ OR_OPCODE = 0x200b,
+ ORIMM_OPCODE = 0xcb00,
+ ORBIMM_OPCODE = 0xcf00,
+ SETS_OPCODE = 0x0058,
+ SETT_OPCODE = 0x0018,
+ SHAD_OPCODE = 0x400c,
+ SHAL_OPCODE = 0x4020,
+ SHAR_OPCODE = 0x4021,
+ SHLD_OPCODE = 0x400d,
+ SHLL_OPCODE = 0x4000,
+ SHLL2_OPCODE = 0x4008,
+ SHLL8_OPCODE = 0x4018,
+ SHLL16_OPCODE = 0x4028,
+ SHLR_OPCODE = 0x4001,
+ SHLR2_OPCODE = 0x4009,
+ SHLR8_OPCODE = 0x4019,
+ SHLR16_OPCODE = 0x4029,
+ STSPR_OPCODE = 0x002a,
+ STSLPR_OPCODE = 0x4022,
+ FLOAT_OPCODE = 0xf02d,
+ SUB_OPCODE = 0x3008,
+ SUBC_OPCODE = 0x300a,
+ SUBV_OPCODE = 0x300b,
+ TST_OPCODE = 0x2008,
+ TSTIMM_OPCODE = 0xc800,
+ TSTB_OPCODE = 0xcc00,
+ EXTUB_OPCODE = 0x600c,
+ EXTUW_OPCODE = 0x600d,
+ XOR_OPCODE = 0x200a,
+ XORIMM_OPCODE = 0xca00,
+ XORB_OPCODE = 0xce00,
+ FMOVS_READ_RM_INC_OPCODE = 0xf009,
+ FMOVS_READ_RM_OPCODE = 0xf008,
+ FMOVS_READ_R0RM_OPCODE = 0xf006,
+ FMOVS_WRITE_RN_OPCODE = 0xf00a,
+ FMOVS_WRITE_RN_DEC_OPCODE = 0xf00b,
+ FMOVS_WRITE_R0RN_OPCODE = 0xf007,
+ FCNVDS_DRM_FPUL_OPCODE = 0xf0bd,
+ FCNVSD_FPUL_DRN_OPCODE = 0xf0ad,
+ LDS_RM_FPUL_OPCODE = 0x405a,
+ FLDS_FRM_FPUL_OPCODE = 0xf01d,
+ STS_FPUL_RN_OPCODE = 0x005a,
+ FSTS_FPUL_FRN_OPCODE = 0xF00d,
+ LDSFPSCR_OPCODE = 0x406a,
+ STSFPSCR_OPCODE = 0x006a,
+ LDSRMFPUL_OPCODE = 0x405a,
+ FSTSFPULFRN_OPCODE = 0xf00d,
+ FSQRT_OPCODE = 0xf06d,
+ FSCHG_OPCODE = 0xf3fd,
+ CLRT_OPCODE = 8,
+};
+
+namespace SH4Registers {
+typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14, fp = r14,
+ r15, sp = r15,
+ pc,
+ pr,
+} RegisterID;
+
+typedef enum {
+ fr0, dr0 = fr0,
+ fr1,
+ fr2, dr2 = fr2,
+ fr3,
+ fr4, dr4 = fr4,
+ fr5,
+ fr6, dr6 = fr6,
+ fr7,
+ fr8, dr8 = fr8,
+ fr9,
+ fr10, dr10 = fr10,
+ fr11,
+ fr12, dr12 = fr12,
+ fr13,
+ fr14, dr14 = fr14,
+ fr15,
+} FPRegisterID;
+}
+
+// Helpers that assemble a 16-bit SH4 instruction word by OR-ing operand
+// fields (register numbers, displacements, immediates) into an opcode
+// template. Each "group" corresponds to a distinct field layout.
+inline uint16_t getOpcodeGroup1(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4));
+}
+
+inline uint16_t getOpcodeGroup2(uint16_t opc, int rm)
+{
+ return (opc | ((rm & 0xf) << 8));
+}
+
+// 4-bit register at bits 11..8 plus an 8-bit immediate/displacement.
+inline uint16_t getOpcodeGroup3(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | (rn & 0xff));
+}
+
+// Two 4-bit registers plus a 4-bit displacement in the low nibble.
+inline uint16_t getOpcodeGroup4(uint16_t opc, int rm, int rn, int offset)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4) | (offset & 0xf));
+}
+
+// 8-bit immediate only.
+inline uint16_t getOpcodeGroup5(uint16_t opc, int rm)
+{
+ return (opc | (rm & 0xff));
+}
+
+// 12-bit displacement only (BRA/BSR-style encodings).
+inline uint16_t getOpcodeGroup6(uint16_t opc, int rm)
+{
+ return (opc | (rm & 0xfff));
+}
+
+// 3-bit register fields at bit 9 / bit 5 — used by double-precision
+// FP encodings, where register numbers are even (DRn).
+inline uint16_t getOpcodeGroup7(uint16_t opc, int rm)
+{
+ return (opc | ((rm & 0x7) << 9));
+}
+
+inline uint16_t getOpcodeGroup8(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0x7) << 9) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup9(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 8) | ((rn & 0x7) << 5));
+}
+
+inline uint16_t getOpcodeGroup10(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0x7) << 9) | ((rn & 0xf) << 4));
+}
+
+// Register fields in the low byte (bits 7..4 and 3..0).
+inline uint16_t getOpcodeGroup11(uint16_t opc, int rm, int rn)
+{
+ return (opc | ((rm & 0xf) << 4) | (rn & 0xf));
+}
+
+// Field extractors — the inverse of the encoders above; pull register
+// numbers, displacements and immediates back out of an instruction word.
+inline uint16_t getRn(uint16_t x)
+{
+ return ((x & 0xf00) >> 8);
+}
+
+inline uint16_t getRm(uint16_t x)
+{
+ return ((x & 0xf0) >> 4);
+}
+
+inline uint16_t getDisp(uint16_t x)
+{
+ return (x & 0xf);
+}
+
+inline uint16_t getImm8(uint16_t x)
+{
+ return (x & 0xff);
+}
+
+inline uint16_t getImm12(uint16_t x)
+{
+ return (x & 0xfff);
+}
+
+// Double-precision register fields (3 bits, see getOpcodeGroup7/8).
+inline uint16_t getDRn(uint16_t x)
+{
+ return ((x & 0xe00) >> 9);
+}
+
+inline uint16_t getDRm(uint16_t x)
+{
+ return ((x & 0xe0) >> 5);
+}
+
+class SH4Assembler {
+public:
+ typedef SH4Registers::RegisterID RegisterID;
+ typedef SH4Registers::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<512, 4, 2, SH4Assembler> SH4Buffer;
+ static const RegisterID scratchReg1 = SH4Registers::r3;
+ static const RegisterID scratchReg2 = SH4Registers::r11;
+ static const uint32_t maxInstructionSize = 16;
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0009,
+ padForAlign32 = 0x00090009,
+ };
+
+ enum JumpType {
+ JumpFar,
+ JumpNear
+ };
+
+ SH4Assembler()
+ {
+ m_claimscratchReg = 0x0;
+ }
+
+ // SH4 condition codes
+ typedef enum {
+ EQ = 0x0, // Equal
+ NE = 0x1, // Not Equal
+ HS = 0x2, // Unsigned Greater Than or equal
+ HI = 0x3, // Unsigned Greater Than
+ LS = 0x4, // Unsigned Lower or Same
+ LI = 0x5, // Unsigned Lower
+ GE = 0x6, // Greater or Equal
+ LT = 0x7, // Less Than
+ GT = 0x8, // Greater Than
+ LE = 0x9, // Less or Equal
+ OF = 0xa, // OverFlow
+ SI = 0xb, // Signed
+ EQU= 0xc, // Equal or unordered(NaN)
+ NEU= 0xd, // presumably: Not equal or unordered -- TODO confirm
+ GTU= 0xe, // presumably: Greater than or unordered -- TODO confirm
+ GEU= 0xf, // presumably: Greater/equal or unordered -- TODO confirm
+ LTU= 0x10, // presumably: Less than or unordered -- TODO confirm
+ LEU= 0x11, // presumably: Less/equal or unordered -- TODO confirm
+ } Condition;
+
+ // Opaque label types
+public:
+ // True if constant fits the signed 8-bit immediate field used by most
+ // SH4 immediate forms.
+ bool isImmediate(int constant)
+ {
+ return ((constant <= 127) && (constant >= -128));
+ }
+
+ // Two-slot scratch register allocator over scratchReg1/scratchReg2,
+ // tracked in the m_claimscratchReg bitmask (bit 0 / bit 1).
+ RegisterID claimScratch()
+ {
+ ASSERT((m_claimscratchReg != 0x3)); // both scratch registers taken
+
+ if (!(m_claimscratchReg & 0x1)) {
+ m_claimscratchReg = (m_claimscratchReg | 0x1);
+ return scratchReg1;
+ }
+
+ m_claimscratchReg = (m_claimscratchReg | 0x2);
+ return scratchReg2;
+ }
+
+ // Clears the corresponding bit, keeping the other slot's state.
+ void releaseScratch(RegisterID scratchR)
+ {
+ if (scratchR == scratchReg1)
+ m_claimscratchReg = (m_claimscratchReg & 0x2);
+ else
+ m_claimscratchReg = (m_claimscratchReg & 0x1);
+ }
+
+ // Stack operations
+
+ // Push with pre-decrement of sp; the procedure register pr needs its
+ // dedicated sts.l form.
+ void pushReg(RegisterID reg)
+ {
+ if (reg == SH4Registers::pr) {
+ oneShortOp(getOpcodeGroup2(STSLPR_OPCODE, SH4Registers::sp));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup1(MOVL_WRITE_RNDEC_OPCODE, SH4Registers::sp, reg));
+ }
+
+ // Pop with post-increment of sp; lds.l form for pr.
+ void popReg(RegisterID reg)
+ {
+ if (reg == SH4Registers::pr) {
+ oneShortOp(getOpcodeGroup2(LDSLPR_OPCODE, SH4Registers::sp));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, reg, SH4Registers::sp));
+ }
+
+ // Copies the T (condition) bit into dst.
+ void movt(RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(MOVT_OPCODE, dst);
+ oneShortOp(opc);
+ }
+
+ // Arithmetic operations
+
+ void addlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADD_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addclRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADDC_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addvlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(ADDV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void addlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(ADDIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+ void andlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(AND_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void andlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(ANDIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void div1lRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DIV1_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void div0lRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DIV0_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void notlReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(NOT_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void orlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(OR_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void orlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(ORIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void sublRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(SUB_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void subvlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(SUBV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void xorlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(XOR_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void xorlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 255) && (imm8 >= 0));
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup5(XORIMM_OPCODE, imm8);
+ oneShortOp(opc);
+ }
+
+ void shllImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHLL_OPCODE, dst));
+ break;
+ case 2:
+ oneShortOp(getOpcodeGroup2(SHLL2_OPCODE, dst));
+ break;
+ case 8:
+ oneShortOp(getOpcodeGroup2(SHLL8_OPCODE, dst));
+ break;
+ case 16:
+ oneShortOp(getOpcodeGroup2(SHLL16_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void neg(RegisterID dst, RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup1(NEG_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void shllRegReg(RegisterID dst, RegisterID rShift)
+ {
+ uint16_t opc = getOpcodeGroup1(SHLD_OPCODE, dst, rShift);
+ oneShortOp(opc);
+ }
+
+ void shlrRegReg(RegisterID dst, RegisterID rShift)
+ {
+ neg(rShift, rShift);
+ shllRegReg(dst, rShift);
+ }
+
+ void sharRegReg(RegisterID dst, RegisterID rShift)
+ {
+ neg(rShift, rShift);
+ shaRegReg(dst, rShift);
+ }
+
+ void shaRegReg(RegisterID dst, RegisterID rShift)
+ {
+ uint16_t opc = getOpcodeGroup1(SHAD_OPCODE, dst, rShift);
+ oneShortOp(opc);
+ }
+
+ void shlrImm8r(int imm, RegisterID dst)
+ {
+ switch (imm) {
+ case 1:
+ oneShortOp(getOpcodeGroup2(SHLR_OPCODE, dst));
+ break;
+ case 2:
+ oneShortOp(getOpcodeGroup2(SHLR2_OPCODE, dst));
+ break;
+ case 8:
+ oneShortOp(getOpcodeGroup2(SHLR8_OPCODE, dst));
+ break;
+ case 16:
+ oneShortOp(getOpcodeGroup2(SHLR16_OPCODE, dst));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void imullRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MULL_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void dmullRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DMULL_L_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void dmulslRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(DMULSL_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void stsmacl(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSMACL_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stsmach(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSMACH_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // Comparisons
+
+ // Emits the compare for (left cond right), leaving the result in the T
+ // bit. Conditions with no dedicated instruction (LI/LS/LE/LT) are
+ // synthesized by swapping the operands of the complementary compare.
+ // NOTE(review): NE emits the same CMPEQ as EQ — callers presumably
+ // branch on the inverted T value; confirm against the branch emitters.
+ void cmplRegReg(RegisterID left, RegisterID right, Condition cond)
+ {
+ switch (cond) {
+ case NE:
+ oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+ break;
+ case GT:
+ oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, right, left));
+ break;
+ case EQ:
+ oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left));
+ break;
+ case GE:
+ oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, right, left));
+ break;
+ case HS:
+ oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, right, left));
+ break;
+ case HI:
+ oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, right, left));
+ break;
+ case LI:
+ oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, left, right)); // swapped operands
+ break;
+ case LS:
+ oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, left, right)); // swapped operands
+ break;
+ case LE:
+ oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, left, right)); // swapped operands
+ break;
+ case LT:
+ oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, left, right)); // swapped operands
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ // cmp/pl: T = (reg > 0).
+ void cmppl(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(CMPPL_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // cmp/pz: T = (reg >= 0).
+ void cmppz(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(CMPPZ_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // cmp/eq #imm, r0. NOTE(review): the dst parameter is unused — this
+ // encoding always compares against r0; callers should pass r0.
+ void cmpEqImmR0(int imm, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup5(CMPEQIMM_OPCODE, imm);
+ oneShortOp(opc);
+ }
+
+ // tst: T = ((src & dst) == 0).
+ void testlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(TST_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ // tst #imm, r0 — immediate form is restricted to r0 and 8-bit masks.
+ void testlImm8r(int imm, RegisterID dst)
+ {
+ ASSERT((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0));
+
+ uint16_t opc = getOpcodeGroup5(TSTIMM_OPCODE, imm);
+ oneShortOp(opc);
+ }
+
+ // Zero-operand instructions. NOTE(review): nop/bkpt pass false as
+ // oneShortOp's second argument (not visible in this chunk — presumably
+ // it suppresses copying into a constant-pool-tracking path; confirm
+ // against oneShortOp's definition).
+ void nop()
+ {
+ oneShortOp(NOP_OPCODE, false);
+ }
+
+ // Set the T bit.
+ void sett()
+ {
+ oneShortOp(SETT_OPCODE);
+ }
+
+ // Clear the T bit.
+ void clrt()
+ {
+ oneShortOp(CLRT_OPCODE);
+ }
+
+ // Toggle the FPSCR.SZ single/double transfer-size mode.
+ void fschg()
+ {
+ oneShortOp(FSCHG_OPCODE);
+ }
+
+ // Breakpoint/trap instruction.
+ void bkpt()
+ {
+ oneShortOp(BRK_OPCODE, false);
+ }
+
+ // PC-relative branch with an immediate displacement. BT/BF carry an
+ // 8-bit displacement, BRA a 12-bit one; the asserts enforce those
+ // encodable ranges.
+ void branch(uint16_t opc, int label)
+ {
+ switch (opc) {
+ case BT_OPCODE:
+ ASSERT((label <= 127) && (label >= -128));
+ oneShortOp(getOpcodeGroup5(BT_OPCODE, label));
+ break;
+ case BRA_OPCODE:
+ ASSERT((label <= 2047) && (label >= -2048));
+ oneShortOp(getOpcodeGroup6(BRA_OPCODE, label));
+ break;
+ case BF_OPCODE:
+ ASSERT((label <= 127) && (label >= -128));
+ oneShortOp(getOpcodeGroup5(BF_OPCODE, label));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ // Register-indirect branches (braf/jmp/jsr/bsrf), all encoded with a
+ // single register operand.
+ void branch(uint16_t opc, RegisterID reg)
+ {
+ switch (opc) {
+ case BRAF_OPCODE:
+ oneShortOp(getOpcodeGroup2(BRAF_OPCODE, reg));
+ break;
+ case JMP_OPCODE:
+ oneShortOp(getOpcodeGroup2(JMP_OPCODE, reg));
+ break;
+ case JSR_OPCODE:
+ oneShortOp(getOpcodeGroup2(JSR_OPCODE, reg));
+ break;
+ case BSRF_OPCODE:
+ oneShortOp(getOpcodeGroup2(BSRF_OPCODE, reg));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ // lds reg, pr — load the procedure (return-address) register.
+ void ldspr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(LDSPR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // sts pr, reg — read the procedure register back.
+ void stspr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSPR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // extu.b / extu.w — zero-extend the low byte/word of src into dst.
+ void extub(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(EXTUB_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void extuw(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(EXTUW_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ // float operations
+
+ void ldsrmfpul(RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(LDS_RM_FPUL_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void fneg(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(FNEG_OPCODE, dst);
+ oneShortOp(opc, true, false);
+ }
+
+ void fsqrt(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup2(FSQRT_OPCODE, dst);
+ oneShortOp(opc, true, false);
+ }
+
+ void stsfpulReg(RegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(STS_FPUL_RN_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void floatfpulfrn(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FLOAT_OPCODE, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmull(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMUL_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadrm(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriterm(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriter0r(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadr0r(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsReadrminc(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_INC_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fmovsWriterndec(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_DEC_OPCODE, dst, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void ftrcRegfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FTRC_OPCODE, src);
+ oneShortOp(opc, true, false);
+ }
+
+ void fldsfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FLDS_FRM_FPUL_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void fstsfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup2(FSTS_FPUL_FRN_OPCODE, src);
+ oneShortOp(opc);
+ }
+
+ void ldsfpscr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(LDSFPSCR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ void stsfpscr(RegisterID reg)
+ {
+ uint16_t opc = getOpcodeGroup2(STSFPSCR_OPCODE, reg);
+ oneShortOp(opc);
+ }
+
+ // double operations
+
+ void dcnvds(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FCNVDS_DRM_FPUL_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcnvsd(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FCNVSD_FPUL_DRN_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcmppeq(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FCMPEQ_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dcmppgt(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FCMPGT_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dmulRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FMUL_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dsubRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FSUB_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void daddRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FADD_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dmovRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FMOV_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void ddivRegReg(FPRegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup8(FDIV_OPCODE, dst >> 1, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void dsqrt(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FSQRT_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void dneg(FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup7(FNEG_OPCODE, dst >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovReadrm(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovWriterm(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovWriter0r(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_R0RN_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void fmovReadr0r(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_R0RM_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovReadrminc(RegisterID src, FPRegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_INC_OPCODE, dst >> 1, src);
+ oneShortOp(opc);
+ }
+
+ void fmovWriterndec(FPRegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_DEC_OPCODE, dst, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void floatfpulDreg(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FLOAT_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ void ftrcdrmfpul(FPRegisterID src)
+ {
+ uint16_t opc = getOpcodeGroup7(FTRC_OPCODE, src >> 1);
+ oneShortOp(opc);
+ }
+
+ // Various move ops
+
+ void movImm8(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+ void movlRegReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOV_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwRegMem(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_WRITE_RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwMemReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movwPCReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT(base == SH4Registers::pc);
+ ASSERT((offset <= 255) && (offset >= 0));
+
+ uint16_t opc = getOpcodeGroup3(MOVW_READ_OFFPC_OPCODE, dst, offset);
+ oneShortOp(opc);
+ }
+
+ void movwMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup11(MOVW_READ_OFFRM_OPCODE, base, offset);
+ oneShortOp(opc);
+ }
+
+ void movwR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVW_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlRegMem(RegisterID src, int offset, RegisterID base)
+ {
+ ASSERT((offset <= 15) && (offset >= 0));
+
+ if (!offset) {
+ oneShortOp(getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup4(MOVL_WRITE_OFFRN_OPCODE, base, src, offset));
+ }
+
+ void movlRegMem(RegisterID src, RegisterID base)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src);
+ oneShortOp(opc);
+ }
+
+ void movlMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ if (base == SH4Registers::pc) {
+ ASSERT((offset <= 255) && (offset >= 0));
+ oneShortOp(getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, dst, offset));
+ return;
+ }
+
+ ASSERT((offset <= 15) && (offset >= 0));
+ if (!offset) {
+ oneShortOp(getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base));
+ return;
+ }
+
+ oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+ }
+
+ void movlMemRegCompact(int offset, RegisterID base, RegisterID dst)
+ {
+ oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset));
+ }
+
+ void movbRegMem(RegisterID src, RegisterID base)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_WRITE_RN_OPCODE, base, src);
+ oneShortOp(opc);
+ }
+
+ void movbMemReg(int offset, RegisterID base, RegisterID dst)
+ {
+ ASSERT(dst == SH4Registers::r0);
+
+ uint16_t opc = getOpcodeGroup11(MOVB_READ_OFFRM_OPCODE, base, offset);
+ oneShortOp(opc);
+ }
+
+ void movbR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movbMemReg(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVB_READ_RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlMemReg(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movlMemRegIn(RegisterID base, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, dst, base);
+ oneShortOp(opc);
+ }
+
+ void movlR0mr(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_READ_R0RM_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlRegMemr0(RegisterID src, RegisterID dst)
+ {
+ uint16_t opc = getOpcodeGroup1(MOVL_WRITE_R0RN_OPCODE, dst, src);
+ oneShortOp(opc);
+ }
+
+ void movlImm8r(int imm8, RegisterID dst)
+ {
+ ASSERT((imm8 <= 127) && (imm8 >= -128));
+
+ uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8);
+ oneShortOp(opc);
+ }
+
+    // Load a 32-bit constant into 'dst'. Values that fit a signed 8-bit
+    // immediate use a single MOV #imm8; anything larger goes through the
+    // constant pool and is loaded PC-relative once the pool is emitted.
+    void loadConstant(uint32_t constant, RegisterID dst)
+    {
+        if (((int)constant <= 0x7f) && ((int)constant >= -0x80)) {
+            movImm8(constant, dst);
+            return;
+        }
+
+        // Placeholder MOV with displacement 0; patchConstantPoolLoad() later
+        // rewrites it into a PC-relative load of the real pool slot.
+        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+        m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+        printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+        // Final 'true': the pool entry may be shared, unlike
+        // loadConstantUnReusable() below.
+        m_buffer.putShortWithConstantInt(opc, constant, true);
+    }
+
+    // Like loadConstant(), but always allocates a fresh, non-shared pool
+    // entry. Link/patch helpers (linkJump, linkCall, repatchInt32, ...)
+    // rewrite the slot in place, so it must not be shared with other loads.
+    // 'ensureSpace' lets callers that have already reserved buffer space
+    // skip the reservation.
+    void loadConstantUnReusable(uint32_t constant, RegisterID dst, bool ensureSpace = false)
+    {
+        // Placeholder MOV with displacement 0, patched after pool emission.
+        uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0);
+
+        if (ensureSpace)
+            m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t));
+
+        printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize());
+        // No 'reusable' flag here, in contrast to loadConstant().
+        m_buffer.putShortWithConstantInt(opc, constant);
+    }
+
+ // Flow control
+
+    // Emit an indirect call stub through a scratch register:
+    //     mov  #pool, scr    (target address patched in at link time)
+    //     jsr  @scr
+    //     nop                (delay slot)
+    // Returns the label *after* the stub, i.e. the return address;
+    // linkCall()/relinkCall() step back 3 instructions from it to reach
+    // the pool-referencing MOV.
+    AssemblerLabel call()
+    {
+        RegisterID scr = claimScratch();
+        m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+        loadConstantUnReusable(0x0, scr);
+        branch(JSR_OPCODE, scr);
+        nop();
+        releaseScratch(scr);
+        return m_buffer.label();
+    }
+
+ AssemblerLabel call(RegisterID dst)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ branch(JSR_OPCODE, dst);
+ nop();
+ return m_buffer.label();
+ }
+
+    // Emit an unconditional far-jump stub through a scratch register:
+    //     mov  #pool, scr    (displacement patched in at link time)
+    //     braf @scr
+    //     nop                (delay slot)
+    // Unlike call(), the label of the *first* instruction is returned so
+    // linkJump() can later rewrite the whole stub.
+    AssemblerLabel jmp()
+    {
+        RegisterID scr = claimScratch();
+        m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t));
+        AssemblerLabel label = m_buffer.label();
+        loadConstantUnReusable(0x0, scr);
+        branch(BRAF_OPCODE, scr);
+        nop();
+        releaseScratch(scr);
+        return label;
+    }
+
+ void extraInstrForBranch(RegisterID dst)
+ {
+ loadConstantUnReusable(0x0, dst);
+ nop();
+ nop();
+ }
+
+ AssemblerLabel jmp(RegisterID dst)
+ {
+ jmpReg(dst);
+ return m_buffer.label();
+ }
+
+ void jmpReg(RegisterID dst)
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ branch(JMP_OPCODE, dst);
+ nop();
+ }
+
+ AssemblerLabel jne()
+ {
+ AssemblerLabel label = m_buffer.label();
+ branch(BF_OPCODE, 0);
+ return label;
+ }
+
+ AssemblerLabel je()
+ {
+ AssemblerLabel label = m_buffer.label();
+ branch(BT_OPCODE, 0);
+ return label;
+ }
+
+ AssemblerLabel bra()
+ {
+ AssemblerLabel label = m_buffer.label();
+ branch(BRA_OPCODE, 0);
+ return label;
+ }
+
+ void ret()
+ {
+ m_buffer.ensureSpace(maxInstructionSize + 2);
+ oneShortOp(RTS_OPCODE, false);
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel label()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+    // Pad the instruction stream with NOPs until the buffer position is
+    // aligned to 'alignment' bytes, then return a label at that position.
+    AssemblerLabel align(int alignment)
+    {
+        m_buffer.ensureSpace(maxInstructionSize + 2);
+        while (!m_buffer.isAligned(alignment)) {
+            nop();
+            // Re-reserve after each emitted NOP so the loop can never
+            // write past the buffer.
+            m_buffer.ensureSpace(maxInstructionSize + 2);
+        }
+        return label();
+    }
+
+    // Overwrite the 32-bit literal referenced by a PC-relative load at
+    // instructionPtr. The slot lives at ((instr + 4) & ~3) + offset*4,
+    // matching SH4 MOV.L @(disp, PC) addressing (PC+4, longword-aligned,
+    // displacement scaled by 4).
+    static void changePCrelativeAddress(int offset, uint16_t* instructionPtr, uint32_t newAddress)
+    {
+        uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & (~0x3));
+        *reinterpret_cast<uint32_t*>(address) = newAddress;
+    }
+
+    // Read the 32-bit literal referenced by a PC-relative load at
+    // instructionPtr; inverse of changePCrelativeAddress(), same slot
+    // computation: ((instr + 4) & ~3) + offset*4.
+    static uint32_t readPCrelativeAddress(int offset, uint16_t* instructionPtr)
+    {
+        uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) & (~0x3));
+        return *reinterpret_cast<uint32_t*>(address);
+    }
+
+ static uint16_t* getInstructionPtr(void* code, int offset)
+ {
+ return reinterpret_cast<uint16_t*> (reinterpret_cast<uint32_t>(code) + offset);
+ }
+
+    // Resolve the jump emitted at offset 'from' inside 'code' so it reaches
+    // 'to'. Two stub shapes are handled (see the inline diagrams): a
+    // conditional BT/BF guarding a far-branch stub, and a plain
+    // MOV/BRAF/NOP far-jump stub, which is shrunk to BRA/NOP when the
+    // displacement fits the 12-bit BRA range.
+    static void linkJump(void* code, AssemblerLabel from, void* to)
+    {
+        ASSERT(from.isSet());
+
+        uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset);
+        uint16_t instruction = *instructionPtr;
+        int offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(code)) - from.m_offset;
+
+        if (((instruction & 0xff00) == BT_OPCODE) || ((instruction & 0xff00) == BF_OPCODE)) {
+            /* BT label ==> BF 2
+               nop          LDR reg
+               nop          braf @reg
+               nop          nop
+            */
+            // Bias the displacement for the BRAF's PC-relative base
+            // (presumably PC + 4 of the braf slot — mirrors relinkJump()).
+            offsetBits -= 8;
+            // Flip BT<->BF and set displacement 2 so the inverted condition
+            // skips over the far-branch stub.
+            instruction ^= 0x0202;
+            *instructionPtr++ = instruction;
+            // Store the branch displacement into the pool slot read by the LDR.
+            changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+            // Rewrite the next slot as BRAF through the loaded register,
+            // preserving the register field (bits 0xf00).
+            instruction = (BRAF_OPCODE | (*instructionPtr++ & 0xf00));
+            *instructionPtr = instruction;
+            printBlockInstr(instructionPtr - 2, from.m_offset, 3);
+            return;
+        }
+
+        /* MOV #imm, reg => LDR reg
+           braf @reg        braf @reg
+           nop              nop
+        */
+        ASSERT((*(instructionPtr + 1) & BRAF_OPCODE) == BRAF_OPCODE);
+
+        offsetBits -= 4;
+        // Within 12-bit BRA range: collapse the stub to BRA + NOP.
+        if (offsetBits >= -4096 && offsetBits <= 4094) {
+            *instructionPtr = getOpcodeGroup6(BRA_OPCODE, offsetBits >> 1);
+            *(++instructionPtr) = NOP_OPCODE;
+            printBlockInstr(instructionPtr - 1, from.m_offset, 2);
+            return;
+        }
+
+        // Too far: keep MOV/BRAF and patch the pool literal with the displacement.
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits - 2);
+        printInstr(*instructionPtr, from.m_offset + 2);
+    }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset);
+ instructionPtr -= 3;
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ uint16_t* instructionPtr = getInstructionPtr(code, where.m_offset);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(value));
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ static uint32_t* getLdrImmAddressOnPool(SH4Word* insn, uint32_t* constPool)
+ {
+ return (constPool + (*insn & 0xff));
+ }
+
+ static SH4Word patchConstantPoolLoad(SH4Word load, int value)
+ {
+ return ((load & ~0xff) | value);
+ }
+
+    // Build the BRA/NOP pair that jumps over an inline constant pool.
+    // 'offset' is in bytes and, halved, must fit BRA's signed 12-bit
+    // displacement.
+    static SH4Buffer::TwoShorts placeConstantPoolBarrier(int offset)
+    {
+        ASSERT(((offset >> 1) <= 2047) && ((offset >> 1) >= -2048));
+
+        SH4Buffer::TwoShorts m_barrier;
+        m_barrier.high = (BRA_OPCODE | (offset >> 1));
+        m_barrier.low = NOP_OPCODE; // BRA delay slot
+        printInstr(((BRA_OPCODE | (offset >> 1))), 0);
+        printInstr(NOP_OPCODE, 0);
+        return m_barrier;
+    }
+
+    // Fix up a placeholder constant load once the pool has been flushed:
+    // "MOV #index, Rn" (where index selects a pool slot) is rewritten into
+    // "MOV.L @(disp, PC), Rn" (opcode 0xd000) addressing slot 'index'
+    // inside the pool at constPoolAddr.
+    static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+    {
+        SH4Word* instructionPtr = reinterpret_cast<SH4Word*>(loadAddr);
+        SH4Word instruction = *instructionPtr;
+        SH4Word index = instruction & 0xff;
+
+        // Only placeholder MOV-immediates are rewritten; leave others alone.
+        if ((instruction & 0xf000) != MOVIMM_OPCODE)
+            return;
+
+        // The scaled 8-bit displacement limits the reach to 1KB past the load.
+        ASSERT((((reinterpret_cast<uint32_t>(constPoolAddr) - reinterpret_cast<uint32_t>(loadAddr)) + index * 4)) < 1024);
+
+        // Displacement is relative to (PC & ~3) + 4, matching SH4
+        // PC-relative longword addressing.
+        int offset = reinterpret_cast<uint32_t>(constPoolAddr) + (index * 4) - ((reinterpret_cast<uint32_t>(instructionPtr) & ~0x03) + 4);
+        instruction &= 0xf00; // keep the destination-register field
+        instruction |= 0xd000; // MOV.L @(disp, PC), Rn
+        offset &= 0x03ff;
+        instruction |= (offset >> 2); // displacement counted in longwords
+        *instructionPtr = instruction;
+        printInstr(instruction, reinterpret_cast<uint32_t>(loadAddr));
+    }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ patchPointer(where, value);
+ }
+
+ static void* readPointer(void* code)
+ {
+ return reinterpret_cast<void*>(readInt32(code));
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where);
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, value);
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ASSERT(value >= 0);
+ ASSERT(value <= 60);
+ *reinterpret_cast<uint16_t*>(where) = ((*reinterpret_cast<uint16_t*>(where) & 0xfff0) | (value >> 2));
+ cacheFlush(reinterpret_cast<uint16_t*>(where), sizeof(uint16_t));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from);
+ instructionPtr -= 3;
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to));
+ }
+
+    // Re-target an already-linked jump at 'from' to absolute address 'to'.
+    // Mirrors linkJump(): a BT/BF-guarded stub gets the pool displacement
+    // rewritten (the condition word was already set up at link time); a
+    // MOV/BRAF stub is shrunk to BRA/NOP when within 12-bit range,
+    // otherwise its pool literal is updated.
+    static void relinkJump(void* from, void* to)
+    {
+        uint16_t* instructionPtr = reinterpret_cast<uint16_t*> (from);
+        uint16_t instruction = *instructionPtr;
+        int32_t offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(from));
+
+        if (((*instructionPtr & 0xff00) == BT_OPCODE) || ((*instructionPtr & 0xff00) == BF_OPCODE)) {
+            offsetBits -= 8; // bias for the BRAF's PC-relative base — see linkJump()
+            instructionPtr++; // skip the (already inverted) condition word
+            changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+            instruction = (BRAF_OPCODE | (*instructionPtr++ & 0xf00));
+            *instructionPtr = instruction;
+            printBlockInstr(instructionPtr, reinterpret_cast<uint32_t>(from) + 1, 3);
+            return;
+        }
+
+        ASSERT((*(instructionPtr + 1) & BRAF_OPCODE) == BRAF_OPCODE);
+        offsetBits -= 4;
+        if (offsetBits >= -4096 && offsetBits <= 4094) {
+            *instructionPtr = getOpcodeGroup6(BRA_OPCODE, offsetBits >> 1);
+            *(++instructionPtr) = NOP_OPCODE;
+            printBlockInstr(instructionPtr - 2, reinterpret_cast<uint32_t>(from), 2);
+            return;
+        }
+
+        // Far target: patch the pool literal read by the MOV.
+        changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits - 2);
+        printInstr(*instructionPtr, reinterpret_cast<uint32_t>(from));
+    }
+
+ // Linking & patching
+
+    // Overwrite the pool constant read by the "MOV.L @(disp, PC), Rn" at
+    // instructionStart with 'imm', reverting a previously patched jump.
+    static void revertJump(void* instructionStart, SH4Word imm)
+    {
+        SH4Word *insn = reinterpret_cast<SH4Word*>(instructionStart);
+        SH4Word disp;
+
+        ASSERT((insn[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE);
+
+        disp = insn[0] & 0x00ff;
+        insn += 2 + (disp << 1); // PC += 4 + (disp*4), stepping in 16-bit units
+        insn = (SH4Word *) ((unsigned) insn & (~3)); // longword-align onto the slot
+        insn[0] = imm;
+        cacheFlush(insn, sizeof(SH4Word));
+    }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type = JumpFar)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+
+ uint16_t* instructionPtr = getInstructionPtr(data(), from.m_offset);
+ uint16_t instruction = *instructionPtr;
+ int offsetBits;
+
+ if (type == JumpNear) {
+ ASSERT((instruction == BT_OPCODE) || (instruction == BF_OPCODE) || (instruction == BRA_OPCODE));
+ int offset = (codeSize() - from.m_offset) - 4;
+ *instructionPtr++ = instruction | (offset >> 1);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ return;
+ }
+
+ if (((instruction & 0xff00) == BT_OPCODE) || ((instruction & 0xff00) == BF_OPCODE)) {
+ /* BT label => BF 2
+ nop LDR reg
+ nop braf @reg
+ nop nop
+ */
+ offsetBits = (to.m_offset - from.m_offset) - 8;
+ instruction ^= 0x0202;
+ *instructionPtr++ = instruction;
+ if ((*instructionPtr & 0xf000) == 0xe000) {
+ uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress());
+ *addr = offsetBits;
+ } else
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits);
+ instruction = (BRAF_OPCODE | (*instructionPtr++ & 0xf00));
+ *instructionPtr = instruction;
+ printBlockInstr(instructionPtr - 2, from.m_offset, 3);
+ return;
+ }
+
+ /* MOV # imm, reg => LDR reg
+ braf @reg braf @reg
+ nop nop
+ */
+ ASSERT((*(instructionPtr + 1) & BRAF_OPCODE) == BRAF_OPCODE);
+ offsetBits = (to.m_offset - from.m_offset) - 4;
+ if (offsetBits >= -4096 && offsetBits <= 4094) {
+ *instructionPtr = getOpcodeGroup6(BRA_OPCODE, offsetBits >> 1);
+ *(++instructionPtr) = NOP_OPCODE;
+ printBlockInstr(instructionPtr - 1, from.m_offset, 2);
+ return;
+ }
+
+ instruction = *instructionPtr;
+ if ((instruction & 0xf000) == 0xe000) {
+ uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress());
+ *addr = offsetBits - 2;
+ printInstr(*instructionPtr, from.m_offset + 2);
+ return;
+ }
+
+ changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits - 2);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static void patchPointer(void* code, AssemblerLabel where, void* value)
+ {
+ patchPointer(reinterpret_cast<uint32_t*>(code) + where.m_offset, value);
+ }
+
+ static void patchPointer(void* code, void* value)
+ {
+ patchInt32(code, reinterpret_cast<uint32_t>(value));
+ }
+
+ static void patchInt32(void* code, uint32_t value)
+ {
+ changePCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code), value);
+ }
+
+ static uint32_t readInt32(void* code)
+ {
+ return readPCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code));
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ uint16_t* instructionPtr = static_cast<uint16_t*>(from);
+ instructionPtr -= 3;
+ return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr));
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ return m_buffer.executableCopy(globalData, ownerUID, effort);
+ }
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if !OS(LINUX)
+#error "The cacheFlush support is missing on this platform."
+#elif defined CACHEFLUSH_D_L2
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2);
+#else
+ syscall(__NR_cacheflush, reinterpret_cast<unsigned>(code), size, CACHEFLUSH_D_WB | CACHEFLUSH_I);
+#endif
+ }
+
+ void prefix(uint16_t pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+    // Emit one 16-bit opcode into the buffer. 'checksize' may be false when
+    // the caller has already reserved space via ensureSpace(); 'isDouble'
+    // only influences how the tracing output renders register operands.
+    void oneShortOp(uint16_t opcode, bool checksize = true, bool isDouble = true)
+    {
+        printInstr(opcode, m_buffer.codeSize(), isDouble);
+        if (checksize)
+            m_buffer.ensureSpace(maxInstructionSize);
+        m_buffer.putShortUnchecked(opcode);
+    }
+
+ void ensureSpace(int space)
+ {
+ m_buffer.ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ // Administrative methods
+
+ void* data() const { return m_buffer.data(); }
+ size_t codeSize() const { return m_buffer.codeSize(); }
+
+#ifdef SH4_ASSEMBLER_TRACING
+ static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true)
+ {
+ if (!getenv("JavaScriptCoreDumpJIT"))
+ return;
+
+ const char *format = 0;
+ printfStdoutInstr("offset: 0x%8.8x\t", size);
+ switch (opc) {
+ case BRK_OPCODE:
+ format = " BRK\n";
+ break;
+ case NOP_OPCODE:
+ format = " NOP\n";
+ break;
+ case RTS_OPCODE:
+ format =" *RTS\n";
+ break;
+ case SETS_OPCODE:
+ format = " SETS\n";
+ break;
+ case SETT_OPCODE:
+ format = " SETT\n";
+ break;
+ case CLRT_OPCODE:
+ format = " CLRT\n";
+ break;
+ case FSCHG_OPCODE:
+ format = " FSCHG\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format);
+ return;
+ }
+ switch (opc & 0xf0ff) {
+ case BRAF_OPCODE:
+ format = " *BRAF R%d\n";
+ break;
+ case DT_OPCODE:
+ format = " DT R%d\n";
+ break;
+ case CMPPL_OPCODE:
+ format = " CMP/PL R%d\n";
+ break;
+ case CMPPZ_OPCODE:
+ format = " CMP/PZ R%d\n";
+ break;
+ case JMP_OPCODE:
+ format = " *JMP @R%d\n";
+ break;
+ case JSR_OPCODE:
+ format = " *JSR @R%d\n";
+ break;
+ case LDSPR_OPCODE:
+ format = " LDS R%d, PR\n";
+ break;
+ case LDSLPR_OPCODE:
+ format = " LDS.L @R%d+, PR\n";
+ break;
+ case MOVT_OPCODE:
+ format = " MOVT R%d\n";
+ break;
+ case SHAL_OPCODE:
+ format = " SHAL R%d\n";
+ break;
+ case SHAR_OPCODE:
+ format = " SHAR R%d\n";
+ break;
+ case SHLL_OPCODE:
+ format = " SHLL R%d\n";
+ break;
+ case SHLL2_OPCODE:
+ format = " SHLL2 R%d\n";
+ break;
+ case SHLL8_OPCODE:
+ format = " SHLL8 R%d\n";
+ break;
+ case SHLL16_OPCODE:
+ format = " SHLL16 R%d\n";
+ break;
+ case SHLR_OPCODE:
+ format = " SHLR R%d\n";
+ break;
+ case SHLR2_OPCODE:
+ format = " SHLR2 R%d\n";
+ break;
+ case SHLR8_OPCODE:
+ format = " SHLR8 R%d\n";
+ break;
+ case SHLR16_OPCODE:
+ format = " SHLR16 R%d\n";
+ break;
+ case STSPR_OPCODE:
+ format = " STS PR, R%d\n";
+ break;
+ case STSLPR_OPCODE:
+ format = " STS.L PR, @-R%d\n";
+ break;
+ case LDS_RM_FPUL_OPCODE:
+ format = " LDS R%d, FPUL\n";
+ break;
+ case STS_FPUL_RN_OPCODE:
+ format = " STS FPUL, R%d \n";
+ break;
+ case FLDS_FRM_FPUL_OPCODE:
+ format = " FLDS FR%d, FPUL\n";
+ break;
+ case FSTS_FPUL_FRN_OPCODE:
+ format = " FSTS FPUL, R%d \n";
+ break;
+ case LDSFPSCR_OPCODE:
+ format = " LDS R%d, FPSCR \n";
+ break;
+ case STSFPSCR_OPCODE:
+ format = " STS FPSCR, R%d \n";
+ break;
+ case STSMACL_OPCODE:
+ format = " STS MACL, R%d \n";
+ break;
+ case STSMACH_OPCODE:
+ format = " STS MACH, R%d \n";
+ break;
+ case BSRF_OPCODE:
+ format = " *BSRF R%d";
+ break;
+ case FTRC_OPCODE:
+ format = " FTRC FR%d, FPUL\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getRn(opc));
+ return;
+ }
+ switch (opc & 0xf0ff) {
+ case FNEG_OPCODE:
+ format = " FNEG DR%d\n";
+ break;
+ case FLOAT_OPCODE:
+ format = " FLOAT DR%d\n";
+ break;
+ case FTRC_OPCODE:
+ format = " FTRC FR%d, FPUL\n";
+ break;
+ case FSQRT_OPCODE:
+ format = " FSQRT FR%d\n";
+ break;
+ case FCNVDS_DRM_FPUL_OPCODE:
+ format = " FCNVDS FR%d, FPUL\n";
+ break;
+ case FCNVSD_FPUL_DRN_OPCODE:
+ format = " FCNVSD FPUL, FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case ADD_OPCODE:
+ format = " ADD R%d, R%d\n";
+ break;
+ case ADDC_OPCODE:
+ format = " ADDC R%d, R%d\n";
+ break;
+ case ADDV_OPCODE:
+ format = " ADDV R%d, R%d\n";
+ break;
+ case AND_OPCODE:
+ format = " AND R%d, R%d\n";
+ break;
+ case DIV1_OPCODE:
+ format = " DIV1 R%d, R%d\n";
+ break;
+ case CMPEQ_OPCODE:
+ format = " CMP/EQ R%d, R%d\n";
+ break;
+ case CMPGE_OPCODE:
+ format = " CMP/GE R%d, R%d\n";
+ break;
+ case CMPGT_OPCODE:
+ format = " CMP/GT R%d, R%d\n";
+ break;
+ case CMPHI_OPCODE:
+ format = " CMP/HI R%d, R%d\n";
+ break;
+ case CMPHS_OPCODE:
+ format = " CMP/HS R%d, R%d\n";
+ break;
+ case MOV_OPCODE:
+ format = " MOV R%d, R%d\n";
+ break;
+ case MOVB_WRITE_RN_OPCODE:
+ format = " MOV.B R%d, @R%d\n";
+ break;
+ case MOVB_WRITE_RNDEC_OPCODE:
+ format = " MOV.B R%d, @-R%d\n";
+ break;
+ case MOVB_WRITE_R0RN_OPCODE:
+ format = " MOV.B R%d, @(R0, R%d)\n";
+ break;
+ case MOVB_READ_RM_OPCODE:
+ format = " MOV.B @R%d, R%d\n";
+ break;
+ case MOVB_READ_RMINC_OPCODE:
+ format = " MOV.B @R%d+, R%d\n";
+ break;
+ case MOVB_READ_R0RM_OPCODE:
+ format = " MOV.B @(R0, R%d), R%d\n";
+ break;
+ case MOVL_WRITE_RN_OPCODE:
+ format = " MOV.L R%d, @R%d\n";
+ break;
+ case MOVL_WRITE_RNDEC_OPCODE:
+ format = " MOV.L R%d, @-R%d\n";
+ break;
+ case MOVL_WRITE_R0RN_OPCODE:
+ format = " MOV.L R%d, @(R0, R%d)\n";
+ break;
+ case MOVL_READ_RM_OPCODE:
+ format = " MOV.L @R%d, R%d\n";
+ break;
+ case MOVL_READ_RMINC_OPCODE:
+ format = " MOV.L @R%d+, R%d\n";
+ break;
+ case MOVL_READ_R0RM_OPCODE:
+ format = " MOV.L @(R0, R%d), R%d\n";
+ break;
+ case MULL_OPCODE:
+ format = " MUL.L R%d, R%d\n";
+ break;
+ case DMULL_L_OPCODE:
+ format = " DMULU.L R%d, R%d\n";
+ break;
+ case DMULSL_OPCODE:
+ format = " DMULS.L R%d, R%d\n";
+ break;
+ case NEG_OPCODE:
+ format = " NEG R%d, R%d\n";
+ break;
+ case NEGC_OPCODE:
+ format = " NEGC R%d, R%d\n";
+ break;
+ case NOT_OPCODE:
+ format = " NOT R%d, R%d\n";
+ break;
+ case OR_OPCODE:
+ format = " OR R%d, R%d\n";
+ break;
+ case SHAD_OPCODE:
+ format = " SHAD R%d, R%d\n";
+ break;
+ case SHLD_OPCODE:
+ format = " SHLD R%d, R%d\n";
+ break;
+ case SUB_OPCODE:
+ format = " SUB R%d, R%d\n";
+ break;
+ case SUBC_OPCODE:
+ format = " SUBC R%d, R%d\n";
+ break;
+ case SUBV_OPCODE:
+ format = " SUBV R%d, R%d\n";
+ break;
+ case TST_OPCODE:
+ format = " TST R%d, R%d\n";
+ break;
+ case XOR_OPCODE:
+ format = " XOR R%d, R%d\n";break;
+ case MOVW_WRITE_RN_OPCODE:
+ format = " MOV.W R%d, @R%d\n";
+ break;
+ case MOVW_READ_RM_OPCODE:
+ format = " MOV.W @R%d, R%d\n";
+ break;
+ case MOVW_READ_R0RM_OPCODE:
+ format = " MOV.W @(R0, R%d), R%d\n";
+ break;
+ case EXTUB_OPCODE:
+ format = " EXTU.B R%d, R%d\n";
+ break;
+ case EXTUW_OPCODE:
+ format = " EXTU.W R%d, R%d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FSUB_OPCODE:
+ format = " FSUB FR%d, FR%d\n";
+ break;
+ case FADD_OPCODE:
+ format = " FADD FR%d, FR%d\n";
+ break;
+ case FDIV_OPCODE:
+ format = " FDIV FR%d, FR%d\n";
+ break;
+ case FMUL_OPCODE:
+ format = " DMULL FR%d, FR%d\n";
+ break;
+ case FMOV_OPCODE:
+ format = " FMOV FR%d, FR%d\n";
+ break;
+ case FCMPEQ_OPCODE:
+ format = " FCMP/EQ FR%d, FR%d\n";
+ break;
+ case FCMPGT_OPCODE:
+ format = " FCMP/GT FR%d, FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, getDRm(opc) << 1, getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FMOVS_WRITE_RN_DEC_OPCODE:
+ format = " %s FR%d, @-R%d\n";
+ break;
+ case FMOVS_WRITE_RN_OPCODE:
+ format = " %s FR%d, @R%d\n";
+ break;
+ case FMOVS_WRITE_R0RN_OPCODE:
+ format = " %s FR%d, @(R0, R%d)\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, "FMOV", getDRm(opc) << 1, getDRn(opc));
+ else
+ printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf00f) {
+ case FMOVS_READ_RM_OPCODE:
+ format = " %s @R%d, FR%d\n";
+ break;
+ case FMOVS_READ_RM_INC_OPCODE:
+ format = " %s @R%d+, FR%d\n";
+ break;
+ case FMOVS_READ_R0RM_OPCODE:
+ format = " %s @(R0, R%d), FR%d\n";
+ break;
+ }
+ if (format) {
+ if (isdoubleInst)
+ printfStdoutInstr(format, "FMOV", getDRm(opc), getDRn(opc) << 1);
+ else
+ printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xff00) {
+ case BF_OPCODE:
+ format = " BF %d\n";
+ break;
+ case BFS_OPCODE:
+ format = " *BF/S %d\n";
+ break;
+ case ANDIMM_OPCODE:
+ format = " AND #%d, R0\n";
+ break;
+ case BT_OPCODE:
+ format = " BT %d\n";
+ break;
+ case BTS_OPCODE:
+ format = " *BT/S %d\n";
+ break;
+ case CMPEQIMM_OPCODE:
+ format = " CMP/EQ #%d, R0\n";
+ break;
+ case MOVB_WRITE_OFFGBR_OPCODE:
+ format = " MOV.B R0, @(%d, GBR)\n";
+ break;
+ case MOVB_READ_OFFGBR_OPCODE:
+ format = " MOV.B @(%d, GBR), R0\n";
+ break;
+ case MOVL_WRITE_OFFGBR_OPCODE:
+ format = " MOV.L R0, @(%d, GBR)\n";
+ break;
+ case MOVL_READ_OFFGBR_OPCODE:
+ format = " MOV.L @(%d, GBR), R0\n";
+ break;
+ case MOVA_READ_OFFPC_OPCODE:
+ format = " MOVA @(%d, PC), R0\n";
+ break;
+ case ORIMM_OPCODE:
+ format = " OR #%d, R0\n";
+ break;
+ case ORBIMM_OPCODE:
+ format = " OR.B #%d, @(R0, GBR)\n";
+ break;
+ case TSTIMM_OPCODE:
+ format = " TST #%d, R0\n";
+ break;
+ case TSTB_OPCODE:
+ format = " TST.B %d, @(R0, GBR)\n";
+ break;
+ case XORIMM_OPCODE:
+ format = " XOR #%d, R0\n";
+ break;
+ case XORB_OPCODE:
+ format = " XOR.B %d, @(R0, GBR)\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm8(opc));
+ return;
+ }
+ switch (opc & 0xff00) {
+ case MOVB_WRITE_OFFRN_OPCODE:
+ format = " MOV.B R0, @(%d, R%d)\n";
+ break;
+ case MOVB_READ_OFFRM_OPCODE:
+ format = " MOV.B @(%d, R%d), R0\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getDisp(opc), getRm(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case BRA_OPCODE:
+ format = " *BRA %d\n";
+ break;
+ case BSR_OPCODE:
+ format = " *BSR %d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm12(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case MOVL_READ_OFFPC_OPCODE:
+ format = " MOV.L @(%d, PC), R%d\n";
+ break;
+ case ADDIMM_OPCODE:
+ format = " ADD #%d, R%d\n";
+ break;
+ case MOVIMM_OPCODE:
+ format = " MOV #%d, R%d\n";
+ break;
+ case MOVW_READ_OFFPC_OPCODE:
+ format = " MOV.W @(%d, PC), R%d\n";
+ break;
+ }
+ if (format) {
+ printfStdoutInstr(format, getImm8(opc), getRn(opc));
+ return;
+ }
+ switch (opc & 0xf000) {
+ case MOVL_WRITE_OFFRN_OPCODE:
+ format = " MOV.L R%d, @(%d, R%d)\n";
+ printfStdoutInstr(format, getRm(opc), getDisp(opc), getRn(opc));
+ break;
+ case MOVL_READ_OFFRM_OPCODE:
+ format = " MOV.L @(%d, R%d), R%d\n";
+ printfStdoutInstr(format, getDisp(opc), getRm(opc), getRn(opc));
+ break;
+ }
+ }
+
+ static void printfStdoutInstr(const char* format, ...)
+ {
+ if (getenv("JavaScriptCoreDumpJIT")) {
+ va_list args;
+ va_start(args, format);
+ vprintfStdoutInstr(format, args);
+ va_end(args);
+ }
+ }
+
+ static void vprintfStdoutInstr(const char* format, va_list args)
+ {
+ if (getenv("JavaScriptCoreDumpJIT"))
+ WTF::dataLogFV(format, args);
+ }
+
+    // Trace helper: dump the instructions of a freshly repatched block.
+    // NOTE(review): the '<=' bound prints nbInstr + 1 entries, and
+    // 'offset + i' advances by 1 per 16-bit instruction rather than by 2
+    // bytes; callers pass inconsistent counts (linkJump vs. relinkJump), so
+    // confirm intent before changing. Trace-only output — no codegen impact.
+    static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr)
+    {
+        printfStdoutInstr(">> repatch instructions after link\n");
+        for (int i = 0; i <= nbInstr; i++)
+            printInstr(*(first + i), offset + i);
+        printfStdoutInstr(">> end repatch\n");
+    }
+#else
+ static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true) { };
+ static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr) { };
+#endif
+
+    // Rewrite the third instruction of a patchable MOV/ADD/op sequence into
+    // "MOV.L @Rm, Rn" in place, preserving its register fields (bits 0x0ff0);
+    // no-op (and no cache flush) if it is already that load. Counterpart of
+    // replaceWithAddressComputation() below.
+    static void replaceWithLoad(void* instructionStart)
+    {
+        SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+        insPtr += 2; // skip MOV and ADD opcodes
+
+        if (((*insPtr) & 0xf00f) != MOVL_READ_RM_OPCODE) {
+            *insPtr = MOVL_READ_RM_OPCODE | (*insPtr & 0x0ff0);
+            cacheFlush(insPtr, sizeof(SH4Word));
+        }
+    }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart);
+
+ insPtr += 2; // skip MOV and ADD opcodes
+
+ if (((*insPtr) & 0xf00f) != MOV_OPCODE) {
+ *insPtr = MOV_OPCODE | (*insPtr & 0x0ff0);
+ cacheFlush(insPtr, sizeof(SH4Word));
+ }
+ }
+
+private:
+ SH4Buffer m_buffer;
+ int m_claimscratchReg;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(SH4)
+
+#endif // SH4Assembler_h
diff --git a/src/3rdparty/masm/assembler/X86Assembler.h b/src/3rdparty/masm/assembler/X86Assembler.h
new file mode 100644
index 0000000000..092e775ab5
--- /dev/null
+++ b/src/3rdparty/masm/assembler/X86Assembler.h
@@ -0,0 +1,2540 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef X86Assembler_h
+#define X86Assembler_h
+
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <stdint.h>
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
+
+namespace X86Registers {
+ typedef enum {
+ eax,
+ ecx,
+ edx,
+ ebx,
+ esp,
+ ebp,
+ esi,
+ edi,
+
+#if CPU(X86_64)
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+#endif
+ } RegisterID;
+
+ typedef enum {
+ xmm0,
+ xmm1,
+ xmm2,
+ xmm3,
+ xmm4,
+ xmm5,
+ xmm6,
+ xmm7,
+ } XMMRegisterID;
+}
+
+class X86Assembler {
+public:
+ typedef X86Registers::RegisterID RegisterID;
+ typedef X86Registers::XMMRegisterID XMMRegisterID;
+ typedef XMMRegisterID FPRegisterID;
+
+ typedef enum {
+ ConditionO,
+ ConditionNO,
+ ConditionB,
+ ConditionAE,
+ ConditionE,
+ ConditionNE,
+ ConditionBE,
+ ConditionA,
+ ConditionS,
+ ConditionNS,
+ ConditionP,
+ ConditionNP,
+ ConditionL,
+ ConditionGE,
+ ConditionLE,
+ ConditionG,
+
+ ConditionC = ConditionB,
+ ConditionNC = ConditionAE,
+ } Condition;
+
+private:
+ typedef enum {
+ OP_ADD_EvGv = 0x01,
+ OP_ADD_GvEv = 0x03,
+ OP_OR_EvGv = 0x09,
+ OP_OR_GvEv = 0x0B,
+ OP_2BYTE_ESCAPE = 0x0F,
+ OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
+ OP_SUB_EvGv = 0x29,
+ OP_SUB_GvEv = 0x2B,
+ PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
+ OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
+ OP_CMP_EvGv = 0x39,
+ OP_CMP_GvEv = 0x3B,
+#if CPU(X86_64)
+ PRE_REX = 0x40,
+#endif
+ OP_PUSH_EAX = 0x50,
+ OP_POP_EAX = 0x58,
+#if CPU(X86_64)
+ OP_MOVSXD_GvEv = 0x63,
+#endif
+ PRE_OPERAND_SIZE = 0x66,
+ PRE_SSE_66 = 0x66,
+ OP_PUSH_Iz = 0x68,
+ OP_IMUL_GvEvIz = 0x69,
+ OP_GROUP1_EbIb = 0x80,
+ OP_GROUP1_EvIz = 0x81,
+ OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EbGb = 0x84,
+ OP_TEST_EvGv = 0x85,
+ OP_XCHG_EvGv = 0x87,
+ OP_MOV_EbGb = 0x88,
+ OP_MOV_EvGv = 0x89,
+ OP_MOV_GvEv = 0x8B,
+ OP_LEA = 0x8D,
+ OP_GROUP1A_Ev = 0x8F,
+ OP_NOP = 0x90,
+ OP_CDQ = 0x99,
+ OP_MOV_EAXOv = 0xA1,
+ OP_MOV_OvEAX = 0xA3,
+ OP_MOV_EAXIv = 0xB8,
+ OP_GROUP2_EvIb = 0xC1,
+ OP_RET = 0xC3,
+ OP_GROUP11_EvIb = 0xC6,
+ OP_GROUP11_EvIz = 0xC7,
+ OP_INT3 = 0xCC,
+ OP_GROUP2_Ev1 = 0xD1,
+ OP_GROUP2_EvCL = 0xD3,
+ OP_ESCAPE_DD = 0xDD,
+ OP_CALL_rel32 = 0xE8,
+ OP_JMP_rel32 = 0xE9,
+ PRE_SSE_F2 = 0xF2,
+ PRE_SSE_F3 = 0xF3,
+ OP_HLT = 0xF4,
+ OP_GROUP3_EbIb = 0xF6,
+ OP_GROUP3_Ev = 0xF7,
+ OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
+ OP_GROUP5_Ev = 0xFF,
+ } OneByteOpcodeID;
+
+ typedef enum {
+ OP2_MOVSD_VsdWsd = 0x10,
+ OP2_MOVSD_WsdVsd = 0x11,
+ OP2_MOVSS_VsdWsd = 0x10,
+ OP2_MOVSS_WsdVsd = 0x11,
+ OP2_CVTSI2SD_VsdEd = 0x2A,
+ OP2_CVTTSD2SI_GdWsd = 0x2C,
+ OP2_UCOMISD_VsdWsd = 0x2E,
+ OP2_ADDSD_VsdWsd = 0x58,
+ OP2_MULSD_VsdWsd = 0x59,
+ OP2_CVTSD2SS_VsdWsd = 0x5A,
+ OP2_CVTSS2SD_VsdWsd = 0x5A,
+ OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_SQRTSD_VsdWsd = 0x51,
+ OP2_ANDNPD_VpdWpd = 0x55,
+ OP2_XORPD_VpdWpd = 0x57,
+ OP2_MOVD_VdEd = 0x6E,
+ OP2_MOVD_EdVd = 0x7E,
+ OP2_JCC_rel32 = 0x80,
+ OP_SETCC = 0x90,
+ OP2_IMUL_GvEv = 0xAF,
+ OP2_MOVZX_GvEb = 0xB6,
+ OP2_MOVSX_GvEb = 0xBE,
+ OP2_MOVZX_GvEw = 0xB7,
+ OP2_MOVSX_GvEw = 0xBF,
+ OP2_PEXTRW_GdUdIb = 0xC5,
+ OP2_PSLLQ_UdqIb = 0x73,
+ OP2_PSRLQ_UdqIb = 0x73,
+ OP2_POR_VdqWdq = 0XEB,
+ } TwoByteOpcodeID;
+
+ TwoByteOpcodeID jccRel32(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
+ }
+
+ TwoByteOpcodeID setccOpcode(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP_SETCC + cond);
+ }
+
+ typedef enum {
+ GROUP1_OP_ADD = 0,
+ GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
+ GROUP1_OP_AND = 4,
+ GROUP1_OP_SUB = 5,
+ GROUP1_OP_XOR = 6,
+ GROUP1_OP_CMP = 7,
+
+ GROUP1A_OP_POP = 0,
+
+ GROUP2_OP_ROL = 0,
+ GROUP2_OP_ROR = 1,
+ GROUP2_OP_RCL = 2,
+ GROUP2_OP_RCR = 3,
+
+ GROUP2_OP_SHL = 4,
+ GROUP2_OP_SHR = 5,
+ GROUP2_OP_SAR = 7,
+
+ GROUP3_OP_TEST = 0,
+ GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
+ GROUP3_OP_IDIV = 7,
+
+ GROUP5_OP_CALLN = 2,
+ GROUP5_OP_JMPN = 4,
+ GROUP5_OP_PUSH = 6,
+
+ GROUP11_MOV = 0,
+
+ GROUP14_OP_PSLLQ = 6,
+ GROUP14_OP_PSRLQ = 2,
+
+ ESCAPE_DD_FSTP_doubleReal = 3,
+ } GroupOpcodeID;
+
+ class X86InstructionFormatter;
+public:
+
+ X86Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ // Stack operations:
+
+ void push_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_PUSH_EAX, reg);
+ }
+
+ void pop_r(RegisterID reg)
+ {
+ m_formatter.oneByteOp(OP_POP_EAX, reg);
+ }
+
+ void push_i32(int imm)
+ {
+ m_formatter.oneByteOp(OP_PUSH_Iz);
+ m_formatter.immediate32(imm);
+ }
+
+ void push_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
+ }
+
+ void pop_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
+ }
+
+ // Arithmetic operations:
+
+#if !CPU(X86_64)
+ void adcl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void addl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
+ }
+
+ void addl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void addl_mr(const void* addr, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
+ }
+#endif
+
+ void addl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
+ }
+
+ void addl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void addq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
+ }
+
+ void addq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
+ }
+
+ void addq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void addq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void addl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void andl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
+ }
+
+ void andl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
+ }
+
+ void andl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
+ }
+
+ void andl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void andl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void andq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
+ }
+
+ void andq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void andl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void negl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+
+#if CPU(X86_64)
+ void negq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+#endif
+
+ void negl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
+ }
+
+ void notl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
+ }
+
+ void notl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+ }
+
+ void orl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
+ }
+
+ void orl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
+ }
+
+ void orl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
+ }
+
+ void orl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void orq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
+ }
+
+ void orq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void orl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void orl_rm(RegisterID src, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
+ }
+#endif
+
+ void subl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
+ }
+
+ void subl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
+ }
+
+ void subl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
+ }
+
+ void subl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void subl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void subq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
+ }
+
+ void subq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void subl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void xorl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
+ }
+
+ void xorl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void xorl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
+ void xorq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
+ }
+
+ void xorq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void xorq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void rorq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+#endif
+
+ void sarl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void sarl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void shrl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shrl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+ }
+
+ void shll_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shll_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
+ }
+
+#if CPU(X86_64)
+ void sarq_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
+ }
+
+ void sarq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+#endif
+
+ void imull_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
+ }
+
+ void imull_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
+ }
+
+ void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
+ m_formatter.immediate32(value);
+ }
+
+ void idivl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
+ }
+
+ // Comparisons:
+
+ void cmpl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpl_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpl_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_ir_force32(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void cmpl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void cmpb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpl_im_force32(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if CPU(X86_64)
+ void cmpq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
+ }
+
+ void cmpq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
+ }
+
+ void cmpq_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
+ }
+
+ void cmpq_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+#else
+ void cmpl_rm(RegisterID reg, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
+ }
+
+ void cmpl_im(int imm, const void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
+ void cmpw_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
+ }
+
+ void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate16(imm);
+ }
+ }
+
+ void testl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testb_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void testb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if CPU(X86_64)
+ void testq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
+ }
+
+ void testq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
+ }
+
+ void testq_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void testw_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
+ void testb_i8r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void setCC_r(Condition cond, RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
+ }
+
+ void sete_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
+ }
+
+ void setz_r(RegisterID dst)
+ {
+ sete_r(dst);
+ }
+
+ void setne_r(RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
+ }
+
+ void setnz_r(RegisterID dst)
+ {
+ setne_r(dst);
+ }
+
+ // Various move ops:
+
+ void cdq()
+ {
+ m_formatter.oneByteOp(OP_CDQ);
+ }
+
+ void fstpl(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
+ }
+
+ void xchgl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+ }
+
+#if CPU(X86_64)
+ void xchgq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+ }
+#endif
+
+ void movl_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_mEAX(const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXOv);
+#if CPU(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movl_i32r(int imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if !CPU(X86_64)
+ void movb_i8m(int imm, const void* addr)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void movb_i8m(int imm, int offset, RegisterID base)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
+ }
+
+ void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_EAXm(const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_OvEAX);
+#if CPU(X86_64)
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+#else
+ m_formatter.immediate32(reinterpret_cast<int>(addr));
+#endif
+ }
+
+#if CPU(X86_64)
+ void movq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
+ }
+
+ void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movq_mEAX(const void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXOv);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_EAXm(const void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
+ void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
+ }
+
+ void movq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
+ void movq_i64r(int64_t imm, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
+ m_formatter.immediate64(imm);
+ }
+
+ void movsxd_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
+ }
+
+
+#else
+ void movl_rm(RegisterID src, const void* addr)
+ {
+ if (src == X86Registers::eax)
+ movl_EAXm(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
+ }
+
+ void movl_mr(const void* addr, RegisterID dst)
+ {
+ if (dst == X86Registers::eax)
+ movl_mEAX(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
+ }
+
+ void movl_i32m(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
+ m_formatter.immediate32(imm);
+ }
+#endif
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
+ }
+
+ void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movswl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
+ }
+
+ void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
+ }
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
+ }
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
+ }
+
+ void movzbl_rr(RegisterID src, RegisterID dst)
+ {
+ // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
+ // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
+ // REX prefixes are defined to be silently ignored by the processor.
+ m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
+ }
+
+ void leal_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_LEA, dst, base, offset);
+ }
+#if CPU(X86_64)
+ void leaq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
+ }
+#endif
+
+ // Flow control:
+
+ AssemblerLabel call()
+ {
+ m_formatter.oneByteOp(OP_CALL_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel call(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
+ return m_formatter.label();
+ }
+
+ void call_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
+ }
+
+ AssemblerLabel jmp()
+ {
+ m_formatter.oneByteOp(OP_JMP_rel32);
+ return m_formatter.immediateRel32();
+ }
+
+    // Return an AssemblerLabel so we have a label to the jump, so we can use this
+    // to make a tail recursive call on x86-64. The MacroAssembler
+    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
+ AssemblerLabel jmp_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
+ return m_formatter.label();
+ }
+
+ void jmp_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void jmp_m(const void* address)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
+ }
+#endif
+
+ AssemblerLabel jne()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jnz()
+ {
+ return jne();
+ }
+
+ AssemblerLabel je()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jz()
+ {
+ return je();
+ }
+
+ AssemblerLabel jl()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionL));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jb()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionB));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jle()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionLE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jbe()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionBE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jge()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionGE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jg()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionG));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel ja()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionA));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jae()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionAE));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jo()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionO));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jnp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNP));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionP));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel js()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionS));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jCC(Condition cond)
+ {
+ m_formatter.twoByteOp(jccRel32(cond));
+ return m_formatter.immediateRel32();
+ }
+
+ // SSE operations:
+
+ void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void addsd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
+ }
+#endif
+
+ void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+ }
+
+ void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
+ }
+#endif
+
+ void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+
+ void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
+ }
+
+ void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
+ }
+
+#if CPU(X86_64)
+ void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+#endif
+
+ void movd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+
+#if CPU(X86_64)
+ void movq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
+ }
+
+ void movq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+#endif
+
+ void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
+ }
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
+ void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+ }
+
+ void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void movsd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
+ }
+ void movsd_rm(XMMRegisterID src, const void* address)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
+ }
+#endif
+
+ void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
+ m_formatter.immediate8(whichWord);
+ }
+
+ void psllq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void psrlq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void por_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ // Misc instructions:
+
+ void int3()
+ {
+ m_formatter.oneByteOp(OP_INT3);
+ }
+
+ void ret()
+ {
+ m_formatter.oneByteOp(OP_RET);
+ }
+
+ void predictNotTaken()
+ {
+ m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
+ }
+
+ // Assembler admin methods:
+
+ size_t codeSize() const
+ {
+ return m_formatter.codeSize();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ m_formatter.oneByteOp(OP_HLT);
+
+ return label();
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+    // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(from.isSet());
+ ASSERT(to.isSet());
+
+ char* code = reinterpret_cast<char*>(m_formatter.data());
+ ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
+ setRel32(code + from.m_offset, code + to.m_offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ ASSERT(where.isSet());
+
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ASSERT(value >= std::numeric_limits<int8_t>::min());
+ ASSERT(value <= std::numeric_limits<int8_t>::max());
+ setInt8(where, value);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ setInt32(where, value);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ setPointer(where, value);
+ }
+
+ static void* readPointer(void* where)
+ {
+ return reinterpret_cast<void**>(where)[-1];
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
+ intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
+ ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
+ *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return 5;
+ }
+
+#if CPU(X86_64)
+ static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
+ {
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
+ ptr[1] = OP_MOV_EAXIv | (dst & 7);
+
+ union {
+ uint64_t asWord;
+ uint8_t asBytes[8];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+ }
+#endif
+
+ static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+ }
+
+ static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
+ {
+ ASSERT_UNUSED(offset, !offset);
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ break;
+ case OP_LEA:
+ *ptr = OP_MOV_GvEv;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ *ptr = OP_LEA;
+ break;
+ case OP_LEA:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ return m_formatter.executableCopy(globalData, ownerUID, effort);
+ }
+
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+ void nop()
+ {
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ // This is a no-op on x86
+ ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
+
+private:
+
+ static void setPointer(void* where, void* value)
+ {
+ reinterpret_cast<void**>(where)[-1] = value;
+ }
+
+ static void setInt32(void* where, int32_t value)
+ {
+ reinterpret_cast<int32_t*>(where)[-1] = value;
+ }
+
+ static void setInt8(void* where, int8_t value)
+ {
+ reinterpret_cast<int8_t*>(where)[-1] = value;
+ }
+
+ static void setRel32(void* from, void* to)
+ {
+ intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ ASSERT(offset == static_cast<int32_t>(offset));
+
+ setInt32(from, offset);
+ }
+
+ class X86InstructionFormatter {
+
+ static const int maxInstructionSize = 16;
+
+ public:
+
+ enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister,
+ };
+
+ // Legacy prefix bytes:
+ //
+        // These are emitted prior to the instruction.
+
+ void prefix(OneByteOpcodeID pre)
+ {
+ m_buffer.putByte(pre);
+ }
+
+ // Word-sized operands / no operand instruction formatters.
+ //
+ // In addition to the opcode, the following operand permutations are supported:
+ // * None - instruction takes no operands.
+ // * One register - the low three bits of the RegisterID are added into the opcode.
+ // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
+ // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
+ // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
+ //
+ // For 32-bit x86 targets, the address operand may also be provided as a void*.
+ // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
+ //
+ // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
+
+ void oneByteOp(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
+
+ void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+ void twoByteOp(TwoByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, index, base);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+#if CPU(X86_64)
+ // Quad-word-sized operands:
+ //
+        // Used to format 64-bit operations, planting a REX.w prefix.
+ // When planting d64 or f64 instructions, not requiring a REX.w prefix,
+ // the normal (non-'64'-postfixed) formatters should be used.
+
+ void oneByteOp64(OneByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, 0);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(0, 0, reg);
+ m_buffer.putByteUnchecked(opcode + (reg & 7));
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp32(reg, base, offset);
+ }
+
+ void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
+
+ void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+ void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+#endif
+
+ // Byte-operands:
+ //
+ // These methods format byte operations. Byte operations differ from the normal
+ // formatters in the circumstances under which they will decide to emit REX prefixes.
+ // These should be used where any register operand signifies a byte register.
+ //
+        // The distinction is due to the handling of register numbers in the range 4..7 on
+ // x86-64. These register numbers may either represent the second byte of the first
+ // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
+ //
+ // Since ah..bh cannot be used in all permutations of operands (specifically cannot
+ // be accessed where a REX prefix is present), these are likely best treated as
+ // deprecated. In order to ensure the correct registers spl..dil are selected a
+ // REX prefix will be emitted for any byte register operand in the range 4..15.
+ //
+        // These formatters may be used in instructions with a mix of operand sizes, in which
+        // case an unnecessary REX will be emitted, for example:
+ // movzbl %al, %edi
+ // In this case a REX will be planted since edi is 7 (and were this a byte operand
+ // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
+ // be silently ignored by the processor.
+ //
+ // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
+ // is provided to check byte register operands.
+
+ void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(groupOp, rm);
+ }
+
+ // Immediates:
+ //
+        // An immediate should be appended where appropriate after an op has been emitted.
+ // The writes are unchecked since the opcode formatters above will have ensured space.
+
+ void immediate8(int imm)
+ {
+ m_buffer.putByteUnchecked(imm);
+ }
+
+ void immediate16(int imm)
+ {
+ m_buffer.putShortUnchecked(imm);
+ }
+
+ void immediate32(int imm)
+ {
+ m_buffer.putIntUnchecked(imm);
+ }
+
+ void immediate64(int64_t imm)
+ {
+ m_buffer.putInt64Unchecked(imm);
+ }
+
+ AssemblerLabel immediateRel32()
+ {
+ m_buffer.putIntUnchecked(0);
+ return label();
+ }
+
+ // Administrative methods:
+
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ return m_buffer.executableCopy(globalData, ownerUID, effort);
+ }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ private:
+
+ // Internals; ModRm and REX formatters.
+
+ static const RegisterID noBase = X86Registers::ebp;
+ static const RegisterID hasSib = X86Registers::esp;
+ static const RegisterID noIndex = X86Registers::esp;
+#if CPU(X86_64)
+ static const RegisterID noBase2 = X86Registers::r13;
+ static const RegisterID hasSib2 = X86Registers::r12;
+
+        // Registers r8 & above require a REX prefix.
+ inline bool regRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::r8);
+ }
+
+ // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
+ inline bool byteRegRequiresRex(int reg)
+ {
+ return (reg >= X86Registers::esp);
+ }
+
+ // Format a REX prefix byte.
+ inline void emitRex(bool w, int r, int x, int b)
+ {
+ ASSERT(r >= 0);
+ ASSERT(x >= 0);
+ ASSERT(b >= 0);
+ m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
+ }
+
+ // Used to plant a REX byte with REX.w set (for 64-bit operations).
+ inline void emitRexW(int r, int x, int b)
+ {
+ emitRex(true, r, x, b);
+ }
+
+ // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
+ // regRequiresRex() to check other registers (i.e. address base & index).
+ inline void emitRexIf(bool condition, int r, int x, int b)
+ {
+ if (condition) emitRex(false, r, x, b);
+ }
+
+ // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
+ inline void emitRexIfNeeded(int r, int x, int b)
+ {
+ emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
+ }
+#else
+ // No REX prefix bytes on 32-bit x86.
+ inline bool regRequiresRex(int) { return false; }
+ inline bool byteRegRequiresRex(int) { return false; }
+ inline void emitRexIf(bool, int, int, int) {}
+ inline void emitRexIfNeeded(int, int, int) {}
+#endif
+
+ void putModRm(ModRmMode mode, int reg, RegisterID rm)
+ {
+ m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
+ }
+
+ void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(mode != ModRmRegister);
+
+ putModRm(mode, reg, hasSib);
+ m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
+ }
+
+ void registerModRM(int reg, RegisterID rm)
+ {
+ putModRm(ModRmRegister, reg, rm);
+ }
+
+ void memoryModRM(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
+ putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ }
+ } else {
+#if CPU(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRm(ModRmMemoryNoDisp, reg, base);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+ }
+
+ void memoryModRM_disp8(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+ ASSERT(CAN_SIGN_EXTEND_8_32(offset));
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ }
+ }
+
+ void memoryModRM_disp32(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
+ m_buffer.putIntUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp32, reg, base);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+ void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ ASSERT(index != noIndex);
+
+#if CPU(X86_64)
+ if (!offset && (base != noBase) && (base != noBase2))
+#else
+ if (!offset && (base != noBase))
+#endif
+ putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
+ else if (CAN_SIGN_EXTEND_8_32(offset)) {
+ putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
+ m_buffer.putIntUnchecked(offset);
+ }
+ }
+
+#if !CPU(X86_64)
+ void memoryModRM(int reg, const void* address)
+ {
+ // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
+ putModRm(ModRmMemoryNoDisp, reg, noBase);
+ m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
+ }
+#endif
+
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
+
+#endif // X86Assembler_h
diff --git a/src/3rdparty/masm/config.h b/src/3rdparty/masm/config.h
new file mode 100644
index 0000000000..5f59f311e3
--- /dev/null
+++ b/src/3rdparty/masm/config.h
@@ -0,0 +1,56 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef MASM_CONFIG_H
+#define MASM_CONFIG_H
+
+#include <wtf/Platform.h>
+#ifdef __cplusplus
+#include <wtf/Vector.h>
+#include <wtf/FastAllocBase.h>
+#include <wtf/RefPtr.h>
+#include <cmath>
+#else
+#include <math.h>
+#endif
+#include <limits.h>
+
+#endif // MASM_CONFIG_H
diff --git a/src/3rdparty/masm/create_regex_tables b/src/3rdparty/masm/create_regex_tables
new file mode 100644
index 0000000000..7544b75cd9
--- /dev/null
+++ b/src/3rdparty/masm/create_regex_tables
@@ -0,0 +1,121 @@
+# Copyright (C) 2010, 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+types = {
+ "wordchar": { "UseTable" : True, "data": ['_', ('0','9'), ('A', 'Z'), ('a','z')]},
+ "nonwordchar": { "UseTable" : True, "Inverse": "wordchar", "data": ['`', (0, ord('0') - 1), (ord('9') + 1, ord('A') - 1), (ord('Z') + 1, ord('_') - 1), (ord('z') + 1, 0xffff)]},
+ "newline": { "UseTable" : False, "data": ['\n', '\r', 0x2028, 0x2029]},
+ "spaces": { "UseTable" : True, "data": [' ', ('\t', '\r'), 0xa0, 0x1680, 0x180e, 0x2028, 0x2029, 0x202f, 0x205f, 0x3000, (0x2000, 0x200a), 0xfeff]},
+ "nonspaces": { "UseTable" : True, "Inverse": "spaces", "data": [(0, ord('\t') - 1), (ord('\r') + 1, ord(' ') - 1), (ord(' ') + 1, 0x009f), (0x00a1, 0x167f), (0x1681, 0x180d), (0x180f, 0x1fff), (0x200b, 0x2027), (0x202a, 0x202e), (0x2030, 0x205e), (0x2060, 0x2fff), (0x3001, 0xfefe), (0xff00, 0xffff)]},
+ "digits": { "UseTable" : False, "data": [('0', '9')]},
+ "nondigits": { "UseTable" : False, "Inverse": "digits", "data": [(0, ord('0') - 1), (ord('9') + 1, 0xffff)] }
+}
+entriesPerLine = 50
+arrays = "";
+functions = "";
+emitTables = (len(sys.argv) < 2 or sys.argv[1] != "--no-tables")
+
+for name, classes in types.items():
+ ranges = [];
+ size = 0;
+ for _class in classes["data"]:
+ if type(_class) == str:
+ ranges.append((ord(_class), ord(_class)))
+ elif type(_class) == int:
+ ranges.append((_class, _class))
+ else:
+ (min, max) = _class;
+ if type(min) == str:
+ min = ord(min)
+ if type(max) == str:
+ max = ord(max)
+ if max > 0x7f and min <= 0x7f:
+ ranges.append((min, 0x7f))
+ min = 0x80
+ ranges.append((min,max))
+ ranges.sort();
+
+ if emitTables and classes["UseTable"] and (not "Inverse" in classes):
+ array = ("static const char _%sData[65536] = {\n" % name);
+ i = 0
+ for (min,max) in ranges:
+ while i < min:
+ i = i + 1
+ array += ('0,')
+ if (i % entriesPerLine == 0) and (i != 0):
+ array += ('\n')
+ while i <= max:
+ i = i + 1
+ if (i == 65536):
+ array += ("1")
+ else:
+ array += ('1,')
+ if (i % entriesPerLine == 0) and (i != 0):
+ array += ('\n')
+ while i < 0xffff:
+ array += ("0,")
+ i = i + 1;
+ if (i % entriesPerLine == 0) and (i != 0):
+ array += ('\n')
+ if i == 0xffff:
+ array += ("0")
+ array += ("\n};\n\n");
+ arrays += array
+
+ # Generate createFunction:
+ function = "";
+ function += ("CharacterClass* %sCreate()\n" % name)
+ function += ("{\n")
+ if emitTables and classes["UseTable"]:
+ if "Inverse" in classes:
+ function += (" CharacterClass* characterClass = new CharacterClass(_%sData, true);\n" % (classes["Inverse"]))
+ else:
+ function += (" CharacterClass* characterClass = new CharacterClass(_%sData, false);\n" % (name))
+ else:
+ function += (" CharacterClass* characterClass = new CharacterClass;\n")
+ for (min, max) in ranges:
+ if (min == max):
+ if (min > 127):
+ function += (" characterClass->m_matchesUnicode.append(0x%04x);\n" % min)
+ else:
+ function += (" characterClass->m_matches.append(0x%02x);\n" % min)
+ continue
+ if (min > 127) or (max > 127):
+ function += (" characterClass->m_rangesUnicode.append(CharacterRange(0x%04x, 0x%04x));\n" % (min, max))
+ else:
+ function += (" characterClass->m_ranges.append(CharacterRange(0x%02x, 0x%02x));\n" % (min, max))
+ function += (" return characterClass;\n")
+ function += ("}\n\n")
+ functions += function
+
+if (len(sys.argv) > 1):
+ f = open(sys.argv[-1], "w")
+ f.write(arrays)
+ f.write(functions)
+ f.close()
+else:
+ print(arrays)
+ print(functions)
+
diff --git a/src/3rdparty/masm/disassembler/Disassembler.cpp b/src/3rdparty/masm/disassembler/Disassembler.cpp
new file mode 100644
index 0000000000..3fed2cdab8
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/Disassembler.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Disassembler.h"
+
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/DataLog.h>
+
+namespace JSC {
+
+void disassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out)
+{
+ if (tryToDisassemble(codePtr, size, prefix, out))
+ return;
+
+ out.printf("%sdisassembly not available for range %p...%p\n", prefix, codePtr.executableAddress(), static_cast<char*>(codePtr.executableAddress()) + size);
+}
+
+} // namespace JSC
+
diff --git a/src/3rdparty/masm/disassembler/Disassembler.h b/src/3rdparty/masm/disassembler/Disassembler.h
new file mode 100644
index 0000000000..a087a657b3
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/Disassembler.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Disassembler_h
+#define Disassembler_h
+
+#include <wtf/Platform.h>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class MacroAssemblerCodePtr;
+
+#if ENABLE(DISASSEMBLER)
+bool tryToDisassemble(const MacroAssemblerCodePtr&, size_t, const char* prefix, PrintStream&);
+#else
+inline bool tryToDisassemble(const MacroAssemblerCodePtr&, size_t, const char*, PrintStream&)
+{
+ return false;
+}
+#endif
+
+// Prints either the disassembly, or a line of text indicating that disassembly failed and
+// the range of machine code addresses.
+void disassemble(const MacroAssemblerCodePtr&, size_t, const char* prefix, PrintStream& out);
+
+} // namespace JSC
+
+#endif // Disassembler_h
+
diff --git a/src/3rdparty/masm/disassembler/UDis86Disassembler.cpp b/src/3rdparty/masm/disassembler/UDis86Disassembler.cpp
new file mode 100644
index 0000000000..63c235b920
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/UDis86Disassembler.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Disassembler.h"
+
+#if USE(UDIS86)
+
+#include "MacroAssemblerCodeRef.h"
+#include "udis86.h"
+
+namespace JSC {
+
+bool tryToDisassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out)
+{
+ ud_t disassembler;
+ ud_init(&disassembler);
+ ud_set_input_buffer(&disassembler, static_cast<unsigned char*>(codePtr.executableAddress()), size);
+#if CPU(X86_64)
+ ud_set_mode(&disassembler, 64);
+#else
+ ud_set_mode(&disassembler, 32);
+#endif
+ ud_set_pc(&disassembler, bitwise_cast<uintptr_t>(codePtr.executableAddress()));
+ ud_set_syntax(&disassembler, UD_SYN_ATT);
+
+ uint64_t currentPC = disassembler.pc;
+ while (ud_disassemble(&disassembler)) {
+ char pcString[20];
+ snprintf(pcString, sizeof(pcString), "0x%lx", static_cast<unsigned long>(currentPC));
+ out.printf("%s%16s: %s\n", prefix, pcString, ud_insn_asm(&disassembler));
+ currentPC = disassembler.pc;
+ }
+
+ return true;
+}
+
+} // namespace JSC
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/differences.txt b/src/3rdparty/masm/disassembler/udis86/differences.txt
new file mode 100644
index 0000000000..dc225b6ffe
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/differences.txt
@@ -0,0 +1,24 @@
+This documents the differences between the stock version of udis86 and the one found
+here:
+
+- All files were given a "udis86" prefix unless already named "udis86".
+
+- assert() has been changed to ASSERT()
+
+- Mass rename of udis86_input.h inp_ prefixed functions and macros to ud_inp_ to
+ avoid namespace pollution.
+
+- Removal of KERNEL checks.
+
+- Added #include of udis86_extern.h in udis86_decode.c.
+
+- Removed s_ie__pause and s_ie__nop from udis86_decode.c, since they weren't used.
+
+- Made udis86_syn.h use WTF_ATTRIBUTE_PRINTF. This required making a bunch of little
+ fixes to make the compiler's format string warnings go away.
+
+- Made the code in udis86_syn.h use vsnprintf() instead of vsprintf().
+
+- Fixed udis86_syn-att.c's jump destination printing to work correctly in 64-bit mode.
+
+- Added --outputDir option to itab.py.
diff --git a/src/3rdparty/masm/disassembler/udis86/itab.py b/src/3rdparty/masm/disassembler/udis86/itab.py
new file mode 100644
index 0000000000..07e20a6e10
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/itab.py
@@ -0,0 +1,360 @@
+# udis86 - scripts/itab.py
+#
+# Copyright (c) 2009 Vivek Thampi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import OptionParser
+import os
+import sys
+
+sys.path.append( '../scripts' );
+
+import ud_optable
+import ud_opcode
+
+class UdItabGenerator( ud_opcode.UdOpcodeTables ):
+
+ OperandDict = {
+ "Ap" : [ "OP_A" , "SZ_P" ],
+ "E" : [ "OP_E" , "SZ_NA" ],
+ "Eb" : [ "OP_E" , "SZ_B" ],
+ "Ew" : [ "OP_E" , "SZ_W" ],
+ "Ev" : [ "OP_E" , "SZ_V" ],
+ "Ed" : [ "OP_E" , "SZ_D" ],
+ "Eq" : [ "OP_E" , "SZ_Q" ],
+ "Ez" : [ "OP_E" , "SZ_Z" ],
+ "Ex" : [ "OP_E" , "SZ_MDQ" ],
+ "Ep" : [ "OP_E" , "SZ_P" ],
+ "G" : [ "OP_G" , "SZ_NA" ],
+ "Gb" : [ "OP_G" , "SZ_B" ],
+ "Gw" : [ "OP_G" , "SZ_W" ],
+ "Gv" : [ "OP_G" , "SZ_V" ],
+ "Gy" : [ "OP_G" , "SZ_MDQ" ],
+ "Gy" : [ "OP_G" , "SZ_MDQ" ],
+ "Gd" : [ "OP_G" , "SZ_D" ],
+ "Gq" : [ "OP_G" , "SZ_Q" ],
+ "Gx" : [ "OP_G" , "SZ_MDQ" ],
+ "Gz" : [ "OP_G" , "SZ_Z" ],
+ "M" : [ "OP_M" , "SZ_NA" ],
+ "Mb" : [ "OP_M" , "SZ_B" ],
+ "Mw" : [ "OP_M" , "SZ_W" ],
+ "Ms" : [ "OP_M" , "SZ_W" ],
+ "Md" : [ "OP_M" , "SZ_D" ],
+ "Mq" : [ "OP_M" , "SZ_Q" ],
+ "Mt" : [ "OP_M" , "SZ_T" ],
+ "Mo" : [ "OP_M" , "SZ_O" ],
+ "MwRv" : [ "OP_MR" , "SZ_WV" ],
+ "MdRy" : [ "OP_MR" , "SZ_DY" ],
+ "MbRv" : [ "OP_MR" , "SZ_BV" ],
+ "I1" : [ "OP_I1" , "SZ_NA" ],
+ "I3" : [ "OP_I3" , "SZ_NA" ],
+ "Ib" : [ "OP_I" , "SZ_B" ],
+ "Isb" : [ "OP_I" , "SZ_SB" ],
+ "Iw" : [ "OP_I" , "SZ_W" ],
+ "Iv" : [ "OP_I" , "SZ_V" ],
+ "Iz" : [ "OP_I" , "SZ_Z" ],
+ "Jv" : [ "OP_J" , "SZ_V" ],
+ "Jz" : [ "OP_J" , "SZ_Z" ],
+ "Jb" : [ "OP_J" , "SZ_B" ],
+ "R" : [ "OP_R" , "SZ_RDQ" ],
+ "C" : [ "OP_C" , "SZ_NA" ],
+ "D" : [ "OP_D" , "SZ_NA" ],
+ "S" : [ "OP_S" , "SZ_NA" ],
+ "Ob" : [ "OP_O" , "SZ_B" ],
+ "Ow" : [ "OP_O" , "SZ_W" ],
+ "Ov" : [ "OP_O" , "SZ_V" ],
+ "V" : [ "OP_V" , "SZ_O" ],
+ "W" : [ "OP_W" , "SZ_O" ],
+ "Wsd" : [ "OP_W" , "SZ_O" ],
+ "Wss" : [ "OP_W" , "SZ_O" ],
+ "P" : [ "OP_P" , "SZ_Q" ],
+ "Q" : [ "OP_Q" , "SZ_Q" ],
+ "VR" : [ "OP_VR" , "SZ_O" ],
+ "PR" : [ "OP_PR" , "SZ_Q" ],
+ "AL" : [ "OP_AL" , "SZ_NA" ],
+ "CL" : [ "OP_CL" , "SZ_NA" ],
+ "DL" : [ "OP_DL" , "SZ_NA" ],
+ "BL" : [ "OP_BL" , "SZ_NA" ],
+ "AH" : [ "OP_AH" , "SZ_NA" ],
+ "CH" : [ "OP_CH" , "SZ_NA" ],
+ "DH" : [ "OP_DH" , "SZ_NA" ],
+ "BH" : [ "OP_BH" , "SZ_NA" ],
+ "AX" : [ "OP_AX" , "SZ_NA" ],
+ "CX" : [ "OP_CX" , "SZ_NA" ],
+ "DX" : [ "OP_DX" , "SZ_NA" ],
+ "BX" : [ "OP_BX" , "SZ_NA" ],
+ "SI" : [ "OP_SI" , "SZ_NA" ],
+ "DI" : [ "OP_DI" , "SZ_NA" ],
+ "SP" : [ "OP_SP" , "SZ_NA" ],
+ "BP" : [ "OP_BP" , "SZ_NA" ],
+ "eAX" : [ "OP_eAX" , "SZ_NA" ],
+ "eCX" : [ "OP_eCX" , "SZ_NA" ],
+ "eDX" : [ "OP_eDX" , "SZ_NA" ],
+ "eBX" : [ "OP_eBX" , "SZ_NA" ],
+ "eSI" : [ "OP_eSI" , "SZ_NA" ],
+ "eDI" : [ "OP_eDI" , "SZ_NA" ],
+ "eSP" : [ "OP_eSP" , "SZ_NA" ],
+ "eBP" : [ "OP_eBP" , "SZ_NA" ],
+ "rAX" : [ "OP_rAX" , "SZ_NA" ],
+ "rCX" : [ "OP_rCX" , "SZ_NA" ],
+ "rBX" : [ "OP_rBX" , "SZ_NA" ],
+ "rDX" : [ "OP_rDX" , "SZ_NA" ],
+ "rSI" : [ "OP_rSI" , "SZ_NA" ],
+ "rDI" : [ "OP_rDI" , "SZ_NA" ],
+ "rSP" : [ "OP_rSP" , "SZ_NA" ],
+ "rBP" : [ "OP_rBP" , "SZ_NA" ],
+ "ES" : [ "OP_ES" , "SZ_NA" ],
+ "CS" : [ "OP_CS" , "SZ_NA" ],
+ "DS" : [ "OP_DS" , "SZ_NA" ],
+ "SS" : [ "OP_SS" , "SZ_NA" ],
+ "GS" : [ "OP_GS" , "SZ_NA" ],
+ "FS" : [ "OP_FS" , "SZ_NA" ],
+ "ST0" : [ "OP_ST0" , "SZ_NA" ],
+ "ST1" : [ "OP_ST1" , "SZ_NA" ],
+ "ST2" : [ "OP_ST2" , "SZ_NA" ],
+ "ST3" : [ "OP_ST3" , "SZ_NA" ],
+ "ST4" : [ "OP_ST4" , "SZ_NA" ],
+ "ST5" : [ "OP_ST5" , "SZ_NA" ],
+ "ST6" : [ "OP_ST6" , "SZ_NA" ],
+ "ST7" : [ "OP_ST7" , "SZ_NA" ],
+ "NONE" : [ "OP_NONE" , "SZ_NA" ],
+ "ALr8b" : [ "OP_ALr8b" , "SZ_NA" ],
+ "CLr9b" : [ "OP_CLr9b" , "SZ_NA" ],
+ "DLr10b" : [ "OP_DLr10b" , "SZ_NA" ],
+ "BLr11b" : [ "OP_BLr11b" , "SZ_NA" ],
+ "AHr12b" : [ "OP_AHr12b" , "SZ_NA" ],
+ "CHr13b" : [ "OP_CHr13b" , "SZ_NA" ],
+ "DHr14b" : [ "OP_DHr14b" , "SZ_NA" ],
+ "BHr15b" : [ "OP_BHr15b" , "SZ_NA" ],
+ "rAXr8" : [ "OP_rAXr8" , "SZ_NA" ],
+ "rCXr9" : [ "OP_rCXr9" , "SZ_NA" ],
+ "rDXr10" : [ "OP_rDXr10" , "SZ_NA" ],
+ "rBXr11" : [ "OP_rBXr11" , "SZ_NA" ],
+ "rSPr12" : [ "OP_rSPr12" , "SZ_NA" ],
+ "rBPr13" : [ "OP_rBPr13" , "SZ_NA" ],
+ "rSIr14" : [ "OP_rSIr14" , "SZ_NA" ],
+ "rDIr15" : [ "OP_rDIr15" , "SZ_NA" ],
+ "jWP" : [ "OP_J" , "SZ_WP" ],
+ "jDP" : [ "OP_J" , "SZ_DP" ],
+
+ }
+
+ #
+ # opcode prefix dictionary
+ #
+ PrefixDict = {
+ "aso" : "P_aso",
+ "oso" : "P_oso",
+ "rexw" : "P_rexw",
+ "rexb" : "P_rexb",
+ "rexx" : "P_rexx",
+ "rexr" : "P_rexr",
+ "seg" : "P_seg",
+ "inv64" : "P_inv64",
+ "def64" : "P_def64",
+ "depM" : "P_depM",
+ "cast1" : "P_c1",
+ "cast2" : "P_c2",
+ "cast3" : "P_c3",
+ "cast" : "P_cast",
+ "sext" : "P_sext"
+ }
+
+ InvalidEntryIdx = 0
+ InvalidEntry = { 'type' : 'invalid',
+ 'mnemonic' : 'invalid',
+ 'operands' : '',
+ 'prefixes' : '',
+ 'meta' : '' }
+
+ Itab = [] # instruction table
+ ItabIdx = 1 # instruction table index
+ GtabIdx = 0 # group table index
+ GtabMeta = []
+
+ ItabLookup = {}
+
+ MnemonicAliases = ( "invalid", "3dnow", "none", "db", "pause" )
+
+ def __init__( self, outputDir ):
+ # first itab entry (0) is Invalid
+ self.Itab.append( self.InvalidEntry )
+ self.MnemonicsTable.extend( self.MnemonicAliases )
+ self.outputDir = outputDir
+
+ def toGroupId( self, id ):
+ return 0x8000 | id
+
+ def genLookupTable( self, table, scope = '' ):
+ idxArray = [ ]
+ ( tabIdx, self.GtabIdx ) = ( self.GtabIdx, self.GtabIdx + 1 )
+ self.GtabMeta.append( { 'type' : table[ 'type' ], 'meta' : table[ 'meta' ] } )
+
+ for _idx in range( self.sizeOfTable( table[ 'type' ] ) ):
+ idx = "%02x" % _idx
+
+ e = self.InvalidEntry
+ i = self.InvalidEntryIdx
+
+ if idx in table[ 'entries' ].keys():
+ e = table[ 'entries' ][ idx ]
+
+ # leaf node (insn)
+ if e[ 'type' ] == 'insn':
+ ( i, self.ItabIdx ) = ( self.ItabIdx, self.ItabIdx + 1 )
+ self.Itab.append( e )
+ elif e[ 'type' ] != 'invalid':
+ i = self.genLookupTable( e, 'static' )
+
+ idxArray.append( i )
+
+ name = "ud_itab__%s" % tabIdx
+ self.ItabLookup[ tabIdx ] = name
+
+ self.ItabC.write( "\n" );
+ if len( scope ):
+ self.ItabC.write( scope + ' ' )
+ self.ItabC.write( "const uint16_t %s[] = {\n" % name )
+ for i in range( len( idxArray ) ):
+ if i > 0 and i % 4 == 0:
+ self.ItabC.write( "\n" )
+ if ( i%4 == 0 ):
+ self.ItabC.write( " /* %2x */" % i)
+ if idxArray[ i ] >= 0x8000:
+ self.ItabC.write( "%12s," % ("GROUP(%d)" % ( ~0x8000 & idxArray[ i ] )))
+ else:
+ self.ItabC.write( "%12d," % ( idxArray[ i ] ))
+ self.ItabC.write( "\n" )
+ self.ItabC.write( "};\n" )
+
+ return self.toGroupId( tabIdx )
+
+ def genLookupTableList( self ):
+ self.ItabC.write( "\n\n" );
+ self.ItabC.write( "struct ud_lookup_table_list_entry ud_lookup_table_list[] = {\n" )
+ for i in range( len( self.GtabMeta ) ):
+ f0 = self.ItabLookup[ i ] + ","
+ f1 = ( self.nameOfTable( self.GtabMeta[ i ][ 'type' ] ) ) + ","
+ f2 = "\"%s\"" % self.GtabMeta[ i ][ 'meta' ]
+ self.ItabC.write( " /* %03d */ { %s %s %s },\n" % ( i, f0, f1, f2 ) )
+ self.ItabC.write( "};" )
+
+ def genInsnTable( self ):
+ self.ItabC.write( "struct ud_itab_entry ud_itab[] = {\n" );
+ idx = 0
+ for e in self.Itab:
+ opr_c = [ "O_NONE", "O_NONE", "O_NONE" ]
+ pfx_c = []
+ opr = e[ 'operands' ]
+ for i in range(len(opr)):
+ if not (opr[i] in self.OperandDict.keys()):
+ print "error: invalid operand declaration: %s\n" % opr[i]
+ opr_c[i] = "O_" + opr[i]
+ opr = "%s %s %s" % (opr_c[0] + ",", opr_c[1] + ",", opr_c[2])
+
+ for p in e['prefixes']:
+ if not ( p in self.PrefixDict.keys() ):
+ print "error: invalid prefix specification: %s \n" % pfx
+ pfx_c.append( self.PrefixDict[p] )
+ if len(e['prefixes']) == 0:
+ pfx_c.append( "P_none" )
+ pfx = "|".join( pfx_c )
+
+ self.ItabC.write( " /* %04d */ { UD_I%s %s, %s },\n" \
+ % ( idx, e[ 'mnemonic' ] + ',', opr, pfx ) )
+ idx += 1
+ self.ItabC.write( "};\n" )
+
+ self.ItabC.write( "\n\n" );
+ self.ItabC.write( "const char * ud_mnemonics_str[] = {\n" )
+ self.ItabC.write( ",\n ".join( [ "\"%s\"" % m for m in self.MnemonicsTable ] ) )
+ self.ItabC.write( "\n};\n" )
+
+
+ def genItabH( self ):
+ self.ItabH = open( os.path.join(self.outputDir, "udis86_itab.h"), "w" )
+
+ # Generate Table Type Enumeration
+ self.ItabH.write( "#ifndef UD_ITAB_H\n" )
+ self.ItabH.write( "#define UD_ITAB_H\n\n" )
+
+ # table type enumeration
+ self.ItabH.write( "/* ud_table_type -- lookup table types (see lookup.c) */\n" )
+ self.ItabH.write( "enum ud_table_type {\n " )
+ enum = [ self.TableInfo[ k ][ 'name' ] for k in self.TableInfo.keys() ]
+ self.ItabH.write( ",\n ".join( enum ) )
+ self.ItabH.write( "\n};\n\n" );
+
+ # mnemonic enumeration
+ self.ItabH.write( "/* ud_mnemonic -- mnemonic constants */\n" )
+ enum = "enum ud_mnemonic_code {\n "
+ enum += ",\n ".join( [ "UD_I%s" % m for m in self.MnemonicsTable ] )
+ enum += "\n} UD_ATTR_PACKED;\n"
+ self.ItabH.write( enum )
+ self.ItabH.write( "\n" )
+
+ self.ItabH.write("\n/* itab entry operand definitions */\n");
+ operands = self.OperandDict.keys()
+ operands.sort()
+ for o in operands:
+ self.ItabH.write("#define O_%-7s { %-12s %-8s }\n" %
+ (o, self.OperandDict[o][0] + ",", self.OperandDict[o][1]));
+ self.ItabH.write("\n\n");
+
+ self.ItabH.write( "extern const char * ud_mnemonics_str[];\n" )
+
+ self.ItabH.write( "#define GROUP(n) (0x8000 | (n))" )
+
+ self.ItabH.write( "\n#endif /* UD_ITAB_H */\n" )
+
+ self.ItabH.close()
+
+
+ def genItabC( self ):
+ self.ItabC = open( os.path.join(self.outputDir, "udis86_itab.c"), "w" )
+ self.ItabC.write( "/* itab.c -- generated by itab.py, do no edit" )
+ self.ItabC.write( " */\n" );
+ self.ItabC.write( "#include \"udis86_decode.h\"\n\n" );
+
+ self.genLookupTable( self.OpcodeTable0 )
+ self.genLookupTableList()
+ self.genInsnTable()
+
+ self.ItabC.close()
+
+ def genItab( self ):
+ self.genItabC()
+ self.genItabH()
+
+def main():
+ parser = OptionParser()
+ parser.add_option("--outputDir", dest="outputDir", default="")
+ options, args = parser.parse_args()
+ generator = UdItabGenerator(os.path.normpath(options.outputDir))
+ optableXmlParser = ud_optable.UdOptableXmlParser()
+ optableXmlParser.parse( args[ 0 ], generator.addInsnDef )
+
+ generator.genItab()
+
+if __name__ == '__main__':
+ main()
diff --git a/src/3rdparty/masm/disassembler/udis86/optable.xml b/src/3rdparty/masm/disassembler/udis86/optable.xml
new file mode 100644
index 0000000000..14b4ac5935
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/optable.xml
@@ -0,0 +1,8959 @@
+<?xml version="1.0"?>
+<?xml-stylesheet href="optable.xsl" type="text/xsl"?>
+<x86optable>
+
+ <instruction>
+ <mnemonic>aaa</mnemonic>
+ <def>
+ <opc>37</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>aad</mnemonic>
+ <def>
+ <opc>d5</opc>
+ <opr>Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>aam</mnemonic>
+ <def>
+ <opc>d4</opc>
+ <opr>Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>aas</mnemonic>
+ <def>
+ <opc>3f</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>adc</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>10</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>11</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>12</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>13</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>14</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>15</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=2</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=2</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=2</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=2</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>add</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>00</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>01</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>02</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>03</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>04</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>05</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=0</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=0</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <!--
+ SSE2
+ -->
+
+ <instruction>
+ <mnemonic>addpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 58</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>and</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>20</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>21</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>22</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>23</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>24</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>25</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>80 /reg=4</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=4</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=4</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=4</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 54</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 54</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andnpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 55</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>andnps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 55</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>arpl</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>63 /m=16</opc>
+ <opr>Ew Gw</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso</pfx>
+ <opc>63 /m=32</opc>
+ <opr>Ew Gw</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsxd</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexx rexr rexb</pfx>
+ <opc>63 /m=64</opc>
+ <opr>Gv Ed</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bound</mnemonic>
+ <def>
+ <pfx>aso oso</pfx>
+ <opc>62</opc>
+ <opr>Gv M</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bsf</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bc</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bsr</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bd</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bswap</mnemonic>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f c8</opc>
+ <opr>rAXr8</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f c9</opc>
+ <opr>rCXr9</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f ca</opc>
+ <opr>rDXr10</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cb</opc>
+ <opr>rBXr11</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cc</opc>
+ <opr>rSPr12</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cd</opc>
+ <opr>rBPr13</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f ce</opc>
+ <opr>rSIr14</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>0f cf</opc>
+ <opr>rDIr15</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bt</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=4</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f a3</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>btc</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bb</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=7</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>btr</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b3</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=6</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>bts</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ab</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ba /reg=5</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>call</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=2</opc>
+ <opr>Ev</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=3</opc>
+ <opr>Ep</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e8</opc>
+ <opr>Jz</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9a</opc>
+ <opr>Ap</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cbw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>98 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cwde</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>98 /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cdqe</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>98 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clc</mnemonic>
+ <def>
+ <opc>f8</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cld</mnemonic>
+ <def>
+ <opc>fc</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clflush</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /reg=7 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clgi</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=5</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cli</mnemonic>
+ <def>
+ <opc>fa</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>clts</mnemonic>
+ <def>
+ <opc>0f 06</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmc</mnemonic>
+ <def>
+ <opc>f5</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovo</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 40</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovno</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 41</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovb</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 42</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovae</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 43</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovz</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 44</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovnz</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 45</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovbe</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 46</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmova</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 47</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovs</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 48</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovns</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 49</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovp</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4a</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovnp</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovl</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4c</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovge</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4d</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovle</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4e</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmovg</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 4f</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmp</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>38</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>39</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>3a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>3b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>3c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>3d</opc>
+ <opr>rAX Iz</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=7</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=7</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=7</opc>
+ <opr>Ev Iz</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=7</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmppd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsb</mnemonic>
+ <def>
+ <opc>a6</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a7 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a7 /o=32</opc>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpsq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a7 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f c2</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpxchg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f b0</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b1</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cmpxchg8b</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c7 /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>comisd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>comiss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cpuid</mnemonic>
+ <def>
+ <opc>0f a2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtdq2pd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f e6</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtdq2ps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpd2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f e6</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpd2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2d</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpd2ps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpi2ps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2a</opc>
+ <opr>V Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtpi2pd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2a</opc>
+ <opr>V Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtps2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtps2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2d</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtps2pd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsd2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef2 0f 2d</opc>
+ <opr>Gy W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsd2ss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsi2ss</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef3 0f 2a</opc>
+ <opr>V Ex</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtss2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef3 0f 2d</opc>
+ <opr>Gy W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtss2sd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttpd2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2c</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttpd2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e6</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttps2dq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttps2pi</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2c</opc>
+ <opr>P W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttsd2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef2 0f 2c</opc>
+ <opr>Gy Wsd</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvtsi2sd</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef2 0f 2a</opc>
+ <opr>V Ex</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cvttss2si</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>ssef3 0f 2c</opc>
+ <opr>Gy Wsd</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cwd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>99 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cdq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>99 /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>cqo</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>99 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>daa</mnemonic>
+ <def>
+ <opc>27</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>das</mnemonic>
+ <def>
+ <opc>2f</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>dec</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>48</opc>
+ <opr>eAX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>49</opc>
+ <opr>eCX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4a</opc>
+ <opr>eDX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4b</opc>
+ <opr>eBX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4c</opc>
+ <opr>eSP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4d</opc>
+ <opr>eBP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4e</opc>
+ <opr>eSI</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>4f</opc>
+ <opr>eDI</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>fe /reg=1</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=1</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>div</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=6</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=6</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>divss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>emms</mnemonic>
+ <def>
+ <opc>0f 77</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>enter</mnemonic>
+ <def>
+ <opc>c8</opc>
+ <opr>Iw Ib</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>f2xm1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=30</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fabs</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=21</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fadd</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=0</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=01</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=02</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=03</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=04</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=05</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=06</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=07</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=01</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=02</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=03</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=04</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=05</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=06</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=07</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>faddp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=01</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=02</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=03</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=04</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=05</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=06</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=07</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fbld</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=4</opc>
+ <opr>Mt</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fbstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=6</opc>
+ <opr>Mt</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fchs</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=20</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fclex</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=22</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovb</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=01</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=02</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=03</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=04</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=05</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=06</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=07</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmove</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovbe</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=10</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=11</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=12</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=13</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=14</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=15</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=16</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=17</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovu</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=18</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=19</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>da /mod=11 /x87=1f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovnb</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=00</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=01</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=02</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=03</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=04</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=05</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=06</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=07</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovne</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovnbe</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=10</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=11</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=12</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=13</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=14</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=15</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=16</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=17</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcmovnu</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=18</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=19</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=1f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucomi</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=29</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=2f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcom</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=2</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=10</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=11</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=12</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=13</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=14</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=15</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=16</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=17</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcom2</mnemonic>
+ <class>X87 UNDOC</class>
+ <def>
+ <opc>dc /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomp3</mnemonic>
+ <class>X87 UNDOC</class>
+ <def>
+ <opc>dc /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomi</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=31</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=32</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=33</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=34</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=35</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=36</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>db /mod=11 /x87=37</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucomip</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=29</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=2f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomip</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=31</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=32</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=33</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=34</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=35</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=36</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=37</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=3</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=18</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=19</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=1f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcomp5</mnemonic>
+ <class>X87 UNDOC</class>
+ <def>
+ <opc>de /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcompp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=19</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fcos</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3f</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdecstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=36</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdiv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=38</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=39</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=3f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=6</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=31</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=32</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=33</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=34</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=35</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=36</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=37</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdivp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=38</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=39</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=3f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdivr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=7</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=31</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=32</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=33</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=34</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=35</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=36</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=37</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=7</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=38</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=39</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=3f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fdivrp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=30</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=31</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=32</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=33</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=34</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=35</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=36</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=37</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>femms</mnemonic>
+ <def>
+ <opc>0f 0e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ffree</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=00</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=01</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=02</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=03</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=04</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=05</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=06</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=07</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ffreep</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=00</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=01</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=02</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=03</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=04</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=05</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=06</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=07</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ficom</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=2</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ficomp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=3</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fild</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=0</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=5</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+        <mnemonic>fincstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=37</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fninit</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>db /mod=11 /x87=23</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fiadd</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=0</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fidivr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=7</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=7</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fidiv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=6</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=6</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fisub</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=4</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=4</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fisubr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=5</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=5</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fist</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=2</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fistp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=3</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=7</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fisttp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=1</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=1</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>df /mod=!11 /reg=1</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fld</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=5</opc>
+ <opr>Mt</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=0</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=0</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=00</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=01</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=02</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=03</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=04</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=05</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=06</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=07</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fld1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=28</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldl2t</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=29</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldl2e</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2a</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+        <mnemonic>fldpi</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldlg2</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2c</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldln2</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2d</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldz</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=2e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldcw</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=5</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fldenv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=4</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fmul</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=1</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=09</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=0f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=1</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fmulp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=09</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=0f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fimul</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>da /mod=!11 /reg=1</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>de /mod=!11 /reg=1</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnop</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=10</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fpatan</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=33</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fprem</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=38</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fprem1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=35</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fptan</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>frndint</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3c</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>frstor</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=4</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnsave</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=6</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fscale</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3d</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsin</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsincos</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsqrt</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=3a</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>db /mod=!11 /reg=7</opc>
+ <opr>Mt</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=3</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp1</mnemonic>
+ <def>
+ <opc>d9 /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp8</mnemonic>
+ <def>
+ <opc>df /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fstp9</mnemonic>
+ <def>
+ <opc>df /mod=11 /x87=18</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=19</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=1f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fst</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=2</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=2</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=10</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=11</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=12</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=13</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=14</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=15</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=16</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=17</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnstcw</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=7</opc>
+ <opr>Mw</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnstenv</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d9 /mod=!11 /reg=6</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fnstsw</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dd /mod=!11 /reg=7</opc>
+ <opr>Mw</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=20</opc>
+ <opr>AX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsub</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=4</opc>
+ <opr>Md</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=4</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=20</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=21</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=22</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=23</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=24</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=25</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=26</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=27</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=29</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=2f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsubp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=29</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2a</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2b</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2c</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2d</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2e</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=2f</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsubr</mnemonic>
+ <class>X87</class>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>dc /mod=!11 /reg=5</opc>
+ <opr>Mq</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=28</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=29</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d8 /mod=11 /x87=2f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=20</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=21</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=22</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=23</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=24</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=25</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=26</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>dc /mod=11 /x87=27</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d8 /mod=!11 /reg=5</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fsubrp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>de /mod=11 /x87=20</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=21</opc>
+ <opr>ST1 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=22</opc>
+ <opr>ST2 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=23</opc>
+ <opr>ST3 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=24</opc>
+ <opr>ST4 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=25</opc>
+ <opr>ST5 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=26</opc>
+ <opr>ST6 ST0</opr>
+ </def>
+ <def>
+ <opc>de /mod=11 /x87=27</opc>
+ <opr>ST7 ST0</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ftst</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=24</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucom</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=20</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=21</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=22</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=23</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=24</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=25</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=26</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=27</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucomp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=28</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=29</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=2f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fucompp</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>da /mod=11 /x87=29</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxam</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=25</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxch</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=08</opc>
+ <opr>ST0 ST0</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=09</opc>
+ <opr>ST0 ST1</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0a</opc>
+ <opr>ST0 ST2</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0b</opc>
+ <opr>ST0 ST3</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0c</opc>
+ <opr>ST0 ST4</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0d</opc>
+ <opr>ST0 ST5</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0e</opc>
+ <opr>ST0 ST6</opr>
+ </def>
+ <def>
+ <opc>d9 /mod=11 /x87=0f</opc>
+ <opr>ST0 ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxch4</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>dd /mod=11 /x87=08</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=09</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>dd /mod=11 /x87=0f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxch7</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>df /mod=11 /x87=08</opc>
+ <opr>ST0</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=09</opc>
+ <opr>ST1</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0a</opc>
+ <opr>ST2</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0b</opc>
+ <opr>ST3</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0c</opc>
+ <opr>ST4</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0d</opc>
+ <opr>ST5</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0e</opc>
+ <opr>ST6</opr>
+ </def>
+ <def>
+ <opc>df /mod=11 /x87=0f</opc>
+ <opr>ST7</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxrstor</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+        <opc>0f ae /mod=!11 /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fxsave</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+        <opc>0f ae /mod=!11 /reg=0</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+        <mnemonic>fxtract</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=34</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fyl2x</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=31</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>fyl2xp1</mnemonic>
+ <class>X87</class>
+ <def>
+ <opc>d9 /mod=11 /x87=39</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>hlt</mnemonic>
+ <def>
+ <opc>f4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>idiv</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=7</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=7</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>in</mnemonic>
+ <def>
+ <opc>e4</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e5</opc>
+ <opr>eAX Ib</opr>
+ </def>
+ <def>
+ <opc>ec</opc>
+ <opr>AL DX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>ed</opc>
+ <opr>eAX DX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>imul</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f af</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=5</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=5</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>69</opc>
+ <opr>Gv Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>6b</opc>
+ <opr>Gv Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>inc</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>40</opc>
+ <opr>eAX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>41</opc>
+ <opr>eCX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>42</opc>
+ <opr>eDX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>43</opc>
+ <opr>eBX</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>44</opc>
+ <opr>eSP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>45</opc>
+ <opr>eBP</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>46</opc>
+ <opr>eSI</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>47</opc>
+ <opr>eDI</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=0</opc>
+ <opr>Ev</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>fe /reg=0</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>insb</mnemonic>
+ <def>
+ <opc>6c</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>insw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6d /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>insd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6d /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>int1</mnemonic>
+ <def>
+ <opc>f1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>int3</mnemonic>
+ <def>
+ <opc>cc</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>int</mnemonic>
+ <def>
+ <opc>cd</opc>
+ <opr>Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>into</mnemonic>
+ <def>
+ <opc>ce</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invd</mnemonic>
+ <def>
+ <opc>0f 08</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invept</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>sse66 0f 38 80 /m=32</opc>
+ <opr>Gd Mo</opr>
+ </def>
+ <def>
+ <opc>sse66 0f 38 80 /m=64</opc>
+ <opr>Gq Mo</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invlpg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=7 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invlpga</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invvpid</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>sse66 0f 38 81 /m=32</opc>
+ <opr>Gd Mo</opr>
+ </def>
+ <def>
+ <opc>sse66 0f 38 81 /m=64</opc>
+ <opr>Gq Mo</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>iretw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>cf /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>iretd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>cf /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>iretq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>cf /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jo</mnemonic>
+ <def>
+ <opc>70</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 80</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jno</mnemonic>
+ <def>
+ <opc>71</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 81</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jb</mnemonic>
+ <def>
+ <opc>72</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 82</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jae</mnemonic>
+ <def>
+ <opc>73</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 83</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jz</mnemonic>
+ <def>
+ <opc>74</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 84</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jnz</mnemonic>
+ <def>
+ <opc>75</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 85</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jbe</mnemonic>
+ <def>
+ <opc>76</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 86</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ja</mnemonic>
+ <def>
+ <opc>77</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 87</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>js</mnemonic>
+ <def>
+ <opc>78</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 88</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jns</mnemonic>
+ <def>
+ <opc>79</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 89</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jp</mnemonic>
+ <def>
+ <opc>7a</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8a</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jnp</mnemonic>
+ <def>
+ <opc>7b</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8b</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jl</mnemonic>
+ <def>
+ <opc>7c</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8c</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jge</mnemonic>
+ <def>
+ <opc>7d</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8d</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jle</mnemonic>
+ <def>
+ <opc>7e</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8e</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jg</mnemonic>
+ <def>
+ <opc>7f</opc>
+ <opr>Jb</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>0f 8f</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jcxz</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>e3 /a=16</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jecxz</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>e3 /a=32</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jrcxz</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>e3 /a=64</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>jmp</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=4</opc>
+ <opr>Ev</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=5</opc>
+ <opr>Ep</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e9</opc>
+ <opr>Jz</opr>
+ <mode>def64 depM</mode>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <opc>ea</opc>
+ <opr>Ap</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>eb</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lahf</mnemonic>
+ <def>
+ <opc>9f</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lar</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 02</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lddqu</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f f0</opc>
+ <opr>V M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ldmxcsr</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /reg=2 /mod=!11</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lds</mnemonic>
+ <def>
+ <pfx>aso oso</pfx>
+ <opc>c5</opc>
+ <opr>Gv M</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lea</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>8d</opc>
+ <opr>Gv M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>les</mnemonic>
+ <def>
+ <pfx>aso oso</pfx>
+ <opc>c4</opc>
+ <opr>Gv M</opr>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lfs</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b4</opc>
+ <opr>Gz M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lgs</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b5</opc>
+ <opr>Gz M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lidt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=3 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lss</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b2</opc>
+ <opr>Gz M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>leave</mnemonic>
+ <def>
+ <opc>c9</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lfence</mnemonic>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=0</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=1</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=2</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=3</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=4</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=5</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=6</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=5 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lgdt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=2 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lldt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=2</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lmsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=6 /mod=!11</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lock</mnemonic>
+ <def>
+ <opc>f0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsb</mnemonic>
+ <def>
+ <pfx>seg</pfx>
+ <opc>ac</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsw</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ad /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsd</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ad /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lodsq</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ad /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>loopnz</mnemonic>
+ <def>
+ <opc>e0</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>loope</mnemonic>
+ <def>
+ <opc>e1</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>loop</mnemonic>
+ <def>
+ <opc>e2</opc>
+ <opr>Jb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>lsl</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f 03</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ltr</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=3</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maskmovq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f7</opc>
+ <opr>P PR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>maxss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mfence</mnemonic>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=0</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=1</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=2</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=3</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=4</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=5</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=6</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=6 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>minss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>monitor</mnemonic>
+ <def>
+ <opc>0f 01 /reg=1 /mod=11 /rm=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>montmul</mnemonic>
+ <def>
+ <opc>0f a6 /mod=11 /rm=0 /reg=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mov</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c6 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c7 /reg=0</opc>
+ <opr>Ev Iz</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>88</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>89</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>8a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>8b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>8c</opc>
+ <opr>Ev S</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>8e</opc>
+ <opr>S Ev</opr>
+ </def>
+ <def>
+ <opc>a0</opc>
+ <opr>AL Ob</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw</pfx>
+ <opc>a1</opc>
+ <opr>rAX Ov</opr>
+ </def>
+ <def>
+ <opc>a2</opc>
+ <opr>Ob AL</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw</pfx>
+ <opc>a3</opc>
+ <opr>Ov rAX</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b0</opc>
+ <opr>ALr8b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b1</opc>
+ <opr>CLr9b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b2</opc>
+ <opr>DLr10b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b3</opc>
+ <opr>BLr11b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b4</opc>
+ <opr>AHr12b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b5</opc>
+ <opr>CHr13b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b6</opc>
+ <opr>DHr14b Ib</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>b7</opc>
+ <opr>BHr15b Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>b8</opc>
+ <opr>rAXr8 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>b9</opc>
+ <opr>rCXr9 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>ba</opc>
+ <opr>rDXr10 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bb</opc>
+ <opr>rBXr11 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bc</opc>
+ <opr>rSPr12 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bd</opc>
+ <opr>rBPr13 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>be</opc>
+ <opr>rSIr14 Iv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>bf</opc>
+ <opr>rDIr15 Iv</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 20</opc>
+ <opr>R C</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 21</opc>
+ <opr>R D</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 22</opc>
+ <opr>C R</opr>
+ </def>
+ <def>
+ <pfx>rexr</pfx>
+ <opc>0f 23</opc>
+ <opr>D R</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movapd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 28</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 29</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movaps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 28</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 29</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movd</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>sse66 0f 6e</opc>
+ <opr>V Ex</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6e</opc>
+ <opr>P Ex</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>sse66 0f 7e</opc>
+ <opr>Ex V</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 7e</opc>
+ <opr>Ex P</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movhpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 16 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 17</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movhps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 16 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 17</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movlhps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 16 /mod=11</opc>
+ <opr>V VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movlpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 12 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 13</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movlps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 12 /mod=!11</opc>
+ <opr>V M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 13</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movhlps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 12 /mod=11</opc>
+ <opr>V VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movmskpd</mnemonic>
+ <def>
+ <pfx>oso rexr rexb</pfx>
+ <opc>sse66 0f 50</opc>
+ <opr>Gd VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movmskps</mnemonic>
+ <def>
+ <pfx>oso rexr rexb</pfx>
+ <opc>0f 50</opc>
+ <opr>Gd VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e7</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movnti</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f c3</opc>
+ <opr>M Gy</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2b</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2b</opc>
+ <opr>M V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movntq</mnemonic>
+ <def>
+ <opc>0f e7</opc>
+ <opr>M P</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6f</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d6</opc>
+ <opr>W V</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 7e</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 7f</opc>
+ <opr>Q P</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsb</mnemonic>
+ <def>
+ <pfx>seg</pfx>
+ <opc>a4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsw</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>a5 /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsd</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>a5 /o=32</opc>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsq</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>a5 /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsx</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f be</opc>
+ <opr>Gv Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f bf</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movupd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movups</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 10</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 11</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movzx</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b6</opc>
+ <opr>Gv Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f b7</opc>
+ <opr>Gv Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mul</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=4</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=4</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mulss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 59</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mwait</mnemonic>
+ <def>
+ <opc>0f 01 /reg=1 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>neg</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=3</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=3</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>nop</mnemonic>
+ <def>
+ <opc>90</opc>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 19</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1a</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1b</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1c</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1d</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1e</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 1f</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>not</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=2</opc>
+ <opr>Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=2</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>or</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>08</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>09</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>0c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>0d</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=1</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=1</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>orpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 56</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>orps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 56</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>out</mnemonic>
+ <def>
+ <opc>e6</opc>
+ <opr>Ib AL</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>e7</opc>
+ <opr>Ib eAX</opr>
+ </def>
+ <def>
+ <opc>ee</opc>
+ <opr>DX AL</opr>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>ef</opc>
+ <opr>DX eAX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsb</mnemonic>
+ <def>
+ <opc>6e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6f /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6f /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>outsq</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>6f /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>packsswb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 63</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 63</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>packssdw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6b</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6b</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>packuswb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 67</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 67</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fc</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fc</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fd</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fd</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fe</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fe</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+
+ <instruction>
+ <mnemonic>paddsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ec</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ec</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ed</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ed</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddusb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f dc</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f dc</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddusw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f dd</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f dd</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pand</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f db</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f db</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pandn</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f df</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f df</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pavgb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e0</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e0</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pavgw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e3</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e3</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpeqb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 74</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 74</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpeqw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 75</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 75</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpeqd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 76</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 76</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpgtb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 64</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 64</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpgtw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 65</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 65</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pcmpgtd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 66</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 66</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrb</mnemonic>
+ <def>
+ <pfx>aso rexr rexb</pfx>
+ <opc>sse66 0f 3a 14</opc>
+ <opr>MbRv V Ib</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrd</mnemonic>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 16 /o=16</opc>
+ <opr>Ev V Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 16 /o=32</opc>
+ <opr>Ev V Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrq</mnemonic>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 16 /o=64</opc>
+ <opr>Ev V Ib</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pextrw</mnemonic>
+ <def>
+ <pfx>aso rexr rexb</pfx>
+ <opc>sse66 0f c5</opc>
+ <opr>Gd VR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f c5</opc>
+ <opr>Gd PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pinsrw</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f c4</opc>
+ <opr>P Ew Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>sse66 0f c4</opc>
+ <opr>V Ew Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaddwd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f5</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f5</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ee</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ee</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxub</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f de</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f de</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ea</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ea</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminub</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f da</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f da</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmovmskb</mnemonic>
+ <def>
+ <pfx>rexr rexb</pfx>
+ <opc>sse66 0f d7</opc>
+ <opr>Gd VR</opr>
+ </def>
+ <def>
+ <pfx>oso rexr rexb</pfx>
+ <opc>0f d7</opc>
+ <opr>Gd PR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhuw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e4</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e4</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e5</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e5</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmullw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d5</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d5</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pop</mnemonic>
+ <def>
+ <opc>07</opc>
+ <opr>ES</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>17</opc>
+ <opr>SS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>1f</opc>
+ <opr>DS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>0f a9</opc>
+ <opr>GS</opr>
+ </def>
+ <def>
+ <opc>0f a1</opc>
+ <opr>FS</opr>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>58</opc>
+ <opr>rAXr8</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>59</opc>
+ <opr>rCXr9</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5a</opc>
+ <opr>rDXr10</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5b</opc>
+ <opr>rBXr11</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5c</opc>
+ <opr>rSPr12</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5d</opc>
+ <opr>rBPr13</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5e</opc>
+ <opr>rSIr14</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>5f</opc>
+ <opr>rDIr15</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>8f /reg=0</opc>
+ <opr>Ev</opr>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popa</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>61 /o=16</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popad</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>61 /o=32</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popfw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=32 /o=16</opc>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=16 /o=16</opc>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popfd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=16 /o=32</opc>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=32 /o=32</opc>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>popfq</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9d /m=64 /o=64</opc>
+ <mode>def64 depM</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>por</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f eb</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f eb</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetch</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=0</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=2</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=3</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=4</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=5</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=6</opc>
+ <opr>M</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 0d /reg=7</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetchnta</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=0</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetcht0</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=1</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetcht1</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=2</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>prefetcht2</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f 18 /reg=3</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psadbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f6</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f6</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshufw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 70</opc>
+ <opr>P Q Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psllw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f1</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f1</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 71 /reg=6</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 71 /reg=6</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pslld</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f2</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f2</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 72 /reg=6</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 72 /reg=6</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psllq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f3</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f3</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=6</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 73 /reg=6</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psraw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e1</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e1</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 71 /reg=4</opc>
+ <opr>VR Ib</opr>
+ </def>
+ <def>
+ <opc>0f 71 /reg=4</opc>
+ <opr>PR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrad</mnemonic>
+ <def>
+ <opc>0f 72 /reg=4</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e2</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e2</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 72 /reg=4</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrlw</mnemonic>
+ <def>
+ <opc>0f 71 /reg=2</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d1</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d1</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 71 /reg=2</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrld</mnemonic>
+ <def>
+ <opc>0f 72 /reg=2</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d2</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d2</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 72 /reg=2</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrlq</mnemonic>
+ <def>
+ <opc>0f 73 /reg=2</opc>
+ <opr>PR Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d3</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d3</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=2</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f8</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f8</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f9</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f9</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fa</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fa</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e8</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e8</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f e9</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f e9</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubusb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d8</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d8</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubusw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d9</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d9</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 68</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 68</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhwd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 69</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 69</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6a</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 6a</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpcklbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 60</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 60</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpcklwd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 61</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 61</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckldq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 62</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 62</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pi2fw</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=0c</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pi2fd</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=0d</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pf2iw</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=1c</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pf2id</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=1d</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfnacc</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=8a</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfpnacc</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=8e</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfcmpge</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=90</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfmin</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=94</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrcp</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=96</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrsqrt</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=97</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfsub</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=9a</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfadd</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=9e</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfcmpgt</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a0</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfmax</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a4</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrcpit1</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a6</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrsqit1</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=a7</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfsubr</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=aa</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfacc</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=ae</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfcmpeq</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b0</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfmul</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b4</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pfrcpit2</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b6</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhrw</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=b7</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pswapd</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=bb</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pavgusb</mnemonic>
+ <def>
+ <opc>0f 0f /3dnow=bf</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>push</mnemonic>
+ <def>
+ <opc>06</opc>
+ <opr>ES</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>0e</opc>
+ <opr>CS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>16</opc>
+ <opr>SS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>1e</opc>
+ <opr>DS</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <opc>0f a8</opc>
+ <opr>GS</opr>
+ </def>
+ <def>
+ <opc>0f a0</opc>
+ <opr>FS</opr>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>50</opc>
+ <opr>rAXr8</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>51</opc>
+ <opr>rCXr9</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>52</opc>
+ <opr>rDXr10</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>53</opc>
+ <opr>rBXr11</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>54</opc>
+ <opr>rSPr12</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>55</opc>
+ <opr>rBPr13</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>56</opc>
+ <opr>rSIr14</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso rexb</pfx>
+ <opc>57</opc>
+ <opr>rDIr15</opr>
+ <mode>def64 depM</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>68</opc>
+ <opr>Iz</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>ff /reg=6</opc>
+ <opr>Ev</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <opc>6a</opc>
+ <opr>Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pusha</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>60 /o=16</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushad</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>60 /o=32</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushfw</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=32 /o=16</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=16 /o=16</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>9c /m=64 /o=16</opc>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushfd</mnemonic>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=16 /o=32</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso</pfx>
+ <opc>9c /m=32 /o=32</opc>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pushfq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>9c /m=64 /o=32</opc>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>9c /m=64 /o=64</opc>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pxor</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f ef</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f ef</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcl</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=2</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=2</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=2</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=2</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=2</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=2</opc>
+ <opr>Ev I1</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcr</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=3</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=3</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=3</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=3</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=3</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=3</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rol</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=0</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=0</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=0</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=0</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=0</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ror</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=1</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=1</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=1</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=1</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=1</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcpps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 53</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rcpss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 53</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdmsr</mnemonic>
+ <def>
+ <opc>0f 32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdpmc</mnemonic>
+ <def>
+ <opc>0f 33</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdtsc</mnemonic>
+ <def>
+ <opc>0f 31</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rdtscp</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=7 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>repne</mnemonic>
+ <def>
+ <opc>f2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rep</mnemonic>
+ <def>
+ <opc>f3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ret</mnemonic>
+ <def>
+ <opc>c2</opc>
+ <opr>Iw</opr>
+ </def>
+ <def>
+ <opc>c3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>retf</mnemonic>
+ <def>
+ <opc>ca</opc>
+ <opr>Iw</opr>
+ </def>
+ <def>
+ <opc>cb</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rsm</mnemonic>
+ <def>
+ <opc>0f aa</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rsqrtps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 52</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>rsqrtss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 52</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sahf</mnemonic>
+ <def>
+ <opc>9e</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sal</mnemonic>
+ </instruction>
+
+ <instruction>
+ <mnemonic>salc</mnemonic>
+ <def>
+ <opc>d6</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sar</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=7</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=7</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=7</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=7</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=7</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=7</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shl</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=6</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=6</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=6</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=6</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=6</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=4</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>d2 /reg=4</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=4</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=4</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=4</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=4</opc>
+ <opr>Ev CL</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=6</opc>
+ <opr>Ev I1</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shr</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>c1 /reg=5</opc>
+ <opr>Ev Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d2 /reg=5</opc>
+ <opr>Eb CL</opr>
+ <syn>cast</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d1 /reg=5</opc>
+ <opr>Ev I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>d0 /reg=5</opc>
+ <opr>Eb I1</opr>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>c0 /reg=5</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>d3 /reg=5</opc>
+ <opr>Ev CL</opr>
+ <syn>cast</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sbb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>18</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>19</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>1a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>1b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>1c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>1d</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=3</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=3</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=3</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=3</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasb</mnemonic>
+ <def>
+ <opc>ae</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasw</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>af /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasd</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>af /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>scasq</mnemonic>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>af /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>seto</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 90</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setno</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 91</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 92</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setnb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 93</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setz</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 94</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setnz</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 95</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setbe</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 96</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>seta</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 97</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sets</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 98</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setns</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 99</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setp</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9a</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setnp</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9b</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setl</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9c</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setge</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9d</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setle</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9e</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>setg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 9f</opc>
+ <opr>Eb</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sfence</mnemonic>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=0</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=1</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=2</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=3</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=4</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=5</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=6</opc>
+ </def>
+ <def>
+ <opc>0f ae /reg=7 /mod=11 /rm=7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sgdt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=0 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shld</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f a4</opc>
+ <opr>Ev Gv Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f a5</opc>
+ <opr>Ev Gv CL</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shrd</mnemonic>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ac</opc>
+ <opr>Ev Gv Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f ad</opc>
+ <opr>Ev Gv CL</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shufpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f c6</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>shufps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c6</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sidt</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=1 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sldt</mnemonic>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=0</opc>
+ <opr>MwRv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>smsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 01 /reg=4 /mod=!11</opc>
+ <opr>M</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sqrtss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 51</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stc</mnemonic>
+ <def>
+ <opc>f9</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>std</mnemonic>
+ <def>
+ <opc>fd</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stgi</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sti</mnemonic>
+ <def>
+ <opc>fb</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>skinit</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=6</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stmxcsr</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>0f ae /mod=!11 /reg=3</opc>
+ <opr>Md</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosb</mnemonic>
+ <def>
+ <pfx>seg</pfx>
+ <opc>aa</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosw</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ab /o=16</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosd</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ab /o=32</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>stosq</mnemonic>
+ <def>
+ <pfx>seg oso rexw</pfx>
+ <opc>ab /o=64</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>str</mnemonic>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=1</opc>
+ <opr>Ev</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sub</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>28</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>29</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>2a</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>2b</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>2c</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>2d</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=5</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=5</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=5</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=5</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>subss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 5c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>swapgs</mnemonic>
+ <def>
+ <opc>0f 01 /reg=7 /mod=11 /rm=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>syscall</mnemonic>
+ <def>
+ <opc>0f 05</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sysenter</mnemonic>
+ <def>
+ <opc>0f 34</opc>
+ <mode>inv64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sysexit</mnemonic>
+ <def>
+ <opc>0f 35</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>sysret</mnemonic>
+ <def>
+ <opc>0f 07</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>test</mnemonic>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=0</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>84</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>85</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <opc>a8</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>a9</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexw rexr rexx rexb</pfx>
+ <opc>f6 /reg=1</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=0</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>f7 /reg=1</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ucomisd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 2e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ucomiss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 2e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>ud2</mnemonic>
+ <def>
+ <opc>0f 0b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpckhpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 15</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpckhps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 15</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpcklps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 14</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>unpcklpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 14</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>verr</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=4</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>verw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 00 /reg=5</opc>
+ <opr>Ew</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmcall</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmclear</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f c7 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmxon</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f c7 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmptrld</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c7 /reg=6</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmptrst</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f c7 /reg=7</opc>
+ <opr>Mq</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmlaunch</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmresume</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmxoff</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <opc>0f 01 /reg=0 /mod=11 /rm=4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmread</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 78 /m=16</opc>
+ <opr>Ed Gd</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 78 /m=32</opc>
+ <opr>Ed Gd</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 78 /m=64</opc>
+ <opr>Eq Gq</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmwrite</mnemonic>
+ <vendor>intel</vendor>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 79 /m=16</opc>
+ <opr>Gd Ed</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 79 /m=32</opc>
+ <opr>Gd Ed</opr>
+ <mode>def64</mode>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 79 /m=64</opc>
+ <opr>Gq Eq</opr>
+ <mode>def64</mode>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmrun</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmmcall</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmload</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>vmsave</mnemonic>
+ <vendor>amd</vendor>
+ <def>
+ <opc>0f 01 /reg=3 /mod=11 /rm=3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>wait</mnemonic>
+ <def>
+ <opc>9b</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>wbinvd</mnemonic>
+ <def>
+ <opc>0f 09</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>wrmsr</mnemonic>
+ <def>
+ <opc>0f 30</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xadd</mnemonic>
+ <def>
+ <pfx>aso oso rexr rexx rexb</pfx>
+ <opc>0f c0</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>0f c1</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xchg</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>86</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>87</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>90</opc>
+ <opr>rAXr8 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>91</opc>
+ <opr>rCXr9 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>92</opc>
+ <opr>rDXr10 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>93</opc>
+ <opr>rBXr11 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>94</opc>
+ <opr>rSPr12 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>95</opc>
+ <opr>rBPr13 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>96</opc>
+ <opr>rSIr14 rAX</opr>
+ </def>
+ <def>
+ <pfx>oso rexw rexb</pfx>
+ <opc>97</opc>
+ <opr>rDIr15 rAX</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xlatb</mnemonic>
+ <def>
+ <pfx>rexw</pfx>
+ <opc>d7</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xor</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>30</opc>
+ <opr>Eb Gb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>31</opc>
+ <opr>Ev Gv</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>32</opc>
+ <opr>Gb Eb</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>33</opc>
+ <opr>Gv Ev</opr>
+ </def>
+ <def>
+ <opc>34</opc>
+ <opr>AL Ib</opr>
+ </def>
+ <def>
+ <pfx>oso rexw</pfx>
+ <opc>35</opc>
+ <opr>rAX Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>80 /reg=6</opc>
+ <opr>Eb Ib</opr>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>81 /reg=6</opc>
+ <opr>Ev Iz</opr>
+ <syn>sext</syn>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>82 /reg=6</opc>
+ <opr>Eb Ib</opr>
+ <mode>inv64</mode>
+ </def>
+ <def>
+ <pfx>aso oso rexw rexr rexx rexb</pfx>
+ <opc>83 /reg=6</opc>
+ <opr>Ev Ib</opr>
+ <syn>sext</syn>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xorpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 57</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xorps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 57</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptecb</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptcbc</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptctr</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=3</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptcfb</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=4</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xcryptofb</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=5</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xsha1</mnemonic>
+ <def>
+ <opc>0f a6 /mod=11 /rm=0 /reg=1</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xsha256</mnemonic>
+ <def>
+ <opc>0f a6 /mod=11 /rm=0 /reg=2</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>xstore</mnemonic>
+ <def>
+ <opc>0f a7 /mod=11 /rm=0 /reg=0</opc>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>db</mnemonic>
+ </instruction>
+
+ <!--
+ SSE 2
+ -->
+
+ <instruction>
+ <mnemonic>movdqa</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 7f</opc>
+ <opr>W V</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movdq2q</mnemonic>
+ <def>
+ <pfx>aso rexb</pfx>
+ <opc>ssef2 0f d6</opc>
+ <opr>P VR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movdqu</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 6f</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 7f</opc>
+ <opr>W V</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movq2dq</mnemonic>
+ <def>
+ <pfx>aso</pfx>
+ <opc>ssef3 0f d6</opc>
+ <opr>V PR</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>paddq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f d4</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d4</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psubq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f fb</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f fb</opc>
+ <opr>P Q</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmuludq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f f4</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f f4</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshufhw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 70</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshuflw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 70</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pshufd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 70</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pslldq</mnemonic>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=7</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psrldq</mnemonic>
+ <def>
+ <pfx>rexb</pfx>
+ <opc>sse66 0f 73 /reg=3</opc>
+ <opr>VR Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpckhqdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>punpcklqdq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 6c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <!--
+ SSE 3
+ -->
+
+ <instruction>
+ <mnemonic>addsubpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f d0</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>addsubps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f d0</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>haddpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 7c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>haddps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 7c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>hsubpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 7d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>hsubps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 7d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movddup</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 12 /mod=11</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef2 0f 12 /mod=!11</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movshdup</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 16 /mod=11</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 16 /mod=!11</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>movsldup</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 12 /mod=11</opc>
+ <opr>V W</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>ssef3 0f 12 /mod=!11</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <!--
+ SSSE 3
+ -->
+
+ <instruction>
+ <mnemonic>pabsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 1c</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 1c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pabsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 1d</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 1d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pabsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 1e</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 1e</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psignb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 00</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 00</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phaddw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 01</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 01</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phaddd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 02</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 02</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phaddsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 03</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 03</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaddubsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 04</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 04</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phsubw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 05</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 05</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phsubd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 06</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 06</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phsubsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 07</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 07</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psignb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 08</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 08</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psignd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 0a</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 0a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>psignw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 09</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 09</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulhrsw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 38 0b</opc>
+ <opr>P Q</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 0b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>palignr</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>0f 3a 0f</opc>
+ <opr>P Q Ib</opr>
+ </def>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0f</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <!--
+ SSE 4.1
+ -->
+
+ <instruction>
+ <mnemonic>pblendvb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 10</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmuldq</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 28</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 38</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 39</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminuw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3a</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pminud</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3b</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxsb</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3c</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3d</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmaxud</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 3f</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pmulld</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 40</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>phminposuw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 41</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 08</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 09</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundss</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0a</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>roundsd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0b</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0d</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>pblendw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0e</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 0c</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendvpd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 15</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>blendvps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 38 14</opc>
+ <opr>V W</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>dpps</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 40</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>dppd</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 41</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>mpsadbw</mnemonic>
+ <def>
+ <pfx>aso rexr rexx rexb</pfx>
+ <opc>sse66 0f 3a 42</opc>
+ <opr>V W Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>extractps</mnemonic>
+ <def>
+ <pfx>aso rexr rexw rexb</pfx>
+ <opc>sse66 0f 3a 17</opc>
+ <opr>MdRy V Ib</opr>
+ </def>
+ </instruction>
+
+ <instruction>
+ <mnemonic>invalid</mnemonic>
+ </instruction>
+
+</x86optable>
diff --git a/src/3rdparty/masm/disassembler/udis86/ud_opcode.py b/src/3rdparty/masm/disassembler/udis86/ud_opcode.py
new file mode 100644
index 0000000000..f301b52461
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/ud_opcode.py
@@ -0,0 +1,235 @@
+# udis86 - scripts/ud_opcode.py
+#
+# Copyright (c) 2009 Vivek Thampi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+class UdOpcodeTables:
+
+ TableInfo = {
+ 'opctbl' : { 'name' : 'UD_TAB__OPC_TABLE', 'size' : 256 },
+ '/sse' : { 'name' : 'UD_TAB__OPC_SSE', 'size' : 4 },
+ '/reg' : { 'name' : 'UD_TAB__OPC_REG', 'size' : 8 },
+ '/rm' : { 'name' : 'UD_TAB__OPC_RM', 'size' : 8 },
+ '/mod' : { 'name' : 'UD_TAB__OPC_MOD', 'size' : 2 },
+ '/m' : { 'name' : 'UD_TAB__OPC_MODE', 'size' : 3 },
+ '/x87' : { 'name' : 'UD_TAB__OPC_X87', 'size' : 64 },
+ '/a' : { 'name' : 'UD_TAB__OPC_ASIZE', 'size' : 3 },
+ '/o' : { 'name' : 'UD_TAB__OPC_OSIZE', 'size' : 3 },
+ '/3dnow' : { 'name' : 'UD_TAB__OPC_3DNOW', 'size' : 256 },
+ 'vendor' : { 'name' : 'UD_TAB__OPC_VENDOR', 'size' : 3 },
+ }
+
+ OpcodeTable0 = {
+ 'type' : 'opctbl',
+ 'entries' : {},
+ 'meta' : 'table0'
+ }
+
+ OpcExtIndex = {
+
+ # ssef2, ssef3, sse66
+ 'sse': {
+ 'none' : '00',
+ 'f2' : '01',
+ 'f3' : '02',
+ '66' : '03'
+ },
+
+ # /mod=
+ 'mod': {
+ '!11' : '00',
+ '11' : '01'
+ },
+
+ # /m=, /o=, /a=
+ 'mode': {
+ '16' : '00',
+ '32' : '01',
+ '64' : '02'
+ },
+
+ 'vendor' : {
+ 'amd' : '00',
+ 'intel' : '01',
+ 'any' : '02'
+ }
+ }
+
+ InsnTable = []
+ MnemonicsTable = []
+
+ ThreeDNowTable = {}
+
+ def sizeOfTable( self, t ):
+ return self.TableInfo[ t ][ 'size' ]
+
+ def nameOfTable( self, t ):
+ return self.TableInfo[ t ][ 'name' ]
+
+ #
+ # Updates a table entry: If the entry doesn't exist
+ # it will create the entry, otherwise, it will walk
+ # while validating the path.
+ #
+ def updateTable( self, table, index, type, meta ):
+ if not index in table[ 'entries' ]:
+ table[ 'entries' ][ index ] = { 'type' : type, 'entries' : {}, 'meta' : meta }
+ if table[ 'entries' ][ index ][ 'type' ] != type:
+ raise NameError( "error: violation in opcode mapping (overwrite) %s with %s." %
+ ( table[ 'entries' ][ index ][ 'type' ], type) )
+ return table[ 'entries' ][ index ]
+
+ class Insn:
+ """An abstract type representing an instruction in the opcode map.
+ """
+
+ # A mapping of opcode extensions to their representational
+ # values used in the opcode map.
+ OpcExtMap = {
+ '/rm' : lambda v: "%02x" % int(v, 16),
+ '/x87' : lambda v: "%02x" % int(v, 16),
+ '/3dnow' : lambda v: "%02x" % int(v, 16),
+ '/reg' : lambda v: "%02x" % int(v, 16),
+ # modrm.mod
+ # (!11, 11) => (00, 01)
+ '/mod' : lambda v: '00' if v == '!11' else '01',
+ # Mode extensions:
+ # (16, 32, 64) => (00, 01, 02)
+ '/o' : lambda v: "%02x" % (int(v) / 32),
+ '/a' : lambda v: "%02x" % (int(v) / 32),
+ '/m' : lambda v: "%02x" % (int(v) / 32),
+ '/sse' : lambda v: UdOpcodeTables.OpcExtIndex['sse'][v]
+ }
+
+ def __init__(self, prefixes, mnemonic, opcodes, operands, vendor):
+ self.opcodes = opcodes
+ self.prefixes = prefixes
+ self.mnemonic = mnemonic
+ self.operands = operands
+ self.vendor = vendor
+ self.opcext = {}
+
+ ssePrefix = None
+ if self.opcodes[0] in ('ssef2', 'ssef3', 'sse66'):
+ ssePrefix = self.opcodes[0][3:]
+ self.opcodes.pop(0)
+
+ # do some preliminary decoding of the instruction type
+ # 1byte, 2byte or 3byte instruction?
+ self.nByteInsn = 1
+ if self.opcodes[0] == '0f': # 2byte
+ # 2+ byte opcodes are always disambiguated by an
+ # sse prefix, unless it is a 3d now instruction
+ # which is 0f 0f ...
+ if self.opcodes[1] != '0f' and ssePrefix is None:
+ ssePrefix = 'none'
+ if self.opcodes[1] in ('38', '3a'): # 3byte
+ self.nByteInsn = 3
+ else:
+ self.nByteInsn = 2
+
+ # The opcode that indexes into the opcode table.
+ self.opcode = self.opcodes[self.nByteInsn - 1]
+
+ # Record opcode extensions
+ for opcode in self.opcodes[self.nByteInsn:]:
+ arg, val = opcode.split('=')
+ self.opcext[arg] = self.OpcExtMap[arg](val)
+
+ # Record sse extension: the reason sse extension is handled
+ # separately is that historically sse was handled as a first
+ # class opcode, not as an extension. Now that sse is handled
+ # as an extension, we do the manual conversion here, as opposed
+ # to modifying the opcode xml file.
+ if ssePrefix is not None:
+ self.opcext['/sse'] = self.OpcExtMap['/sse'](ssePrefix)
+
+ def parse(self, table, insn):
+ index = insn.opcodes[0];
+ if insn.nByteInsn > 1:
+ assert index == '0f'
+ table = self.updateTable(table, index, 'opctbl', '0f')
+ index = insn.opcodes[1]
+
+ if insn.nByteInsn == 3:
+ table = self.updateTable(table, index, 'opctbl', index)
+ index = insn.opcodes[2]
+
+ # Walk down the tree, create levels as needed, for opcode
+ # extensions. The order is important, and determines how
+ # well the opcode table is packed. Also note, /sse must be
+ # before /o, because /sse may consume operand size prefix
+ # affect the outcome of /o.
+ for ext in ('/mod', '/x87', '/reg', '/rm', '/sse',
+ '/o', '/a', '/m', '/3dnow'):
+ if ext in insn.opcext:
+ table = self.updateTable(table, index, ext, ext)
+ index = insn.opcext[ext]
+
+ # additional table for disambiguating vendor
+ if len(insn.vendor):
+ table = self.updateTable(table, index, 'vendor', insn.vendor)
+ index = self.OpcExtIndex['vendor'][insn.vendor]
+
+ # make leaf node entries
+ leaf = self.updateTable(table, index, 'insn', '')
+
+ leaf['mnemonic'] = insn.mnemonic
+ leaf['prefixes'] = insn.prefixes
+ leaf['operands'] = insn.operands
+
+ # add instruction to linear table of instruction forms
+ self.InsnTable.append({ 'prefixes' : insn.prefixes,
+ 'mnemonic' : insn.mnemonic,
+ 'operands' : insn.operands })
+
+ # add mnemonic to mnemonic table
+ if not insn.mnemonic in self.MnemonicsTable:
+ self.MnemonicsTable.append(insn.mnemonic)
+
+
+ # Adds an instruction definition to the opcode tables
+ def addInsnDef( self, prefixes, mnemonic, opcodes, operands, vendor ):
+ insn = self.Insn(prefixes=prefixes,
+ mnemonic=mnemonic,
+ opcodes=opcodes,
+ operands=operands,
+ vendor=vendor)
+ self.parse(self.OpcodeTable0, insn)
+
+ def print_table( self, table, pfxs ):
+ print "%s |" % pfxs
+ keys = table[ 'entries' ].keys()
+ if ( len( keys ) ):
+ keys.sort()
+ for idx in keys:
+ e = table[ 'entries' ][ idx ]
+ if e[ 'type' ] == 'insn':
+ print "%s |-<%s>" % ( pfxs, idx ),
+ print "%s %s" % ( e[ 'mnemonic' ], ' '.join( e[ 'operands'] ) )
+ else:
+ print "%s |-<%s> %s" % ( pfxs, idx, e['type'] )
+ self.print_table( e, pfxs + ' |' )
+
+ def print_tree( self ):
+ self.print_table( self.OpcodeTable0, '' )
diff --git a/src/3rdparty/masm/disassembler/udis86/ud_optable.py b/src/3rdparty/masm/disassembler/udis86/ud_optable.py
new file mode 100644
index 0000000000..5b5c55d3b8
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/ud_optable.py
@@ -0,0 +1,103 @@
+# udis86 - scripts/ud_optable.py (optable.xml parser)
+#
+# Copyright (c) 2009 Vivek Thampi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+from xml.dom import minidom
+
+class UdOptableXmlParser:
+
+ def parseDef( self, node ):
+ ven = ''
+ pfx = []
+ opc = []
+ opr = []
+ for def_node in node.childNodes:
+ if not def_node.localName:
+ continue
+ if def_node.localName == 'pfx':
+ pfx = def_node.firstChild.data.split();
+ elif def_node.localName == 'opc':
+ opc = def_node.firstChild.data.split();
+ elif def_node.localName == 'opr':
+ opr = def_node.firstChild.data.split();
+ elif def_node.localName == 'mode':
+ pfx.extend( def_node.firstChild.data.split() );
+ elif def_node.localName == 'syn':
+ pfx.extend( def_node.firstChild.data.split() );
+ elif def_node.localName == 'vendor':
+ ven = ( def_node.firstChild.data );
+ else:
+ print "warning: invalid node - %s" % def_node.localName
+ continue
+ return ( pfx, opc, opr, ven )
+
+ def parse( self, xml, fn ):
+ xmlDoc = minidom.parse( xml )
+ self.TlNode = xmlDoc.firstChild
+
+ while self.TlNode and self.TlNode.localName != "x86optable":
+ self.TlNode = self.TlNode.nextSibling
+
+ for insnNode in self.TlNode.childNodes:
+ if not insnNode.localName:
+ continue
+ if insnNode.localName != "instruction":
+ print "warning: invalid insn node - %s" % insnNode.localName
+ continue
+
+ mnemonic = insnNode.getElementsByTagName( 'mnemonic' )[ 0 ].firstChild.data
+ vendor = ''
+
+ for node in insnNode.childNodes:
+ if node.localName == 'vendor':
+ vendor = node.firstChild.data
+ elif node.localName == 'def':
+ ( prefixes, opcodes, operands, local_vendor ) = \
+ self.parseDef( node )
+ if ( len( local_vendor ) ):
+ vendor = local_vendor
+ # callback
+ fn( prefixes, mnemonic, opcodes, operands, vendor )
+
+
+def printFn( pfx, mnm, opc, opr, ven ):
+ print 'def: ',
+ if len( pfx ):
+ print ' '.join( pfx ),
+ print "%s %s %s %s" % \
+ ( mnm, ' '.join( opc ), ' '.join( opr ), ven )
+
+
+def parse( xml, callback ):
+ parser = UdOptableXmlParser()
+ parser.parse( xml, callback )
+
+def main():
+ parser = UdOptableXmlParser()
+ parser.parse( sys.argv[ 1 ], printFn )
+
+if __name__ == "__main__":
+ main()
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86.c b/src/3rdparty/masm/disassembler/udis86/udis86.c
new file mode 100644
index 0000000000..2641034232
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86.c
@@ -0,0 +1,182 @@
+/* udis86 - libudis86/udis86.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_input.h"
+#include "udis86_extern.h"
+
+#ifndef __UD_STANDALONE__
+# include <stdlib.h>
+# include <string.h>
+#endif /* __UD_STANDALONE__ */
+
+/* =============================================================================
+ * ud_init() - Initializes ud_t object.
+ * =============================================================================
+ */
+extern void
+ud_init(struct ud* u)
+{
+ memset((void*)u, 0, sizeof(struct ud));
+ ud_set_mode(u, 16);
+ u->mnemonic = UD_Iinvalid;
+ ud_set_pc(u, 0);
+#ifndef __UD_STANDALONE__
+ ud_set_input_file(u, stdin);
+#endif /* __UD_STANDALONE__ */
+}
+
+/* =============================================================================
+ * ud_disassemble() - disassembles one instruction and returns the number of
+ * bytes disassembled. A zero means end of disassembly.
+ * =============================================================================
+ */
+extern unsigned int
+ud_disassemble(struct ud* u)
+{
+ if (ud_input_end(u))
+ return 0;
+
+
+ u->insn_buffer[0] = u->insn_hexcode[0] = 0;
+
+
+ if (ud_decode(u) == 0)
+ return 0;
+ if (u->translator)
+ u->translator(u);
+ return ud_insn_len(u);
+}
+
+/* =============================================================================
+ * ud_set_mode() - Set Disassembly Mode.
+ * =============================================================================
+ */
+extern void
+ud_set_mode(struct ud* u, uint8_t m)
+{
+ switch(m) {
+ case 16:
+ case 32:
+ case 64: u->dis_mode = m ; return;
+ default: u->dis_mode = 16; return;
+ }
+}
+
+/* =============================================================================
+ * ud_set_vendor() - Set vendor.
+ * =============================================================================
+ */
+extern void
+ud_set_vendor(struct ud* u, unsigned v)
+{
+ switch(v) {
+ case UD_VENDOR_INTEL:
+ u->vendor = v;
+ break;
+ case UD_VENDOR_ANY:
+ u->vendor = v;
+ break;
+ default:
+ u->vendor = UD_VENDOR_AMD;
+ }
+}
+
+/* =============================================================================
+ * ud_set_pc() - Sets code origin.
+ * =============================================================================
+ */
+extern void
+ud_set_pc(struct ud* u, uint64_t o)
+{
+ u->pc = o;
+}
+
+/* =============================================================================
+ * ud_set_syntax() - Sets the output syntax.
+ * =============================================================================
+ */
+extern void
+ud_set_syntax(struct ud* u, void (*t)(struct ud*))
+{
+ u->translator = t;
+}
+
+/* =============================================================================
+ * ud_insn() - returns the disassembled instruction
+ * =============================================================================
+ */
+extern char*
+ud_insn_asm(struct ud* u)
+{
+ return u->insn_buffer;
+}
+
+/* =============================================================================
+ * ud_insn_off() - Returns the offset.
+ * =============================================================================
+ */
+extern uint64_t
+ud_insn_off(struct ud* u)
+{
+ return u->insn_offset;
+}
+
+
+/* =============================================================================
+ * ud_insn_hex() - Returns hex form of disassembled instruction.
+ * =============================================================================
+ */
+extern char*
+ud_insn_hex(struct ud* u)
+{
+ return u->insn_hexcode;
+}
+
+/* =============================================================================
+ * ud_insn_ptr() - Returns code disassembled.
+ * =============================================================================
+ */
+extern uint8_t*
+ud_insn_ptr(struct ud* u)
+{
+ return u->inp_sess;
+}
+
+/* =============================================================================
+ * ud_insn_len() - Returns the count of bytes disassembled.
+ * =============================================================================
+ */
+extern unsigned int
+ud_insn_len(struct ud* u)
+{
+ return u->inp_ctr;
+}
+
+#endif // USE(UDIS86)
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86.h b/src/3rdparty/masm/disassembler/udis86/udis86.h
new file mode 100644
index 0000000000..baaf495e04
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86.h
@@ -0,0 +1,33 @@
+/* udis86 - udis86.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UDIS86_H
+#define UDIS86_H
+
+#include "udis86_types.h"
+#include "udis86_extern.h"
+#include "udis86_itab.h"
+
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_decode.c b/src/3rdparty/masm/disassembler/udis86/udis86_decode.c
new file mode 100644
index 0000000000..3d567b6df2
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_decode.c
@@ -0,0 +1,1141 @@
+/* udis86 - libudis86/decode.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_extern.h"
+#include "udis86_types.h"
+#include "udis86_input.h"
+#include "udis86_decode.h"
+#include <wtf/Assertions.h>
+
+#define dbg(x, n...)
+/* #define dbg printf */
+
+#ifndef __UD_STANDALONE__
+# include <string.h>
+#endif /* __UD_STANDALONE__ */
+
+/* The max number of prefixes to an instruction */
+#define MAX_PREFIXES 15
+
+/* instruction aliases and special cases */
+static struct ud_itab_entry s_ie__invalid =
+ { UD_Iinvalid, O_NONE, O_NONE, O_NONE, P_none };
+
+static int
+decode_ext(struct ud *u, uint16_t ptr);
+
+
+static inline int
+eff_opr_mode(int dis_mode, int rex_w, int pfx_opr)
+{
+ if (dis_mode == 64) {
+ return rex_w ? 64 : (pfx_opr ? 16 : 32);
+ } else if (dis_mode == 32) {
+ return pfx_opr ? 16 : 32;
+ } else {
+ ASSERT(dis_mode == 16);
+ return pfx_opr ? 32 : 16;
+ }
+}
+
+
+static inline int
+eff_adr_mode(int dis_mode, int pfx_adr)
+{
+ if (dis_mode == 64) {
+ return pfx_adr ? 32 : 64;
+ } else if (dis_mode == 32) {
+ return pfx_adr ? 16 : 32;
+ } else {
+ ASSERT(dis_mode == 16);
+ return pfx_adr ? 32 : 16;
+ }
+}
+
+
+/* Looks up mnemonic code in the mnemonic string table
+ * Returns NULL if the mnemonic code is invalid
+ */
+const char * ud_lookup_mnemonic( enum ud_mnemonic_code c )
+{
+ return ud_mnemonics_str[ c ];
+}
+
+
+/*
+ * decode_prefixes
+ *
+ * Extracts instruction prefixes.
+ */
+static int
+decode_prefixes(struct ud *u)
+{
+ unsigned int have_pfx = 1;
+ unsigned int i;
+ uint8_t curr;
+
+ /* if in error state, bail out */
+ if ( u->error )
+ return -1;
+
+ /* keep going as long as there are prefixes available */
+ for ( i = 0; have_pfx ; ++i ) {
+
+ /* Get next byte. */
+ ud_inp_next(u);
+ if ( u->error )
+ return -1;
+ curr = ud_inp_curr( u );
+
+ /* rex prefixes in 64bit mode */
+ if ( u->dis_mode == 64 && ( curr & 0xF0 ) == 0x40 ) {
+ u->pfx_rex = curr;
+ } else {
+ switch ( curr )
+ {
+ case 0x2E :
+ u->pfx_seg = UD_R_CS;
+ u->pfx_rex = 0;
+ break;
+ case 0x36 :
+ u->pfx_seg = UD_R_SS;
+ u->pfx_rex = 0;
+ break;
+ case 0x3E :
+ u->pfx_seg = UD_R_DS;
+ u->pfx_rex = 0;
+ break;
+ case 0x26 :
+ u->pfx_seg = UD_R_ES;
+ u->pfx_rex = 0;
+ break;
+ case 0x64 :
+ u->pfx_seg = UD_R_FS;
+ u->pfx_rex = 0;
+ break;
+ case 0x65 :
+ u->pfx_seg = UD_R_GS;
+ u->pfx_rex = 0;
+ break;
+ case 0x67 : /* address-size override prefix */
+ u->pfx_adr = 0x67;
+ u->pfx_rex = 0;
+ break;
+ case 0xF0 :
+ u->pfx_lock = 0xF0;
+ u->pfx_rex = 0;
+ break;
+ case 0x66:
+ /* the 0x66 sse prefix is only effective if no other sse prefix
+ * has already been specified.
+ */
+ if ( !u->pfx_insn ) u->pfx_insn = 0x66;
+ u->pfx_opr = 0x66;
+ u->pfx_rex = 0;
+ break;
+ case 0xF2:
+ u->pfx_insn = 0xF2;
+ u->pfx_repne = 0xF2;
+ u->pfx_rex = 0;
+ break;
+ case 0xF3:
+ u->pfx_insn = 0xF3;
+ u->pfx_rep = 0xF3;
+ u->pfx_repe = 0xF3;
+ u->pfx_rex = 0;
+ break;
+ default :
+ /* No more prefixes */
+ have_pfx = 0;
+ break;
+ }
+ }
+
+ /* check if we reached max instruction length */
+ if ( i + 1 == MAX_INSN_LENGTH ) {
+ u->error = 1;
+ break;
+ }
+ }
+
+ /* return status */
+ if ( u->error )
+ return -1;
+
+ /* rewind back one byte in stream, since the above loop
+ * stops with a non-prefix byte.
+ */
+ ud_inp_back(u);
+ return 0;
+}
+
+
+static inline unsigned int modrm( struct ud * u )
+{
+ if ( !u->have_modrm ) {
+ u->modrm = ud_inp_next( u );
+ u->have_modrm = 1;
+ }
+ return u->modrm;
+}
+
+
+static unsigned int resolve_operand_size( const struct ud * u, unsigned int s )
+{
+ switch ( s )
+ {
+ case SZ_V:
+ return ( u->opr_mode );
+ case SZ_Z:
+ return ( u->opr_mode == 16 ) ? 16 : 32;
+ case SZ_P:
+ return ( u->opr_mode == 16 ) ? SZ_WP : SZ_DP;
+ case SZ_MDQ:
+ return ( u->opr_mode == 16 ) ? 32 : u->opr_mode;
+ case SZ_RDQ:
+ return ( u->dis_mode == 64 ) ? 64 : 32;
+ default:
+ return s;
+ }
+}
+
+
+static int resolve_mnemonic( struct ud* u )
+{
+ /* far/near flags */
+ u->br_far = 0;
+ u->br_near = 0;
+ /* readjust operand sizes for call/jmp instructions */
+ if ( u->mnemonic == UD_Icall || u->mnemonic == UD_Ijmp ) {
+ /* WP: 16:16 pointer */
+ if ( u->operand[ 0 ].size == SZ_WP ) {
+ u->operand[ 0 ].size = 16;
+ u->br_far = 1;
+ u->br_near= 0;
+ /* DP: 32:32 pointer */
+ } else if ( u->operand[ 0 ].size == SZ_DP ) {
+ u->operand[ 0 ].size = 32;
+ u->br_far = 1;
+ u->br_near= 0;
+ } else {
+ u->br_far = 0;
+ u->br_near= 1;
+ }
+ /* resolve 3dnow weirdness. */
+ } else if ( u->mnemonic == UD_I3dnow ) {
+ u->mnemonic = ud_itab[ u->le->table[ ud_inp_curr( u ) ] ].mnemonic;
+ }
+ /* SWAPGS is only valid in 64bits mode */
+ if ( u->mnemonic == UD_Iswapgs && u->dis_mode != 64 ) {
+ u->error = 1;
+ return -1;
+ }
+
+ if (u->mnemonic == UD_Ixchg) {
+ if ((u->operand[0].type == UD_OP_REG && u->operand[0].base == UD_R_AX &&
+ u->operand[1].type == UD_OP_REG && u->operand[1].base == UD_R_AX) ||
+ (u->operand[0].type == UD_OP_REG && u->operand[0].base == UD_R_EAX &&
+ u->operand[1].type == UD_OP_REG && u->operand[1].base == UD_R_EAX)) {
+ u->operand[0].type = UD_NONE;
+ u->operand[1].type = UD_NONE;
+ u->mnemonic = UD_Inop;
+ }
+ }
+
+ if (u->mnemonic == UD_Inop && u->pfx_rep) {
+ u->pfx_rep = 0;
+ u->mnemonic = UD_Ipause;
+ }
+ return 0;
+}
+
+
+/* -----------------------------------------------------------------------------
+ * decode_a()- Decodes operands of the type seg:offset
+ * -----------------------------------------------------------------------------
+ */
+static void
+decode_a(struct ud* u, struct ud_operand *op)
+{
+ if (u->opr_mode == 16) {
+ /* seg16:off16 */
+ op->type = UD_OP_PTR;
+ op->size = 32;
+ op->lval.ptr.off = ud_inp_uint16(u);
+ op->lval.ptr.seg = ud_inp_uint16(u);
+ } else {
+ /* seg16:off32 */
+ op->type = UD_OP_PTR;
+ op->size = 48;
+ op->lval.ptr.off = ud_inp_uint32(u);
+ op->lval.ptr.seg = ud_inp_uint16(u);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * decode_gpr() - Returns decoded General Purpose Register
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+decode_gpr(register struct ud* u, unsigned int s, unsigned char rm)
+{
+ s = resolve_operand_size(u, s);
+
+ switch (s) {
+ case 64:
+ return UD_R_RAX + rm;
+ case SZ_DP:
+ case 32:
+ return UD_R_EAX + rm;
+ case SZ_WP:
+ case 16:
+ return UD_R_AX + rm;
+ case 8:
+ if (u->dis_mode == 64 && u->pfx_rex) {
+ if (rm >= 4)
+ return UD_R_SPL + (rm-4);
+ return UD_R_AL + rm;
+ } else return UD_R_AL + rm;
+ default:
+ return 0;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * resolve_gpr64() - 64bit General Purpose Register-Selection.
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+resolve_gpr64(struct ud* u, enum ud_operand_code gpr_op, enum ud_operand_size * size)
+{
+ if (gpr_op >= OP_rAXr8 && gpr_op <= OP_rDIr15)
+ gpr_op = (gpr_op - OP_rAXr8) | (REX_B(u->pfx_rex) << 3);
+ else gpr_op = (gpr_op - OP_rAX);
+
+ if (u->opr_mode == 16) {
+ *size = 16;
+ return gpr_op + UD_R_AX;
+ }
+ if (u->dis_mode == 32 ||
+ (u->opr_mode == 32 && ! (REX_W(u->pfx_rex) || u->default64))) {
+ *size = 32;
+ return gpr_op + UD_R_EAX;
+ }
+
+ *size = 64;
+ return gpr_op + UD_R_RAX;
+}
+
+/* -----------------------------------------------------------------------------
+ * resolve_gpr32 () - 32bit General Purpose Register-Selection.
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+resolve_gpr32(struct ud* u, enum ud_operand_code gpr_op)
+{
+ gpr_op = gpr_op - OP_eAX;
+
+ if (u->opr_mode == 16)
+ return gpr_op + UD_R_AX;
+
+ return gpr_op + UD_R_EAX;
+}
+
+/* -----------------------------------------------------------------------------
+ * resolve_reg() - Resolves the register type
+ * -----------------------------------------------------------------------------
+ */
+static enum ud_type
+resolve_reg(struct ud* u, unsigned int type, unsigned char i)
+{
+ switch (type) {
+ case T_MMX : return UD_R_MM0 + (i & 7);
+ case T_XMM : return UD_R_XMM0 + i;
+ case T_CRG : return UD_R_CR0 + i;
+ case T_DBG : return UD_R_DR0 + i;
+ case T_SEG : {
+ /*
+ * Only 6 segment registers, anything else is an error.
+ */
+ if ((i & 7) > 5) {
+ u->error = 1;
+ } else {
+ return UD_R_ES + (i & 7);
+ }
+ }
+ case T_NONE:
+ default: return UD_NONE;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * decode_imm() - Decodes Immediate values.
+ * -----------------------------------------------------------------------------
+ */
+static void
+decode_imm(struct ud* u, unsigned int s, struct ud_operand *op)
+{
+ op->size = resolve_operand_size(u, s);
+ op->type = UD_OP_IMM;
+
+ switch (op->size) {
+ case 8: op->lval.sbyte = ud_inp_uint8(u); break;
+ case 16: op->lval.uword = ud_inp_uint16(u); break;
+ case 32: op->lval.udword = ud_inp_uint32(u); break;
+ case 64: op->lval.uqword = ud_inp_uint64(u); break;
+ default: return;
+ }
+}
+
+
+/*
+ * decode_modrm_reg
+ *
+ * Decodes reg field of mod/rm byte
+ *
+ */
+static void
+decode_modrm_reg(struct ud *u,
+ struct ud_operand *operand,
+ unsigned int type,
+ unsigned int size)
+{
+ uint8_t reg = (REX_R(u->pfx_rex) << 3) | MODRM_REG(modrm(u));
+ operand->type = UD_OP_REG;
+ operand->size = resolve_operand_size(u, size);
+
+ if (type == T_GPR) {
+ operand->base = decode_gpr(u, operand->size, reg);
+ } else {
+ operand->base = resolve_reg(u, type, reg);
+ }
+}
+
+
+/*
+ * decode_modrm_rm
+ *
+ * Decodes rm field of mod/rm byte
+ *
+ */
+static void
+decode_modrm_rm(struct ud *u,
+ struct ud_operand *op,
+ unsigned char type,
+ unsigned int size)
+
+{
+ unsigned char mod, rm, reg;
+
+ /* get mod, r/m and reg fields */
+ mod = MODRM_MOD(modrm(u));
+ rm = (REX_B(u->pfx_rex) << 3) | MODRM_RM(modrm(u));
+ reg = (REX_R(u->pfx_rex) << 3) | MODRM_REG(modrm(u));
+
+ op->size = resolve_operand_size(u, size);
+
+ /*
+ * If mod is 11b, then the modrm.rm specifies a register.
+ *
+ */
+ if (mod == 3) {
+ op->type = UD_OP_REG;
+ if (type == T_GPR) {
+ op->base = decode_gpr(u, op->size, rm);
+ } else {
+ op->base = resolve_reg(u, type, (REX_B(u->pfx_rex) << 3) | (rm & 7));
+ }
+ return;
+ }
+
+
+ /*
+ * !11 => Memory Address
+ */
+ op->type = UD_OP_MEM;
+
+ if (u->adr_mode == 64) {
+ op->base = UD_R_RAX + rm;
+ if (mod == 1) {
+ op->offset = 8;
+ } else if (mod == 2) {
+ op->offset = 32;
+ } else if (mod == 0 && (rm & 7) == 5) {
+ op->base = UD_R_RIP;
+ op->offset = 32;
+ } else {
+ op->offset = 0;
+ }
+ /*
+ * Scale-Index-Base (SIB)
+ */
+ if ((rm & 7) == 4) {
+ ud_inp_next(u);
+
+ op->scale = (1 << SIB_S(ud_inp_curr(u))) & ~1;
+ op->index = UD_R_RAX + (SIB_I(ud_inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
+ op->base = UD_R_RAX + (SIB_B(ud_inp_curr(u)) | (REX_B(u->pfx_rex) << 3));
+
+ /* special conditions for base reference */
+ if (op->index == UD_R_RSP) {
+ op->index = UD_NONE;
+ op->scale = UD_NONE;
+ }
+
+ if (op->base == UD_R_RBP || op->base == UD_R_R13) {
+ if (mod == 0) {
+ op->base = UD_NONE;
+ }
+ if (mod == 1) {
+ op->offset = 8;
+ } else {
+ op->offset = 32;
+ }
+ }
+ }
+ } else if (u->adr_mode == 32) {
+ op->base = UD_R_EAX + rm;
+ if (mod == 1) {
+ op->offset = 8;
+ } else if (mod == 2) {
+ op->offset = 32;
+ } else if (mod == 0 && rm == 5) {
+ op->base = UD_NONE;
+ op->offset = 32;
+ } else {
+ op->offset = 0;
+ }
+
+ /* Scale-Index-Base (SIB) */
+ if ((rm & 7) == 4) {
+ ud_inp_next(u);
+
+ op->scale = (1 << SIB_S(ud_inp_curr(u))) & ~1;
+ op->index = UD_R_EAX + (SIB_I(ud_inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
+ op->base = UD_R_EAX + (SIB_B(ud_inp_curr(u)) | (REX_B(u->pfx_rex) << 3));
+
+ if (op->index == UD_R_ESP) {
+ op->index = UD_NONE;
+ op->scale = UD_NONE;
+ }
+
+ /* special condition for base reference */
+ if (op->base == UD_R_EBP) {
+ if (mod == 0) {
+ op->base = UD_NONE;
+ }
+ if (mod == 1) {
+ op->offset = 8;
+ } else {
+ op->offset = 32;
+ }
+ }
+ }
+ } else {
+ const unsigned int bases[] = { UD_R_BX, UD_R_BX, UD_R_BP, UD_R_BP,
+ UD_R_SI, UD_R_DI, UD_R_BP, UD_R_BX };
+ const unsigned int indices[] = { UD_R_SI, UD_R_DI, UD_R_SI, UD_R_DI,
+ UD_NONE, UD_NONE, UD_NONE, UD_NONE };
+ op->base = bases[rm & 7];
+ op->index = indices[rm & 7];
+ if (mod == 0 && rm == 6) {
+ op->offset= 16;
+ op->base = UD_NONE;
+ } else if (mod == 1) {
+ op->offset = 8;
+ } else if (mod == 2) {
+ op->offset = 16;
+ }
+ }
+
+ /*
+ * extract offset, if any
+ */
+ switch (op->offset) {
+ case 8 : op->lval.ubyte = ud_inp_uint8(u); break;
+ case 16: op->lval.uword = ud_inp_uint16(u); break;
+ case 32: op->lval.udword = ud_inp_uint32(u); break;
+ case 64: op->lval.uqword = ud_inp_uint64(u); break;
+ default: break;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * decode_o() - Decodes offset
+ * -----------------------------------------------------------------------------
+ */
+static void
+decode_o(struct ud* u, unsigned int s, struct ud_operand *op)
+{
+ switch (u->adr_mode) {
+ case 64:
+ op->offset = 64;
+ op->lval.uqword = ud_inp_uint64(u);
+ break;
+ case 32:
+ op->offset = 32;
+ op->lval.udword = ud_inp_uint32(u);
+ break;
+ case 16:
+ op->offset = 16;
+ op->lval.uword = ud_inp_uint16(u);
+ break;
+ default:
+ return;
+ }
+ op->type = UD_OP_MEM;
+ op->size = resolve_operand_size(u, s);
+}
+
+/* -----------------------------------------------------------------------------
+ * decode_operand() - Disassembles an operand.
+ * -----------------------------------------------------------------------------
+ */
+static int
+decode_operand(struct ud *u,
+ struct ud_operand *operand,
+ enum ud_operand_code type,
+ unsigned int size)
+{
+ switch (type) {
+ case OP_A :
+ decode_a(u, operand);
+ break;
+ case OP_MR:
+ if (MODRM_MOD(modrm(u)) == 3) {
+ decode_modrm_rm(u, operand, T_GPR,
+ size == SZ_DY ? SZ_MDQ : SZ_V);
+ } else if (size == SZ_WV) {
+ decode_modrm_rm( u, operand, T_GPR, SZ_W);
+ } else if (size == SZ_BV) {
+ decode_modrm_rm( u, operand, T_GPR, SZ_B);
+ } else if (size == SZ_DY) {
+ decode_modrm_rm( u, operand, T_GPR, SZ_D);
+ } else {
+ ASSERT(!"unexpected size");
+ }
+ break;
+ case OP_M:
+ if (MODRM_MOD(modrm(u)) == 3) {
+ u->error = 1;
+ }
+ /* intended fall through */
+ case OP_E:
+ decode_modrm_rm(u, operand, T_GPR, size);
+ break;
+ break;
+ case OP_G:
+ decode_modrm_reg(u, operand, T_GPR, size);
+ break;
+ case OP_I:
+ decode_imm(u, size, operand);
+ break;
+ case OP_I1:
+ operand->type = UD_OP_CONST;
+ operand->lval.udword = 1;
+ break;
+ case OP_PR:
+ if (MODRM_MOD(modrm(u)) != 3) {
+ u->error = 1;
+ }
+ decode_modrm_rm(u, operand, T_MMX, size);
+ break;
+ case OP_P:
+ decode_modrm_reg(u, operand, T_MMX, size);
+ break;
+ case OP_VR:
+ if (MODRM_MOD(modrm(u)) != 3) {
+ u->error = 1;
+ }
+ /* intended fall through */
+ case OP_W:
+ decode_modrm_rm(u, operand, T_XMM, size);
+ break;
+ case OP_V:
+ decode_modrm_reg(u, operand, T_XMM, size);
+ break;
+ case OP_S:
+ decode_modrm_reg(u, operand, T_SEG, size);
+ break;
+ case OP_AL:
+ case OP_CL:
+ case OP_DL:
+ case OP_BL:
+ case OP_AH:
+ case OP_CH:
+ case OP_DH:
+ case OP_BH:
+ operand->type = UD_OP_REG;
+ operand->base = UD_R_AL + (type - OP_AL);
+ operand->size = 8;
+ break;
+ case OP_DX:
+ operand->type = UD_OP_REG;
+ operand->base = UD_R_DX;
+ operand->size = 16;
+ break;
+ case OP_O:
+ decode_o(u, size, operand);
+ break;
+ case OP_rAXr8:
+ case OP_rCXr9:
+ case OP_rDXr10:
+ case OP_rBXr11:
+ case OP_rSPr12:
+ case OP_rBPr13:
+ case OP_rSIr14:
+ case OP_rDIr15:
+ case OP_rAX:
+ case OP_rCX:
+ case OP_rDX:
+ case OP_rBX:
+ case OP_rSP:
+ case OP_rBP:
+ case OP_rSI:
+ case OP_rDI:
+ operand->type = UD_OP_REG;
+ operand->base = resolve_gpr64(u, type, &operand->size);
+ break;
+ case OP_ALr8b:
+ case OP_CLr9b:
+ case OP_DLr10b:
+ case OP_BLr11b:
+ case OP_AHr12b:
+ case OP_CHr13b:
+ case OP_DHr14b:
+ case OP_BHr15b: {
+ ud_type_t gpr = (type - OP_ALr8b) + UD_R_AL
+ + (REX_B(u->pfx_rex) << 3);
+ if (UD_R_AH <= gpr && u->pfx_rex) {
+ gpr = gpr + 4;
+ }
+ operand->type = UD_OP_REG;
+ operand->base = gpr;
+ break;
+ }
+ case OP_eAX:
+ case OP_eCX:
+ case OP_eDX:
+ case OP_eBX:
+ case OP_eSP:
+ case OP_eBP:
+ case OP_eSI:
+ case OP_eDI:
+ operand->type = UD_OP_REG;
+ operand->base = resolve_gpr32(u, type);
+ operand->size = u->opr_mode == 16 ? 16 : 32;
+ break;
+ case OP_ES:
+ case OP_CS:
+ case OP_DS:
+ case OP_SS:
+ case OP_FS:
+ case OP_GS:
+ /* in 64bits mode, only fs and gs are allowed */
+ if (u->dis_mode == 64) {
+ if (type != OP_FS && type != OP_GS) {
+ u->error= 1;
+ }
+ }
+ operand->type = UD_OP_REG;
+ operand->base = (type - OP_ES) + UD_R_ES;
+ operand->size = 16;
+ break;
+ case OP_J :
+ decode_imm(u, size, operand);
+ operand->type = UD_OP_JIMM;
+ break ;
+ case OP_Q:
+ decode_modrm_rm(u, operand, T_MMX, size);
+ break;
+ case OP_R :
+ decode_modrm_rm(u, operand, T_GPR, size);
+ break;
+ case OP_C:
+ decode_modrm_reg(u, operand, T_CRG, size);
+ break;
+ case OP_D:
+ decode_modrm_reg(u, operand, T_DBG, size);
+ break;
+ case OP_I3 :
+ operand->type = UD_OP_CONST;
+ operand->lval.sbyte = 3;
+ break;
+ case OP_ST0:
+ case OP_ST1:
+ case OP_ST2:
+ case OP_ST3:
+ case OP_ST4:
+ case OP_ST5:
+ case OP_ST6:
+ case OP_ST7:
+ operand->type = UD_OP_REG;
+ operand->base = (type - OP_ST0) + UD_R_ST0;
+ operand->size = 0;
+ break;
+ case OP_AX:
+ operand->type = UD_OP_REG;
+ operand->base = UD_R_AX;
+ operand->size = 16;
+ break;
+ default :
+ operand->type = UD_NONE;
+ break;
+ }
+ return 0;
+}
+
+
+/*
+ * decode_operands
+ *
+ * Disassemble up to 3 operands of the current instruction being
+ * disassembled. By the end of the function, the operand fields
+ * of the ud structure will have been filled.
+ */
+static int
+decode_operands(struct ud* u)
+{
+ decode_operand(u, &u->operand[0],
+ u->itab_entry->operand1.type,
+ u->itab_entry->operand1.size);
+ decode_operand(u, &u->operand[1],
+ u->itab_entry->operand2.type,
+ u->itab_entry->operand2.size);
+ decode_operand(u, &u->operand[2],
+ u->itab_entry->operand3.type,
+ u->itab_entry->operand3.size);
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * clear_insn() - clear instruction structure
+ * -----------------------------------------------------------------------------
+ */
+static void
+clear_insn(register struct ud* u)
+{
+ u->error = 0;
+ u->pfx_seg = 0;
+ u->pfx_opr = 0;
+ u->pfx_adr = 0;
+ u->pfx_lock = 0;
+ u->pfx_repne = 0;
+ u->pfx_rep = 0;
+ u->pfx_repe = 0;
+ u->pfx_rex = 0;
+ u->pfx_insn = 0;
+ u->mnemonic = UD_Inone;
+ u->itab_entry = NULL;
+ u->have_modrm = 0;
+
+ memset( &u->operand[ 0 ], 0, sizeof( struct ud_operand ) );
+ memset( &u->operand[ 1 ], 0, sizeof( struct ud_operand ) );
+ memset( &u->operand[ 2 ], 0, sizeof( struct ud_operand ) );
+}
+
+static int
+resolve_mode( struct ud* u )
+{
+ /* if in error state, bail out */
+ if ( u->error ) return -1;
+
+ /* propagate prefix effects */
+ if ( u->dis_mode == 64 ) { /* set 64bit-mode flags */
+
+ /* Check validity of instruction m64 */
+ if ( P_INV64( u->itab_entry->prefix ) ) {
+ u->error = 1;
+ return -1;
+ }
+
+ /* effective rex prefix is the effective mask for the
+ * instruction hard-coded in the opcode map.
+ */
+ u->pfx_rex = ( u->pfx_rex & 0x40 ) |
+ ( u->pfx_rex & REX_PFX_MASK( u->itab_entry->prefix ) );
+
+ /* whether this instruction has a default operand size of
+ * 64bit, also hardcoded into the opcode map.
+ */
+ u->default64 = P_DEF64( u->itab_entry->prefix );
+ /* calculate effective operand size */
+ if ( REX_W( u->pfx_rex ) ) {
+ u->opr_mode = 64;
+ } else if ( u->pfx_opr ) {
+ u->opr_mode = 16;
+ } else {
+ /* unless the default opr size of instruction is 64,
+ * the effective operand size in the absence of rex.w
+ * prefix is 32.
+ */
+ u->opr_mode = ( u->default64 ) ? 64 : 32;
+ }
+
+ /* calculate effective address size */
+ u->adr_mode = (u->pfx_adr) ? 32 : 64;
+ } else if ( u->dis_mode == 32 ) { /* set 32bit-mode flags */
+ u->opr_mode = ( u->pfx_opr ) ? 16 : 32;
+ u->adr_mode = ( u->pfx_adr ) ? 16 : 32;
+ } else if ( u->dis_mode == 16 ) { /* set 16bit-mode flags */
+ u->opr_mode = ( u->pfx_opr ) ? 32 : 16;
+ u->adr_mode = ( u->pfx_adr ) ? 32 : 16;
+ }
+
+ /* These flags determine which operand to apply the operand size
+ * cast to.
+ */
+ u->c1 = ( P_C1( u->itab_entry->prefix ) ) ? 1 : 0;
+ u->c2 = ( P_C2( u->itab_entry->prefix ) ) ? 1 : 0;
+ u->c3 = ( P_C3( u->itab_entry->prefix ) ) ? 1 : 0;
+
+ /* set flags for implicit addressing */
+ u->implicit_addr = P_IMPADDR( u->itab_entry->prefix );
+
+ return 0;
+}
+
+static int gen_hex( struct ud *u )
+{
+ unsigned int i;
+ unsigned char *src_ptr = ud_inp_sess( u );
+ char* src_hex;
+
+ /* bail out if in error state. */
+ if ( u->error ) return -1;
+ /* output buffer pointer */
+ src_hex = ( char* ) u->insn_hexcode;
+ /* for each byte used to decode instruction */
+ for ( i = 0; i < u->inp_ctr; ++i, ++src_ptr) {
+ sprintf( src_hex, "%02x", *src_ptr & 0xFF );
+ src_hex += 2;
+ }
+ return 0;
+}
+
+
+static inline int
+decode_insn(struct ud *u, uint16_t ptr)
+{
+ ASSERT((ptr & 0x8000) == 0);
+ u->itab_entry = &ud_itab[ ptr ];
+ u->mnemonic = u->itab_entry->mnemonic;
+ return (resolve_mode(u) == 0 &&
+ decode_operands(u) == 0 &&
+ resolve_mnemonic(u) == 0) ? 0 : -1;
+}
+
+
+/*
+ * decode_3dnow()
+ *
+ * Decoding 3dnow is a little tricky because of its strange opcode
+ * structure. The final opcode disambiguation depends on the last
+ * byte that comes after the operands have been decoded. Fortunately,
+ * all 3dnow instructions have the same set of operand types. So we
+ * go ahead and decode the instruction by picking an arbitrarily chosen
+ * valid entry in the table, decode the operands, and read the final
+ * byte to resolve the mnemonic.
+ */
+static inline int
+decode_3dnow(struct ud* u)
+{
+ uint16_t ptr;
+ ASSERT(u->le->type == UD_TAB__OPC_3DNOW);
+ ASSERT(u->le->table[0xc] != 0);
+ decode_insn(u, u->le->table[0xc]);
+ ud_inp_next(u);
+ if (u->error) {
+ return -1;
+ }
+ ptr = u->le->table[ud_inp_curr(u)];
+ ASSERT((ptr & 0x8000) == 0);
+ u->mnemonic = ud_itab[ptr].mnemonic;
+ return 0;
+}
+
+
+static int
+decode_ssepfx(struct ud *u)
+{
+ uint8_t idx = ((u->pfx_insn & 0xf) + 1) / 2;
+ if (u->le->table[idx] == 0) {
+ idx = 0;
+ }
+ if (idx && u->le->table[idx] != 0) {
+ /*
+ * "Consume" the prefix as a part of the opcode, so it is no
+ * longer exported as an instruction prefix.
+ */
+ switch (u->pfx_insn) {
+ case 0xf2:
+ u->pfx_repne = 0;
+ break;
+ case 0xf3:
+ u->pfx_rep = 0;
+ u->pfx_repe = 0;
+ break;
+ case 0x66:
+ u->pfx_opr = 0;
+ break;
+ }
+ }
+ return decode_ext(u, u->le->table[idx]);
+}
+
+
+/*
+ * decode_ext()
+ *
+ * Decode opcode extensions (if any)
+ */
+static int
+decode_ext(struct ud *u, uint16_t ptr)
+{
+ uint8_t idx = 0;
+ if ((ptr & 0x8000) == 0) {
+ return decode_insn(u, ptr);
+ }
+ u->le = &ud_lookup_table_list[(~0x8000 & ptr)];
+ if (u->le->type == UD_TAB__OPC_3DNOW) {
+ return decode_3dnow(u);
+ }
+
+ switch (u->le->type) {
+ case UD_TAB__OPC_MOD:
+ /* !11 = 0, 11 = 1 */
+ idx = (MODRM_MOD(modrm(u)) + 1) / 4;
+ break;
+ /* disassembly mode/operand size/address size based tables.
+ * 16 = 0,, 32 = 1, 64 = 2
+ */
+ case UD_TAB__OPC_MODE:
+ idx = u->dis_mode / 32;
+ break;
+ case UD_TAB__OPC_OSIZE:
+ idx = eff_opr_mode(u->dis_mode, REX_W(u->pfx_rex), u->pfx_opr) / 32;
+ break;
+ case UD_TAB__OPC_ASIZE:
+ idx = eff_adr_mode(u->dis_mode, u->pfx_adr) / 32;
+ break;
+ case UD_TAB__OPC_X87:
+ idx = modrm(u) - 0xC0;
+ break;
+ case UD_TAB__OPC_VENDOR:
+ if (u->vendor == UD_VENDOR_ANY) {
+ /* choose a valid entry */
+ idx = (u->le->table[idx] != 0) ? 0 : 1;
+ } else if (u->vendor == UD_VENDOR_AMD) {
+ idx = 0;
+ } else {
+ idx = 1;
+ }
+ break;
+ case UD_TAB__OPC_RM:
+ idx = MODRM_RM(modrm(u));
+ break;
+ case UD_TAB__OPC_REG:
+ idx = MODRM_REG(modrm(u));
+ break;
+ case UD_TAB__OPC_SSE:
+ return decode_ssepfx(u);
+ default:
+ ASSERT(!"not reached");
+ break;
+ }
+
+ return decode_ext(u, u->le->table[idx]);
+}
+
+
+static inline int
+decode_opcode(struct ud *u)
+{
+ uint16_t ptr;
+ ASSERT(u->le->type == UD_TAB__OPC_TABLE);
+ ud_inp_next(u);
+ if (u->error) {
+ return -1;
+ }
+ ptr = u->le->table[ud_inp_curr(u)];
+ if (ptr & 0x8000) {
+ u->le = &ud_lookup_table_list[ptr & ~0x8000];
+ if (u->le->type == UD_TAB__OPC_TABLE) {
+ return decode_opcode(u);
+ }
+ }
+ return decode_ext(u, ptr);
+}
+
+
+/* =============================================================================
+ * ud_decode() - Instruction decoder. Returns the number of bytes decoded.
+ * =============================================================================
+ */
+unsigned int
+ud_decode(struct ud *u)
+{
+ ud_inp_start(u);
+ clear_insn(u);
+ u->le = &ud_lookup_table_list[0];
+ u->error = decode_prefixes(u) == -1 ||
+ decode_opcode(u) == -1 ||
+ u->error;
+ /* Handle decode error. */
+ if (u->error) {
+ /* clear out the decode data. */
+ clear_insn(u);
+ /* mark the sequence of bytes as invalid. */
+ u->itab_entry = & s_ie__invalid;
+ u->mnemonic = u->itab_entry->mnemonic;
+ }
+
+ /* maybe this stray segment override byte
+ * should be spewed out?
+ */
+ if ( !P_SEG( u->itab_entry->prefix ) &&
+ u->operand[0].type != UD_OP_MEM &&
+ u->operand[1].type != UD_OP_MEM )
+ u->pfx_seg = 0;
+
+ u->insn_offset = u->pc; /* set offset of instruction */
+ u->insn_fill = 0; /* set translation buffer index to 0 */
+ u->pc += u->inp_ctr; /* move program counter by bytes decoded */
+ gen_hex( u ); /* generate hex code */
+
+ /* return number of bytes disassembled. */
+ return u->inp_ctr;
+}
+
+/*
+vim: set ts=2 sw=2 expandtab
+*/
+
+#endif // USE(UDIS86)
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_decode.h b/src/3rdparty/masm/disassembler/udis86/udis86_decode.h
new file mode 100644
index 0000000000..940ed5ad6f
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_decode.h
@@ -0,0 +1,258 @@
+/* udis86 - libudis86/decode.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_DECODE_H
+#define UD_DECODE_H
+
+#include "udis86_types.h"
+#include "udis86_itab.h"
+
+#define MAX_INSN_LENGTH 15
+
+/* register classes */
+#define T_NONE 0
+#define T_GPR 1
+#define T_MMX 2
+#define T_CRG 3
+#define T_DBG 4
+#define T_SEG 5
+#define T_XMM 6
+
+/* itab prefix bits */
+#define P_none ( 0 )
+#define P_cast ( 1 << 0 )
+#define P_CAST(n) ( ( n >> 0 ) & 1 )
+#define P_c1 ( 1 << 0 )
+#define P_C1(n) ( ( n >> 0 ) & 1 )
+#define P_rexb ( 1 << 1 )
+#define P_REXB(n) ( ( n >> 1 ) & 1 )
+#define P_depM ( 1 << 2 )
+#define P_DEPM(n) ( ( n >> 2 ) & 1 )
+#define P_c3 ( 1 << 3 )
+#define P_C3(n) ( ( n >> 3 ) & 1 )
+#define P_inv64 ( 1 << 4 )
+#define P_INV64(n) ( ( n >> 4 ) & 1 )
+#define P_rexw ( 1 << 5 )
+#define P_REXW(n) ( ( n >> 5 ) & 1 )
+#define P_c2 ( 1 << 6 )
+#define P_C2(n) ( ( n >> 6 ) & 1 )
+#define P_def64 ( 1 << 7 )
+#define P_DEF64(n) ( ( n >> 7 ) & 1 )
+#define P_rexr ( 1 << 8 )
+#define P_REXR(n) ( ( n >> 8 ) & 1 )
+#define P_oso ( 1 << 9 )
+#define P_OSO(n) ( ( n >> 9 ) & 1 )
+#define P_aso ( 1 << 10 )
+#define P_ASO(n) ( ( n >> 10 ) & 1 )
+#define P_rexx ( 1 << 11 )
+#define P_REXX(n) ( ( n >> 11 ) & 1 )
+#define P_ImpAddr ( 1 << 12 )
+#define P_IMPADDR(n) ( ( n >> 12 ) & 1 )
+#define P_seg ( 1 << 13 )
+#define P_SEG(n) ( ( n >> 13 ) & 1 )
+#define P_sext ( 1 << 14 )
+#define P_SEXT(n) ( ( n >> 14 ) & 1 )
+
+/* rex prefix bits */
+#define REX_W(r) ( ( 0xF & ( r ) ) >> 3 )
+#define REX_R(r) ( ( 0x7 & ( r ) ) >> 2 )
+#define REX_X(r) ( ( 0x3 & ( r ) ) >> 1 )
+#define REX_B(r) ( ( 0x1 & ( r ) ) >> 0 )
+#define REX_PFX_MASK(n) ( ( P_REXW(n) << 3 ) | \
+ ( P_REXR(n) << 2 ) | \
+ ( P_REXX(n) << 1 ) | \
+ ( P_REXB(n) << 0 ) )
+
+/* scale-index-base bits */
+#define SIB_S(b) ( ( b ) >> 6 )
+#define SIB_I(b) ( ( ( b ) >> 3 ) & 7 )
+#define SIB_B(b) ( ( b ) & 7 )
+
+/* modrm bits */
+#define MODRM_REG(b) ( ( ( b ) >> 3 ) & 7 )
+#define MODRM_NNN(b) ( ( ( b ) >> 3 ) & 7 )
+#define MODRM_MOD(b) ( ( ( b ) >> 6 ) & 3 )
+#define MODRM_RM(b) ( ( b ) & 7 )
+
+/* operand type constants -- order is important! */
+
+enum ud_operand_code {
+ OP_NONE,
+
+ OP_A, OP_E, OP_M, OP_G,
+ OP_I,
+
+ OP_AL, OP_CL, OP_DL, OP_BL,
+ OP_AH, OP_CH, OP_DH, OP_BH,
+
+ OP_ALr8b, OP_CLr9b, OP_DLr10b, OP_BLr11b,
+ OP_AHr12b, OP_CHr13b, OP_DHr14b, OP_BHr15b,
+
+ OP_AX, OP_CX, OP_DX, OP_BX,
+ OP_SI, OP_DI, OP_SP, OP_BP,
+
+ OP_rAX, OP_rCX, OP_rDX, OP_rBX,
+ OP_rSP, OP_rBP, OP_rSI, OP_rDI,
+
+ OP_rAXr8, OP_rCXr9, OP_rDXr10, OP_rBXr11,
+ OP_rSPr12, OP_rBPr13, OP_rSIr14, OP_rDIr15,
+
+ OP_eAX, OP_eCX, OP_eDX, OP_eBX,
+ OP_eSP, OP_eBP, OP_eSI, OP_eDI,
+
+ OP_ES, OP_CS, OP_SS, OP_DS,
+ OP_FS, OP_GS,
+
+ OP_ST0, OP_ST1, OP_ST2, OP_ST3,
+ OP_ST4, OP_ST5, OP_ST6, OP_ST7,
+
+ OP_J, OP_S, OP_O,
+ OP_I1, OP_I3,
+
+ OP_V, OP_W, OP_Q, OP_P,
+
+ OP_R, OP_C, OP_D, OP_VR, OP_PR,
+
+ OP_MR
+} UD_ATTR_PACKED;
+
+
+/* operand size constants */
+
+enum ud_operand_size {
+ SZ_NA = 0,
+ SZ_Z = 1,
+ SZ_V = 2,
+ SZ_P = 3,
+ SZ_WP = 4,
+ SZ_DP = 5,
+ SZ_MDQ = 6,
+ SZ_RDQ = 7,
+
+ /* the following values are used as is,
+ * and thus hard-coded. changing them
+ * will break internals
+ */
+ SZ_B = 8,
+ SZ_W = 16,
+ SZ_D = 32,
+ SZ_Q = 64,
+ SZ_T = 80,
+ SZ_O = 128,
+
+ SZ_WV = 17,
+ SZ_BV = 18,
+ SZ_DY = 19
+
+} UD_ATTR_PACKED;
+
+
+/* A single operand of an entry in the instruction table.
+ * (internal use only)
+ */
+struct ud_itab_entry_operand
+{
+ enum ud_operand_code type;
+ enum ud_operand_size size;
+};
+
+
+/* A single entry in an instruction table.
+ *(internal use only)
+ */
+struct ud_itab_entry
+{
+ enum ud_mnemonic_code mnemonic;
+ struct ud_itab_entry_operand operand1;
+ struct ud_itab_entry_operand operand2;
+ struct ud_itab_entry_operand operand3;
+ uint32_t prefix;
+};
+
+struct ud_lookup_table_list_entry {
+ const uint16_t *table;
+ enum ud_table_type type;
+ const char *meta;
+};
+
+
+static inline unsigned int sse_pfx_idx( const unsigned int pfx )
+{
+ /* 00 = 0
+ * f2 = 1
+ * f3 = 2
+ * 66 = 3
+ */
+ return ( ( pfx & 0xf ) + 1 ) / 2;
+}
+
+static inline unsigned int mode_idx( const unsigned int mode )
+{
+ /* 16 = 0
+ * 32 = 1
+ * 64 = 2
+ */
+ return ( mode / 32 );
+}
+
+static inline unsigned int modrm_mod_idx( const unsigned int mod )
+{
+ /* !11 = 0
+ * 11 = 1
+ */
+ return ( mod + 1 ) / 4;
+}
+
+static inline unsigned int vendor_idx( const unsigned int vendor )
+{
+ switch ( vendor ) {
+ case UD_VENDOR_AMD: return 0;
+ case UD_VENDOR_INTEL: return 1;
+ case UD_VENDOR_ANY: return 2;
+ default: return 2;
+ }
+}
+
+static inline unsigned int is_group_ptr( uint16_t ptr )
+{
+ return ( 0x8000 & ptr );
+}
+
+static inline unsigned int group_idx( uint16_t ptr )
+{
+ return ( ~0x8000 & ptr );
+}
+
+
+extern struct ud_itab_entry ud_itab[];
+extern struct ud_lookup_table_list_entry ud_lookup_table_list[];
+
+#endif /* UD_DECODE_H */
+
+/* vim:cindent
+ * vim:expandtab
+ * vim:ts=4
+ * vim:sw=4
+ */
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_extern.h b/src/3rdparty/masm/disassembler/udis86/udis86_extern.h
new file mode 100644
index 0000000000..8e87721e8c
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_extern.h
@@ -0,0 +1,88 @@
+/* udis86 - libudis86/extern.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_EXTERN_H
+#define UD_EXTERN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "udis86_types.h"
+
+/* ============================= PUBLIC API ================================= */
+
+extern void ud_init(struct ud*);
+
+extern void ud_set_mode(struct ud*, uint8_t);
+
+extern void ud_set_pc(struct ud*, uint64_t);
+
+extern void ud_set_input_hook(struct ud*, int (*)(struct ud*));
+
+extern void ud_set_input_buffer(struct ud*, uint8_t*, size_t);
+
+#ifndef __UD_STANDALONE__
+extern void ud_set_input_file(struct ud*, FILE*);
+#endif /* __UD_STANDALONE__ */
+
+extern void ud_set_vendor(struct ud*, unsigned);
+
+extern void ud_set_syntax(struct ud*, void (*)(struct ud*));
+
+extern void ud_input_skip(struct ud*, size_t);
+
+extern int ud_input_end(struct ud*);
+
+extern unsigned int ud_decode(struct ud*);
+
+extern unsigned int ud_disassemble(struct ud*);
+
+extern void ud_translate_intel(struct ud*);
+
+extern void ud_translate_att(struct ud*);
+
+extern char* ud_insn_asm(struct ud* u);
+
+extern uint8_t* ud_insn_ptr(struct ud* u);
+
+extern uint64_t ud_insn_off(struct ud*);
+
+extern char* ud_insn_hex(struct ud*);
+
+extern unsigned int ud_insn_len(struct ud* u);
+
+extern const char* ud_lookup_mnemonic(enum ud_mnemonic_code c);
+
+extern void ud_set_user_opaque_data(struct ud*, void*);
+
+extern void *ud_get_user_opaque_data(struct ud*);
+
+/* ========================================================================== */
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_input.c b/src/3rdparty/masm/disassembler/udis86/udis86_input.c
new file mode 100644
index 0000000000..4dbe328766
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_input.c
@@ -0,0 +1,262 @@
+/* udis86 - libudis86/input.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_extern.h"
+#include "udis86_types.h"
+#include "udis86_input.h"
+
+/* -----------------------------------------------------------------------------
+ * inp_buff_hook() - Hook for buffered inputs.
+ * -----------------------------------------------------------------------------
+ */
+static int
+inp_buff_hook(struct ud* u)
+{
+ if (u->inp_buff < u->inp_buff_end)
+ return *u->inp_buff++;
+ else return -1;
+}
+
+#ifndef __UD_STANDALONE__
+/* -----------------------------------------------------------------------------
+ * inp_file_hook() - Hook for FILE inputs.
+ * -----------------------------------------------------------------------------
+ */
+static int
+inp_file_hook(struct ud* u)
+{
+ return fgetc(u->inp_file);
+}
+#endif /* __UD_STANDALONE__*/
+
+/* =============================================================================
+ * ud_set_input_hook() - Sets input hook.
+ * =============================================================================
+ */
+extern void
+ud_set_input_hook(register struct ud* u, int (*hook)(struct ud*))
+{
+ u->inp_hook = hook;
+ ud_inp_init(u);
+}
+
+extern void
+ud_set_user_opaque_data( struct ud * u, void * opaque )
+{
+ u->user_opaque_data = opaque;
+}
+
+extern void *
+ud_get_user_opaque_data( struct ud * u )
+{
+ return u->user_opaque_data;
+}
+
+/* =============================================================================
+ * ud_set_input_buffer() - Set buffer as input.
+ * =============================================================================
+ */
+extern void
+ud_set_input_buffer(register struct ud* u, uint8_t* buf, size_t len)
+{
+ u->inp_hook = inp_buff_hook;
+ u->inp_buff = buf;
+ u->inp_buff_end = buf + len;
+ ud_inp_init(u);
+}
+
+#ifndef __UD_STANDALONE__
+/* =============================================================================
+ * ud_set_input_file() - Set file as input.
+ * =============================================================================
+ */
+extern void
+ud_set_input_file(register struct ud* u, FILE* f)
+{
+ u->inp_hook = inp_file_hook;
+ u->inp_file = f;
+ ud_inp_init(u);
+}
+#endif /* __UD_STANDALONE__ */
+
+/* =============================================================================
+ * ud_input_skip() - Skip n input bytes.
+ * =============================================================================
+ */
+extern void
+ud_input_skip(struct ud* u, size_t n)
+{
+ while (n--) {
+ u->inp_hook(u);
+ }
+}
+
+/* =============================================================================
+ * ud_input_end() - Test for end of input.
+ * =============================================================================
+ */
+extern int
+ud_input_end(struct ud* u)
+{
+ return (u->inp_curr == u->inp_fill) && u->inp_end;
+}
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_next() - Loads and returns the next byte from input.
+ *
+ * inp_curr and inp_fill are pointers to the cache. The program is written based
+ * on the property that they are 8-bits in size, and will eventually wrap around
+ * forming a circular buffer. So the cache is 256 bytes in size, kind of
+ * unnecessary yet optimized.
+ *
+ * A buffer inp_sess stores the bytes disassembled for a single session.
+ * -----------------------------------------------------------------------------
+ */
+extern uint8_t ud_inp_next(struct ud* u)
+{
+ int c = -1;
+  /* if current pointer is not up to the fill point in the
+ * input cache.
+ */
+ if ( u->inp_curr != u->inp_fill ) {
+ c = u->inp_cache[ ++u->inp_curr ];
+ /* if !end-of-input, call the input hook and get a byte */
+ } else if ( u->inp_end || ( c = u->inp_hook( u ) ) == -1 ) {
+      /* end-of-input, mark it as an error, since the decoder
+ * expected a byte more.
+ */
+ u->error = 1;
+ /* flag end of input */
+ u->inp_end = 1;
+ return 0;
+ } else {
+ /* increment pointers, we have a new byte. */
+ u->inp_curr = ++u->inp_fill;
+ /* add the byte to the cache */
+ u->inp_cache[ u->inp_fill ] = c;
+ }
+ /* record bytes input per decode-session. */
+ u->inp_sess[ u->inp_ctr++ ] = c;
+ /* return byte */
+ return ( uint8_t ) c;
+}
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_back() - Move back a single byte in the stream.
+ * -----------------------------------------------------------------------------
+ */
+extern void
+ud_inp_back(struct ud* u)
+{
+ if ( u->inp_ctr > 0 ) {
+ --u->inp_curr;
+ --u->inp_ctr;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_peek() - Peek into the next byte in source.
+ * -----------------------------------------------------------------------------
+ */
+extern uint8_t
+ud_inp_peek(struct ud* u)
+{
+ uint8_t r = ud_inp_next(u);
+ if ( !u->error ) ud_inp_back(u); /* Don't backup if there was an error */
+ return r;
+}
+
+/* -----------------------------------------------------------------------------
+ * ud_inp_move() - Move ahead n input bytes.
+ * -----------------------------------------------------------------------------
+ */
+extern void
+ud_inp_move(struct ud* u, size_t n)
+{
+ while (n--)
+ ud_inp_next(u);
+}
+
+/*------------------------------------------------------------------------------
+ * ud_inp_uintN() - return uintN from source.
+ *------------------------------------------------------------------------------
+ */
+extern uint8_t
+ud_inp_uint8(struct ud* u)
+{
+ return ud_inp_next(u);
+}
+
+extern uint16_t
+ud_inp_uint16(struct ud* u)
+{
+ uint16_t r, ret;
+
+ ret = ud_inp_next(u);
+ r = ud_inp_next(u);
+ return ret | (r << 8);
+}
+
+extern uint32_t
+ud_inp_uint32(struct ud* u)
+{
+ uint32_t r, ret;
+
+ ret = ud_inp_next(u);
+ r = ud_inp_next(u);
+ ret = ret | (r << 8);
+ r = ud_inp_next(u);
+ ret = ret | (r << 16);
+ r = ud_inp_next(u);
+ return ret | (r << 24);
+}
+
+extern uint64_t
+ud_inp_uint64(struct ud* u)
+{
+ uint64_t r, ret;
+
+ ret = ud_inp_next(u);
+ r = ud_inp_next(u);
+ ret = ret | (r << 8);
+ r = ud_inp_next(u);
+ ret = ret | (r << 16);
+ r = ud_inp_next(u);
+ ret = ret | (r << 24);
+ r = ud_inp_next(u);
+ ret = ret | (r << 32);
+ r = ud_inp_next(u);
+ ret = ret | (r << 40);
+ r = ud_inp_next(u);
+ ret = ret | (r << 48);
+ r = ud_inp_next(u);
+ return ret | (r << 56);
+}
+
+#endif // USE(UDIS86)
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_input.h b/src/3rdparty/masm/disassembler/udis86/udis86_input.h
new file mode 100644
index 0000000000..96865a88b5
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_input.h
@@ -0,0 +1,67 @@
+/* udis86 - libudis86/input.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_INPUT_H
+#define UD_INPUT_H
+
+#include "udis86_types.h"
+
+uint8_t ud_inp_next(struct ud*);
+uint8_t ud_inp_peek(struct ud*);
+uint8_t ud_inp_uint8(struct ud*);
+uint16_t ud_inp_uint16(struct ud*);
+uint32_t ud_inp_uint32(struct ud*);
+uint64_t ud_inp_uint64(struct ud*);
+void ud_inp_move(struct ud*, size_t);
+void ud_inp_back(struct ud*);
+
+/* ud_inp_init() - Initializes the input system. */
+#define ud_inp_init(u) \
+do { \
+ u->inp_curr = 0; \
+ u->inp_fill = 0; \
+ u->inp_ctr = 0; \
+ u->inp_end = 0; \
+} while (0)
+
+/* ud_inp_start() - Should be called before each decode operation. */
+#define ud_inp_start(u) u->inp_ctr = 0
+
+/* ud_inp_reset() - Resets the current pointer to its position before the current
+ * instruction disassembly was started.
+ */
+#define ud_inp_reset(u) \
+do { \
+ u->inp_curr -= u->inp_ctr; \
+ u->inp_ctr = 0; \
+} while (0)
+
+/* ud_inp_sess() - Returns the pointer to current session. */
+#define ud_inp_sess(u) (u->inp_sess)
+
+/* ud_inp_curr() - Returns the current input byte. */
+#define ud_inp_curr(u) ((u)->inp_cache[(u)->inp_curr])
+
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c b/src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c
new file mode 100644
index 0000000000..80dda3a199
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_itab.c"
+
+#endif
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c b/src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c
new file mode 100644
index 0000000000..0d1c57d482
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c
@@ -0,0 +1,252 @@
+/* udis86 - libudis86/syn-att.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_types.h"
+#include "udis86_extern.h"
+#include "udis86_decode.h"
+#include "udis86_itab.h"
+#include "udis86_syn.h"
+
+/* -----------------------------------------------------------------------------
+ * opr_cast() - Prints an operand cast.
+ * -----------------------------------------------------------------------------
+ */
+static void
+opr_cast(struct ud* u, struct ud_operand* op)
+{
+ switch(op->size) {
+ case 16 : case 32 :
+ mkasm(u, "*"); break;
+ default: break;
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * gen_operand() - Generates assembly output for each operand.
+ * -----------------------------------------------------------------------------
+ */
+static void
+gen_operand(struct ud* u, struct ud_operand* op)
+{
+ switch(op->type) {
+ case UD_OP_REG:
+ mkasm(u, "%%%s", ud_reg_tab[op->base - UD_R_AL]);
+ break;
+
+ case UD_OP_MEM:
+ if (u->br_far) opr_cast(u, op);
+ if (u->pfx_seg)
+ mkasm(u, "%%%s:", ud_reg_tab[u->pfx_seg - UD_R_AL]);
+ if (op->offset == 8) {
+ if (op->lval.sbyte < 0)
+ mkasm(u, "-0x%x", (-op->lval.sbyte) & 0xff);
+ else mkasm(u, "0x%x", op->lval.sbyte);
+ }
+ else if (op->offset == 16)
+ mkasm(u, "0x%x", op->lval.uword);
+ else if (op->offset == 32)
+ mkasm(u, "0x%lx", (unsigned long)op->lval.udword);
+ else if (op->offset == 64)
+ mkasm(u, "0x" FMT64 "x", op->lval.uqword);
+
+ if (op->base)
+ mkasm(u, "(%%%s", ud_reg_tab[op->base - UD_R_AL]);
+ if (op->index) {
+ if (op->base)
+ mkasm(u, ",");
+ else mkasm(u, "(");
+ mkasm(u, "%%%s", ud_reg_tab[op->index - UD_R_AL]);
+ }
+ if (op->scale)
+ mkasm(u, ",%d", op->scale);
+ if (op->base || op->index)
+ mkasm(u, ")");
+ break;
+
+ case UD_OP_IMM: {
+ int64_t imm = 0;
+ uint64_t sext_mask = 0xffffffffffffffffull;
+ unsigned sext_size = op->size;
+
+ switch (op->size) {
+ case 8: imm = op->lval.sbyte; break;
+ case 16: imm = op->lval.sword; break;
+ case 32: imm = op->lval.sdword; break;
+ case 64: imm = op->lval.sqword; break;
+ }
+ if ( P_SEXT( u->itab_entry->prefix ) ) {
+ sext_size = u->operand[ 0 ].size;
+ if ( u->mnemonic == UD_Ipush )
+ /* push sign-extends to operand size */
+ sext_size = u->opr_mode;
+ }
+ if ( sext_size < 64 )
+ sext_mask = ( 1ull << sext_size ) - 1;
+ mkasm( u, "$0x" FMT64 "x", imm & sext_mask );
+
+ break;
+ }
+
+ case UD_OP_JIMM:
+ switch (op->size) {
+ case 8:
+ mkasm(u, "0x" FMT64 "x", u->pc + op->lval.sbyte);
+ break;
+ case 16:
+ mkasm(u, "0x" FMT64 "x", (u->pc + op->lval.sword) & 0xffff );
+ break;
+ case 32:
+ if (u->dis_mode == 32)
+ mkasm(u, "0x" FMT64 "x", (u->pc + op->lval.sdword) & 0xffffffff);
+ else
+ mkasm(u, "0x" FMT64 "x", u->pc + op->lval.sdword);
+ break;
+ default:break;
+ }
+ break;
+
+ case UD_OP_PTR:
+ switch (op->size) {
+ case 32:
+ mkasm(u, "$0x%x, $0x%x", op->lval.ptr.seg,
+ op->lval.ptr.off & 0xFFFF);
+ break;
+ case 48:
+ mkasm(u, "$0x%x, $0x%lx", op->lval.ptr.seg,
+ (unsigned long)op->lval.ptr.off);
+ break;
+ }
+ break;
+
+ default: return;
+ }
+}
+
+/* =============================================================================
+ * translates to AT&T syntax
+ * =============================================================================
+ */
+extern void
+ud_translate_att(struct ud *u)
+{
+ int size = 0;
+
+ /* check if P_OSO prefix is used */
+ if (! P_OSO(u->itab_entry->prefix) && u->pfx_opr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "o32 ");
+ break;
+ case 32:
+ case 64:
+ mkasm(u, "o16 ");
+ break;
+ }
+ }
+
+ /* check if P_ASO prefix was used */
+ if (! P_ASO(u->itab_entry->prefix) && u->pfx_adr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "a32 ");
+ break;
+ case 32:
+ mkasm(u, "a16 ");
+ break;
+ case 64:
+ mkasm(u, "a32 ");
+ break;
+ }
+ }
+
+ if (u->pfx_lock)
+ mkasm(u, "lock ");
+ if (u->pfx_rep)
+ mkasm(u, "rep ");
+ if (u->pfx_repne)
+ mkasm(u, "repne ");
+
+ /* special instructions */
+ switch (u->mnemonic) {
+ case UD_Iretf:
+ mkasm(u, "lret ");
+ break;
+ case UD_Idb:
+ mkasm(u, ".byte 0x%x", u->operand[0].lval.ubyte);
+ return;
+ case UD_Ijmp:
+ case UD_Icall:
+ if (u->br_far) mkasm(u, "l");
+ mkasm(u, "%s", ud_lookup_mnemonic(u->mnemonic));
+ break;
+ case UD_Ibound:
+ case UD_Ienter:
+ if (u->operand[0].type != UD_NONE)
+ gen_operand(u, &u->operand[0]);
+ if (u->operand[1].type != UD_NONE) {
+ mkasm(u, ",");
+ gen_operand(u, &u->operand[1]);
+ }
+ return;
+ default:
+ mkasm(u, "%s", ud_lookup_mnemonic(u->mnemonic));
+ }
+
+ if (u->c1)
+ size = u->operand[0].size;
+ else if (u->c2)
+ size = u->operand[1].size;
+ else if (u->c3)
+ size = u->operand[2].size;
+
+ if (size == 8)
+ mkasm(u, "b");
+ else if (size == 16)
+ mkasm(u, "w");
+ else if (size == 64)
+ mkasm(u, "q");
+
+ mkasm(u, " ");
+
+ if (u->operand[2].type != UD_NONE) {
+ gen_operand(u, &u->operand[2]);
+ mkasm(u, ", ");
+ }
+
+ if (u->operand[1].type != UD_NONE) {
+ gen_operand(u, &u->operand[1]);
+ mkasm(u, ", ");
+ }
+
+ if (u->operand[0].type != UD_NONE)
+ gen_operand(u, &u->operand[0]);
+}
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c b/src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c
new file mode 100644
index 0000000000..38251db889
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c
@@ -0,0 +1,278 @@
+/* udis86 - libudis86/syn-intel.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "config.h"
+
+#if USE(UDIS86)
+
+#include "udis86_types.h"
+#include "udis86_extern.h"
+#include "udis86_decode.h"
+#include "udis86_itab.h"
+#include "udis86_syn.h"
+
+/* -----------------------------------------------------------------------------
+ * opr_cast() - Prints an operand cast.
+ * -----------------------------------------------------------------------------
+ */
+static void
+opr_cast(struct ud* u, struct ud_operand* op)
+{
+ switch(op->size) {
+ case 8: mkasm(u, "byte " ); break;
+ case 16: mkasm(u, "word " ); break;
+ case 32: mkasm(u, "dword "); break;
+ case 64: mkasm(u, "qword "); break;
+ case 80: mkasm(u, "tword "); break;
+ default: break;
+ }
+ if (u->br_far)
+ mkasm(u, "far ");
+}
+
+/* -----------------------------------------------------------------------------
+ * gen_operand() - Generates assembly output for each operand.
+ * -----------------------------------------------------------------------------
+ */
+static void gen_operand(struct ud* u, struct ud_operand* op, int syn_cast)
+{
+ switch(op->type) {
+ case UD_OP_REG:
+ mkasm(u, "%s", ud_reg_tab[op->base - UD_R_AL]);
+ break;
+
+ case UD_OP_MEM: {
+
+ int op_f = 0;
+
+ if (syn_cast)
+ opr_cast(u, op);
+
+ mkasm(u, "[");
+
+ if (u->pfx_seg)
+ mkasm(u, "%s:", ud_reg_tab[u->pfx_seg - UD_R_AL]);
+
+ if (op->base) {
+ mkasm(u, "%s", ud_reg_tab[op->base - UD_R_AL]);
+ op_f = 1;
+ }
+
+ if (op->index) {
+ if (op_f)
+ mkasm(u, "+");
+ mkasm(u, "%s", ud_reg_tab[op->index - UD_R_AL]);
+ op_f = 1;
+ }
+
+ if (op->scale)
+ mkasm(u, "*%d", op->scale);
+
+ if (op->offset == 8) {
+ if (op->lval.sbyte < 0)
+ mkasm(u, "-0x%x", -op->lval.sbyte);
+ else mkasm(u, "%s0x%x", (op_f) ? "+" : "", op->lval.sbyte);
+ }
+ else if (op->offset == 16)
+ mkasm(u, "%s0x%x", (op_f) ? "+" : "", op->lval.uword);
+ else if (op->offset == 32) {
+ if (u->adr_mode == 64) {
+ if (op->lval.sdword < 0)
+ mkasm(u, "-0x%x", -op->lval.sdword);
+ else mkasm(u, "%s0x%x", (op_f) ? "+" : "", op->lval.sdword);
+ }
+ else mkasm(u, "%s0x%lx", (op_f) ? "+" : "", (unsigned long)op->lval.udword);
+ }
+ else if (op->offset == 64)
+ mkasm(u, "%s0x" FMT64 "x", (op_f) ? "+" : "", op->lval.uqword);
+
+ mkasm(u, "]");
+ break;
+ }
+
+ case UD_OP_IMM: {
+ int64_t imm = 0;
+ uint64_t sext_mask = 0xffffffffffffffffull;
+ unsigned sext_size = op->size;
+
+ if (syn_cast)
+ opr_cast(u, op);
+ switch (op->size) {
+ case 8: imm = op->lval.sbyte; break;
+ case 16: imm = op->lval.sword; break;
+ case 32: imm = op->lval.sdword; break;
+ case 64: imm = op->lval.sqword; break;
+ }
+ if ( P_SEXT( u->itab_entry->prefix ) ) {
+ sext_size = u->operand[ 0 ].size;
+ if ( u->mnemonic == UD_Ipush )
+ /* push sign-extends to operand size */
+ sext_size = u->opr_mode;
+ }
+ if ( sext_size < 64 )
+ sext_mask = ( 1ull << sext_size ) - 1;
+ mkasm( u, "0x" FMT64 "x", imm & sext_mask );
+
+ break;
+ }
+
+
+ case UD_OP_JIMM:
+ if (syn_cast) opr_cast(u, op);
+ switch (op->size) {
+ case 8:
+ mkasm(u, "0x" FMT64 "x", u->pc + op->lval.sbyte);
+ break;
+ case 16:
+ mkasm(u, "0x" FMT64 "x", ( u->pc + op->lval.sword ) & 0xffff );
+ break;
+ case 32:
+ mkasm(u, "0x" FMT64 "x", ( u->pc + op->lval.sdword ) & 0xfffffffful );
+ break;
+ default:break;
+ }
+ break;
+
+ case UD_OP_PTR:
+ switch (op->size) {
+ case 32:
+ mkasm(u, "word 0x%x:0x%x", op->lval.ptr.seg,
+ op->lval.ptr.off & 0xFFFF);
+ break;
+ case 48:
+ mkasm(u, "dword 0x%x:0x%lx", op->lval.ptr.seg,
+ (unsigned long)op->lval.ptr.off);
+ break;
+ }
+ break;
+
+ case UD_OP_CONST:
+ if (syn_cast) opr_cast(u, op);
+ mkasm(u, "%d", op->lval.udword);
+ break;
+
+ default: return;
+ }
+}
+
+/* =============================================================================
+ * translates to intel syntax
+ * =============================================================================
+ */
+extern void ud_translate_intel(struct ud* u)
+{
+ /* -- prefixes -- */
+
+ /* check if P_OSO prefix is used */
+ if (! P_OSO(u->itab_entry->prefix) && u->pfx_opr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "o32 ");
+ break;
+ case 32:
+ case 64:
+ mkasm(u, "o16 ");
+ break;
+ }
+ }
+
+ /* check if P_ASO prefix was used */
+ if (! P_ASO(u->itab_entry->prefix) && u->pfx_adr) {
+ switch (u->dis_mode) {
+ case 16:
+ mkasm(u, "a32 ");
+ break;
+ case 32:
+ mkasm(u, "a16 ");
+ break;
+ case 64:
+ mkasm(u, "a32 ");
+ break;
+ }
+ }
+
+ if ( u->pfx_seg &&
+ u->operand[0].type != UD_OP_MEM &&
+ u->operand[1].type != UD_OP_MEM ) {
+ mkasm(u, "%s ", ud_reg_tab[u->pfx_seg - UD_R_AL]);
+ }
+ if (u->pfx_lock)
+ mkasm(u, "lock ");
+ if (u->pfx_rep)
+ mkasm(u, "rep ");
+ if (u->pfx_repne)
+ mkasm(u, "repne ");
+
+ /* print the instruction mnemonic */
+ mkasm(u, "%s ", ud_lookup_mnemonic(u->mnemonic));
+
+ /* operand 1 */
+ if (u->operand[0].type != UD_NONE) {
+ int cast = 0;
+ if ( u->operand[0].type == UD_OP_IMM &&
+ u->operand[1].type == UD_NONE )
+ cast = u->c1;
+ if ( u->operand[0].type == UD_OP_MEM ) {
+ cast = u->c1;
+ if ( u->operand[1].type == UD_OP_IMM ||
+ u->operand[1].type == UD_OP_CONST )
+ cast = 1;
+ if ( u->operand[1].type == UD_NONE )
+ cast = 1;
+ if ( ( u->operand[0].size != u->operand[1].size ) && u->operand[1].size )
+ cast = 1;
+ } else if ( u->operand[ 0 ].type == UD_OP_JIMM ) {
+ if ( u->operand[ 0 ].size > 8 ) cast = 1;
+ }
+ gen_operand(u, &u->operand[0], cast);
+ }
+ /* operand 2 */
+ if (u->operand[1].type != UD_NONE) {
+ int cast = 0;
+ mkasm(u, ", ");
+ if ( u->operand[1].type == UD_OP_MEM ) {
+ cast = u->c1;
+
+ if ( u->operand[0].type != UD_OP_REG )
+ cast = 1;
+ if ( u->operand[0].size != u->operand[1].size && u->operand[1].size )
+ cast = 1;
+ if ( u->operand[0].type == UD_OP_REG &&
+ u->operand[0].base >= UD_R_ES &&
+ u->operand[0].base <= UD_R_GS )
+ cast = 0;
+ }
+ gen_operand(u, &u->operand[1], cast );
+ }
+
+ /* operand 3 */
+ if (u->operand[2].type != UD_NONE) {
+ mkasm(u, ", ");
+ gen_operand(u, &u->operand[2], u->c3);
+ }
+}
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn.c b/src/3rdparty/masm/disassembler/udis86/udis86_syn.c
new file mode 100644
index 0000000000..31a45ea5c5
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn.c
@@ -0,0 +1,86 @@
+/* udis86 - libudis86/syn.c
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(UDIS86)
+
+/* -----------------------------------------------------------------------------
+ * Intel Register Table - Order Matters (types.h)!
+ * -----------------------------------------------------------------------------
+ */
+const char* ud_reg_tab[] =
+{
+  "al",		"cl",		"dl",		"bl",
+  "ah",		"ch",		"dh",		"bh",
+  "spl",	"bpl",		"sil",		"dil",
+  "r8b",	"r9b",		"r10b",		"r11b",
+  "r12b",	"r13b",		"r14b",		"r15b",
+
+  "ax",		"cx",		"dx",		"bx",
+  "sp",		"bp",		"si",		"di",
+  "r8w",	"r9w",		"r10w",		"r11w",
+  "r12w",	"r13w",		"r14w",		"r15w",
+
+  "eax",	"ecx",		"edx",		"ebx",
+  "esp",	"ebp",		"esi",		"edi",
+  "r8d",	"r9d",		"r10d",		"r11d",
+  "r12d",	"r13d",		"r14d",		"r15d",
+
+  "rax",	"rcx",		"rdx",		"rbx",
+  "rsp",	"rbp",		"rsi",		"rdi",
+  "r8",		"r9",		"r10",		"r11",
+  "r12",	"r13",		"r14",		"r15",
+
+  "es",		"cs",		"ss",		"ds",
+  "fs",		"gs",
+
+  "cr0",	"cr1",		"cr2",		"cr3",
+  "cr4",	"cr5",		"cr6",		"cr7",
+  "cr8",	"cr9",		"cr10",		"cr11",
+  "cr12",	"cr13",		"cr14",		"cr15",
+
+  "dr0",	"dr1",		"dr2",		"dr3",
+  "dr4",	"dr5",		"dr6",		"dr7",
+  "dr8",	"dr9",		"dr10",		"dr11",
+  "dr12",	"dr13",		"dr14",		"dr15",
+
+  "mm0",	"mm1",		"mm2",		"mm3",
+  "mm4",	"mm5",		"mm6",		"mm7",
+
+  "st0",	"st1",		"st2",		"st3",
+  "st4",	"st5",		"st6",		"st7",
+
+  "xmm0",	"xmm1",		"xmm2",		"xmm3",
+  "xmm4",	"xmm5",		"xmm6",		"xmm7",
+  "xmm8",	"xmm9",		"xmm10",	"xmm11",
+  "xmm12",	"xmm13",	"xmm14",	"xmm15",
+
+  "rip"
+};
+
+#endif // USE(UDIS86)
+
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_syn.h b/src/3rdparty/masm/disassembler/udis86/udis86_syn.h
new file mode 100644
index 0000000000..e8636163ef
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_syn.h
@@ -0,0 +1,47 @@
+/* udis86 - libudis86/syn.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_SYN_H
+#define UD_SYN_H
+
+#include "udis86_types.h"
+#include <wtf/Assertions.h>
+
+#ifndef __UD_STANDALONE__
+# include <stdarg.h>
+#endif /* __UD_STANDALONE__ */
+
+extern const char* ud_reg_tab[];
+
+static void mkasm(struct ud* u, const char* fmt, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+static void mkasm(struct ud* u, const char* fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ u->insn_fill += vsnprintf((char*) u->insn_buffer + u->insn_fill, UD_STRING_BUFFER_SIZE - u->insn_fill, fmt, ap);
+ va_end(ap);
+}
+
+#endif
diff --git a/src/3rdparty/masm/disassembler/udis86/udis86_types.h b/src/3rdparty/masm/disassembler/udis86/udis86_types.h
new file mode 100644
index 0000000000..320d1ca491
--- /dev/null
+++ b/src/3rdparty/masm/disassembler/udis86/udis86_types.h
@@ -0,0 +1,238 @@
+/* udis86 - libudis86/types.h
+ *
+ * Copyright (c) 2002-2009 Vivek Thampi
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef UD_TYPES_H
+#define UD_TYPES_H
+
+#ifndef __UD_STANDALONE__
+# include <stdio.h>
+#endif /* __UD_STANDALONE__ */
+
+/* gcc specific extensions */
+#ifdef __GNUC__
+# define UD_ATTR_PACKED __attribute__((packed))
+#else
+# define UD_ATTR_PACKED
+#endif /* UD_ATTR_PACKED */
+
+#ifdef _MSC_VER
+# define FMT64 "%I64"
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+ typedef unsigned __int64 uint64_t;
+ typedef __int8 int8_t;
+ typedef __int16 int16_t;
+ typedef __int32 int32_t;
+ typedef __int64 int64_t;
+#else
+# define FMT64 "%ll"
+# ifndef __UD_STANDALONE__
+# include <inttypes.h>
+# endif /* __UD_STANDALONE__ */
+#endif
+
+/* -----------------------------------------------------------------------------
+ * All possible "types" of objects in udis86. Order is Important!
+ * -----------------------------------------------------------------------------
+ */
+enum ud_type
+{
+ UD_NONE,
+
+ /* 8 bit GPRs */
+ UD_R_AL, UD_R_CL, UD_R_DL, UD_R_BL,
+ UD_R_AH, UD_R_CH, UD_R_DH, UD_R_BH,
+ UD_R_SPL, UD_R_BPL, UD_R_SIL, UD_R_DIL,
+ UD_R_R8B, UD_R_R9B, UD_R_R10B, UD_R_R11B,
+ UD_R_R12B, UD_R_R13B, UD_R_R14B, UD_R_R15B,
+
+ /* 16 bit GPRs */
+ UD_R_AX, UD_R_CX, UD_R_DX, UD_R_BX,
+ UD_R_SP, UD_R_BP, UD_R_SI, UD_R_DI,
+ UD_R_R8W, UD_R_R9W, UD_R_R10W, UD_R_R11W,
+ UD_R_R12W, UD_R_R13W, UD_R_R14W, UD_R_R15W,
+
+ /* 32 bit GPRs */
+ UD_R_EAX, UD_R_ECX, UD_R_EDX, UD_R_EBX,
+ UD_R_ESP, UD_R_EBP, UD_R_ESI, UD_R_EDI,
+ UD_R_R8D, UD_R_R9D, UD_R_R10D, UD_R_R11D,
+ UD_R_R12D, UD_R_R13D, UD_R_R14D, UD_R_R15D,
+
+ /* 64 bit GPRs */
+ UD_R_RAX, UD_R_RCX, UD_R_RDX, UD_R_RBX,
+ UD_R_RSP, UD_R_RBP, UD_R_RSI, UD_R_RDI,
+ UD_R_R8, UD_R_R9, UD_R_R10, UD_R_R11,
+ UD_R_R12, UD_R_R13, UD_R_R14, UD_R_R15,
+
+ /* segment registers */
+ UD_R_ES, UD_R_CS, UD_R_SS, UD_R_DS,
+ UD_R_FS, UD_R_GS,
+
+ /* control registers*/
+ UD_R_CR0, UD_R_CR1, UD_R_CR2, UD_R_CR3,
+ UD_R_CR4, UD_R_CR5, UD_R_CR6, UD_R_CR7,
+ UD_R_CR8, UD_R_CR9, UD_R_CR10, UD_R_CR11,
+ UD_R_CR12, UD_R_CR13, UD_R_CR14, UD_R_CR15,
+
+ /* debug registers */
+ UD_R_DR0, UD_R_DR1, UD_R_DR2, UD_R_DR3,
+ UD_R_DR4, UD_R_DR5, UD_R_DR6, UD_R_DR7,
+ UD_R_DR8, UD_R_DR9, UD_R_DR10, UD_R_DR11,
+ UD_R_DR12, UD_R_DR13, UD_R_DR14, UD_R_DR15,
+
+ /* mmx registers */
+ UD_R_MM0, UD_R_MM1, UD_R_MM2, UD_R_MM3,
+ UD_R_MM4, UD_R_MM5, UD_R_MM6, UD_R_MM7,
+
+ /* x87 registers */
+ UD_R_ST0, UD_R_ST1, UD_R_ST2, UD_R_ST3,
+ UD_R_ST4, UD_R_ST5, UD_R_ST6, UD_R_ST7,
+
+ /* extended multimedia registers */
+ UD_R_XMM0, UD_R_XMM1, UD_R_XMM2, UD_R_XMM3,
+ UD_R_XMM4, UD_R_XMM5, UD_R_XMM6, UD_R_XMM7,
+ UD_R_XMM8, UD_R_XMM9, UD_R_XMM10, UD_R_XMM11,
+ UD_R_XMM12, UD_R_XMM13, UD_R_XMM14, UD_R_XMM15,
+
+ UD_R_RIP,
+
+ /* Operand Types */
+ UD_OP_REG, UD_OP_MEM, UD_OP_PTR, UD_OP_IMM,
+ UD_OP_JIMM, UD_OP_CONST
+};
+
+#include "udis86_itab.h"
+
+/* -----------------------------------------------------------------------------
+ * struct ud_operand - Disassembled instruction Operand.
+ * -----------------------------------------------------------------------------
+ */
+struct ud_operand
+{
+ enum ud_type type;
+ uint8_t size;
+ union {
+ int8_t sbyte;
+ uint8_t ubyte;
+ int16_t sword;
+ uint16_t uword;
+ int32_t sdword;
+ uint32_t udword;
+ int64_t sqword;
+ uint64_t uqword;
+
+ struct {
+ uint16_t seg;
+ uint32_t off;
+ } ptr;
+ } lval;
+
+ enum ud_type base;
+ enum ud_type index;
+ uint8_t offset;
+ uint8_t scale;
+};
+
+#define UD_STRING_BUFFER_SIZE 64
+
+/* -----------------------------------------------------------------------------
+ * struct ud - The udis86 object.
+ * -----------------------------------------------------------------------------
+ */
+struct ud
+{
+ int (*inp_hook) (struct ud*);
+ uint8_t inp_curr;
+ uint8_t inp_fill;
+#ifndef __UD_STANDALONE__
+ FILE* inp_file;
+#endif
+ uint8_t inp_ctr;
+ uint8_t* inp_buff;
+ uint8_t* inp_buff_end;
+ uint8_t inp_end;
+ void (*translator)(struct ud*);
+ uint64_t insn_offset;
+ char insn_hexcode[32];
+ char insn_buffer[UD_STRING_BUFFER_SIZE];
+ unsigned int insn_fill;
+ uint8_t dis_mode;
+ uint64_t pc;
+ uint8_t vendor;
+ struct map_entry* mapen;
+ enum ud_mnemonic_code mnemonic;
+ struct ud_operand operand[3];
+ uint8_t error;
+ uint8_t pfx_rex;
+ uint8_t pfx_seg;
+ uint8_t pfx_opr;
+ uint8_t pfx_adr;
+ uint8_t pfx_lock;
+ uint8_t pfx_rep;
+ uint8_t pfx_repe;
+ uint8_t pfx_repne;
+ uint8_t pfx_insn;
+ uint8_t default64;
+ uint8_t opr_mode;
+ uint8_t adr_mode;
+ uint8_t br_far;
+ uint8_t br_near;
+ uint8_t implicit_addr;
+ uint8_t c1;
+ uint8_t c2;
+ uint8_t c3;
+ uint8_t inp_cache[256];
+ uint8_t inp_sess[64];
+ uint8_t have_modrm;
+ uint8_t modrm;
+ void * user_opaque_data;
+ struct ud_itab_entry * itab_entry;
+ struct ud_lookup_table_list_entry *le;
+};
+
+/* -----------------------------------------------------------------------------
+ * Type-definitions
+ * -----------------------------------------------------------------------------
+ */
+typedef enum ud_type ud_type_t;
+typedef enum ud_mnemonic_code ud_mnemonic_code_t;
+
+typedef struct ud ud_t;
+typedef struct ud_operand ud_operand_t;
+
+#define UD_SYN_INTEL ud_translate_intel
+#define UD_SYN_ATT ud_translate_att
+#define UD_EOI -1
+#define UD_INP_CACHE_SZ 32
+#define UD_VENDOR_AMD 0
+#define UD_VENDOR_INTEL 1
+#define UD_VENDOR_ANY 2
+
+#define bail_out(ud,error_code) longjmp( (ud)->bailout, error_code )
+#define try_decode(ud) if ( setjmp( (ud)->bailout ) == 0 )
+#define catch_error() else
+
+#endif
diff --git a/src/3rdparty/masm/jit/JITCompilationEffort.h b/src/3rdparty/masm/jit/JITCompilationEffort.h
new file mode 100644
index 0000000000..5eb6801789
--- /dev/null
+++ b/src/3rdparty/masm/jit/JITCompilationEffort.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITCompilationEffort_h
+#define JITCompilationEffort_h
+
+namespace JSC {
+
+enum JITCompilationEffort {
+ JITCompilationCanFail,
+ JITCompilationMustSucceed
+};
+
+} // namespace JSC
+
+#endif // JITCompilationEffort_h
+
diff --git a/src/3rdparty/masm/masm-defs.pri b/src/3rdparty/masm/masm-defs.pri
new file mode 100644
index 0000000000..f8055d0ff4
--- /dev/null
+++ b/src/3rdparty/masm/masm-defs.pri
@@ -0,0 +1,28 @@
+
+DEFINES += WTF_EXPORT_PRIVATE="" JS_EXPORT_PRIVATE=""
+
+win*: DEFINES += NOMINMAX
+
+DEFINES += ENABLE_LLINT=0
+DEFINES += ENABLE_DFG_JIT=0
+DEFINES += ENABLE_JIT=1
+DEFINES += ENABLE_JIT_CONSTANT_BLINDING=0
+DEFINES += ENABLE_ASSEMBLER=1
+DEFINES += ENABLE_YARR_JIT=0
+DEFINES += BUILDING_QT__
+
+INCLUDEPATH += $$PWD/jit
+INCLUDEPATH += $$PWD/assembler
+INCLUDEPATH += $$PWD/runtime
+INCLUDEPATH += $$PWD/wtf
+INCLUDEPATH += $$PWD/stubs
+INCLUDEPATH += $$PWD/stubs/wtf
+INCLUDEPATH += $$PWD
+
+if(isEqual(QT_ARCH, "i386")|isEqual(QT_ARCH, "x86_64")):!win*: DEFINES += WTF_USE_UDIS86=1
+else: DEFINES += WTF_USE_UDIS86=0
+
+INCLUDEPATH += $$PWD/disassembler
+INCLUDEPATH += $$PWD/disassembler/udis86
+INCLUDEPATH += $$_OUT_PWD
+
diff --git a/src/3rdparty/masm/masm.pri b/src/3rdparty/masm/masm.pri
new file mode 100644
index 0000000000..683a5b19d3
--- /dev/null
+++ b/src/3rdparty/masm/masm.pri
@@ -0,0 +1,86 @@
+HEADERS += $$PWD/assembler/*.h
+SOURCES += $$PWD/assembler/ARMAssembler.cpp
+SOURCES += $$PWD/assembler/ARMv7Assembler.cpp
+SOURCES += $$PWD/assembler/MacroAssemblerARM.cpp
+SOURCES += $$PWD/assembler/MacroAssemblerSH4.cpp
+SOURCES += $$PWD/assembler/LinkBuffer.cpp
+
+HEADERS += $$PWD/wtf/*.h
+SOURCES += $$PWD/wtf/PrintStream.cpp
+HEADERS += $$PWD/wtf/PrintStream.h
+
+SOURCES += $$PWD/wtf/FilePrintStream.cpp
+HEADERS += $$PWD/wtf/FilePrintStream.h
+
+HEADERS += $$PWD/wtf/RawPointer.h
+
+win32: SOURCES += $$PWD/wtf/OSAllocatorWin.cpp
+else: SOURCES += $$PWD/wtf/OSAllocatorPosix.cpp
+HEADERS += $$PWD/wtf/OSAllocator.h
+
+SOURCES += $$PWD/wtf/PageAllocationAligned.cpp
+HEADERS += $$PWD/wtf/PageAllocationAligned.h
+HEADERS += $$PWD/wtf/PageAllocation.h
+
+SOURCES += $$PWD/wtf/PageBlock.cpp
+HEADERS += $$PWD/wtf/PageBlock.h
+
+HEADERS += $$PWD/wtf/PageReservation.h
+
+SOURCES += $$PWD/stubs/WTFStubs.cpp
+HEADERS += $$PWD/stubs/WTFStubs.h
+
+SOURCES += $$PWD/disassembler/Disassembler.cpp
+SOURCES += $$PWD/disassembler/UDis86Disassembler.cpp
+contains(DEFINES, WTF_USE_UDIS86=1) {
+ SOURCES += $$PWD/disassembler/udis86/udis86.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_decode.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_input.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_itab_holder.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_syn-att.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_syn.c
+ SOURCES += $$PWD/disassembler/udis86/udis86_syn-intel.c
+
+ ITAB = $$PWD/disassembler/udis86/optable.xml
+ udis86.output = udis86_itab.h
+ udis86.input = ITAB
+ udis86.CONFIG += no_link
+ udis86.commands = python $$PWD/disassembler/udis86/itab.py ${QMAKE_FILE_IN}
+ QMAKE_EXTRA_COMPILERS += udis86
+
+ udis86_tab_cfile.target = $$OUT_PWD/udis86_itab.c
+ udis86_tab_cfile.depends = udis86_itab.h
+ QMAKE_EXTRA_TARGETS += udis86_tab_cfile
+}
+
+SOURCES += \
+ $$PWD/yarr/YarrCanonicalizeUCS2.cpp \
+ $$PWD/yarr/YarrInterpreter.cpp \
+ $$PWD/yarr/YarrPattern.cpp \
+ $$PWD/yarr/YarrSyntaxChecker.cpp
+
+HEADERS += $$PWD/yarr/*.h
+
+retgen.output = RegExpJitTables.h
+retgen.script = $$PWD/create_regex_tables
+retgen.input = retgen.script
+retgen.CONFIG += no_link
+retgen.commands = python $$retgen.script > ${QMAKE_FILE_OUT}
+QMAKE_EXTRA_COMPILERS += retgen
+
+# Taken from WebKit/Tools/qmake/mkspecs/features/unix/default_post.prf
+linux-g++* {
+ greaterThan(QT_GCC_MAJOR_VERSION, 3):greaterThan(QT_GCC_MINOR_VERSION, 5) {
+ !contains(QMAKE_CXXFLAGS, -std=(c|gnu)\\+\\+(0x|11)) {
+ # We need to deactivate those warnings because some names conflicts with upcoming c++0x types (e.g.nullptr).
+ QMAKE_CXXFLAGS_WARN_ON += -Wno-c++0x-compat
+ QMAKE_CXXFLAGS += -Wno-c++0x-compat
+ }
+ }
+}
+
+# Don't warn about OVERRIDE and FINAL, since they are feature-checked anyways
+*clang:!contains(QMAKE_CXXFLAGS, -std=c++11) {
+ QMAKE_CXXFLAGS += -Wno-c++11-extensions
+ QMAKE_OBJECTIVE_CFLAGS += -Wno-c++11-extensions
+}
diff --git a/src/3rdparty/masm/runtime/MatchResult.h b/src/3rdparty/masm/runtime/MatchResult.h
new file mode 100644
index 0000000000..d87c8516b0
--- /dev/null
+++ b/src/3rdparty/masm/runtime/MatchResult.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MatchResult_h
+#define MatchResult_h
+
+typedef uint64_t EncodedMatchResult;
+
+struct MatchResult {
+ ALWAYS_INLINE MatchResult(size_t start, size_t end)
+ : start(start)
+ , end(end)
+ {
+ }
+
+ explicit ALWAYS_INLINE MatchResult(EncodedMatchResult encoded)
+ {
+ union u {
+ uint64_t encoded;
+ struct s {
+ size_t start;
+ size_t end;
+ } split;
+ } value;
+ value.encoded = encoded;
+ start = value.split.start;
+ end = value.split.end;
+ }
+
+ ALWAYS_INLINE static MatchResult failed()
+ {
+ return MatchResult(WTF::notFound, 0);
+ }
+
+ ALWAYS_INLINE operator bool()
+ {
+ return start != WTF::notFound;
+ }
+
+ ALWAYS_INLINE bool empty()
+ {
+ return start == end;
+ }
+
+ size_t start;
+ size_t end;
+};
+
+#endif
diff --git a/src/3rdparty/masm/stubs/ExecutableAllocator.h b/src/3rdparty/masm/stubs/ExecutableAllocator.h
new file mode 100644
index 0000000000..f4292c791d
--- /dev/null
+++ b/src/3rdparty/masm/stubs/ExecutableAllocator.h
@@ -0,0 +1,120 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef MASM_EXECUTABLEALLOCATOR_H
+#define MASM_EXECUTABLEALLOCATOR_H
+
+#include <RefPtr.h>
+#include <RefCounted.h>
+#include <wtf/PageBlock.h>
+
+#include <qv4executableallocator.h>
+
+#if OS(WINDOWS)
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+namespace JSC {
+
+class JSGlobalData;
+
+struct ExecutableMemoryHandle : public RefCounted<ExecutableMemoryHandle> {
+ ExecutableMemoryHandle(QQmlJS::VM::ExecutableAllocator *allocator, int size)
+ : m_allocator(allocator)
+ , m_size(size)
+ {
+ m_allocation = allocator->allocate(size);
+ }
+ ~ExecutableMemoryHandle()
+ {
+ m_allocator->free(m_allocation);
+ }
+
+ inline void shrink(size_t) {
+ // ### TODO.
+ }
+
+ inline bool isManaged() const { return true; }
+
+ void* start() { return m_allocation->start(); }
+ int sizeInBytes() { return m_size; }
+
+ QQmlJS::VM::ExecutableAllocator *m_allocator;
+ QQmlJS::VM::ExecutableAllocator::Allocation *m_allocation;
+ int m_size;
+};
+
+struct ExecutableAllocator {
+ ExecutableAllocator(QQmlJS::VM::ExecutableAllocator *alloc)
+ : realAllocator(alloc)
+ {}
+
+ PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, int size, void*, int)
+ {
+ return adoptRef(new ExecutableMemoryHandle(realAllocator, size));
+ }
+
+ static void makeWritable(void*, int)
+ {
+ }
+
+ static void makeExecutable(void* addr, int size)
+ {
+ size_t pageSize = WTF::pageSize();
+ size_t iaddr = reinterpret_cast<size_t>(addr);
+ size_t roundAddr = iaddr & ~(pageSize - static_cast<size_t>(1));
+#if OS(WINDOWS)
+ DWORD oldProtect;
+ VirtualProtect(reinterpret_cast<void*>(roundAddr), size + (iaddr - roundAddr), PAGE_EXECUTE_READWRITE, &oldProtect);
+#else
+ int mode = PROT_READ | PROT_WRITE | PROT_EXEC;
+ mprotect(reinterpret_cast<void*>(roundAddr), size + (iaddr - roundAddr), mode);
+#endif
+ }
+
+ QQmlJS::VM::ExecutableAllocator *realAllocator;
+};
+
+}
+
+#endif // MASM_EXECUTABLEALLOCATOR_H
diff --git a/src/3rdparty/masm/stubs/JSGlobalData.h b/src/3rdparty/masm/stubs/JSGlobalData.h
new file mode 100644
index 0000000000..281a64de83
--- /dev/null
+++ b/src/3rdparty/masm/stubs/JSGlobalData.h
@@ -0,0 +1,65 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef MASM_JSGLOBALDATA_H
+#define MASM_JSGLOBALDATA_H
+
+#include "ExecutableAllocator.h"
+#include "WeakRandom.h"
+
+namespace QQmlJS {
+namespace VM {
+class ExecutableAllocator;
+}
+}
+
+namespace JSC {
+
+class JSGlobalData {
+public:
+ JSGlobalData(QQmlJS::VM::ExecutableAllocator *realAllocator)
+ : executableAllocator(realAllocator)
+ {}
+ ExecutableAllocator executableAllocator;
+};
+
+}
+
+#endif // MASM_JSGLOBALDATA_H
diff --git a/src/3rdparty/masm/stubs/LLIntData.h b/src/3rdparty/masm/stubs/LLIntData.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/src/3rdparty/masm/stubs/LLIntData.h
diff --git a/src/3rdparty/masm/stubs/Options.h b/src/3rdparty/masm/stubs/Options.h
new file mode 100644
index 0000000000..b95e4354e2
--- /dev/null
+++ b/src/3rdparty/masm/stubs/Options.h
@@ -0,0 +1,53 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef OPTIONS_H
+#define OPTIONS_H
+
+namespace JSC {
+
+struct Options {
+ static bool showDisassembly() { return true; }
+ static bool showDFGDisassembly() { return true; }
+};
+
+}
+
+#endif // MASM_STUBS/OPTIONS_H
diff --git a/src/3rdparty/masm/stubs/WTFStubs.cpp b/src/3rdparty/masm/stubs/WTFStubs.cpp
new file mode 100644
index 0000000000..530804fe3e
--- /dev/null
+++ b/src/3rdparty/masm/stubs/WTFStubs.cpp
@@ -0,0 +1,131 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include <config.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <qdebug.h>
+#include <FilePrintStream.h>
+
+namespace WTF {
+
+void* fastMalloc(size_t size)
+{
+ return malloc(size);
+}
+
+void* fastRealloc(void* ptr, size_t size)
+{
+ return realloc(ptr, size);
+}
+
+void fastFree(void* ptr)
+{
+ free(ptr);
+}
+
+uint32_t cryptographicallyRandomNumber()
+{
+ return 0;
+}
+
+static FilePrintStream* s_dataFile;
+
+void setDataFile(FILE* f)
+{
+ delete s_dataFile;
+ s_dataFile = new FilePrintStream(f, FilePrintStream::Borrow);
+}
+
+FilePrintStream& dataFile()
+{
+ if (!s_dataFile)
+ s_dataFile = new FilePrintStream(stderr, FilePrintStream::Borrow);
+ return *s_dataFile;
+}
+
+void dataLogFV(const char* format, va_list args)
+{
+ char buffer[1024];
+ vsnprintf(buffer, sizeof(buffer), format, args);
+ qDebug("%s", buffer);
+}
+
+void dataLogF(const char* format, ...)
+{
+ char buffer[1024];
+ va_list args;
+ va_start(args, format);
+ vsnprintf(buffer, sizeof(buffer), format, args);
+ va_end(args);
+ qDebug("%s", buffer);
+}
+
+void dataLogFString(const char* str)
+{
+ qDebug("%s", str);
+}
+
+}
+
+extern "C" {
+
+void WTFReportAssertionFailure(const char* /*file*/, int /*line*/, const char* /*function*/, const char* /*assertion*/)
+{
+}
+
+void WTFReportBacktrace()
+{
+}
+
+void WTFInvokeCrashHook()
+{
+}
+
+}
+
+
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
+#include <MacroAssemblerX86Common.h>
+
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = JSC::MacroAssemblerX86Common::NotCheckedSSE2;
+#endif
+
diff --git a/src/3rdparty/masm/stubs/WTFStubs.h b/src/3rdparty/masm/stubs/WTFStubs.h
new file mode 100644
index 0000000000..ec77d25da7
--- /dev/null
+++ b/src/3rdparty/masm/stubs/WTFStubs.h
@@ -0,0 +1,50 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef WTFSTUBS_H
+#define WTFSTUBS_H
+
+namespace WTF {
+
+void setDataFile(FILE* f);
+
+}
+
+#endif // WTFSTUBS_H
diff --git a/src/3rdparty/masm/stubs/wtf/FastAllocBase.h b/src/3rdparty/masm/stubs/wtf/FastAllocBase.h
new file mode 100644
index 0000000000..a062a885af
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/FastAllocBase.h
@@ -0,0 +1,48 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef FASTALLOCBASE_H
+#define FASTALLOCBASE_H
+
+/* Dummy empty header file, only needed for #include source compatibility */
+
+#define WTF_MAKE_FAST_ALLOCATED
+
+#endif // FASTALLOCBASE_H
diff --git a/src/3rdparty/masm/stubs/wtf/FastMalloc.h b/src/3rdparty/masm/stubs/wtf/FastMalloc.h
new file mode 100644
index 0000000000..1248c79dec
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/FastMalloc.h
@@ -0,0 +1,46 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef FASTMALLOC_H
+#define FASTMALLOC_H
+
+/* Dummy empty header file, only needed for #include source compatibility */
+
+#endif // FASTMALLOC_H
diff --git a/src/3rdparty/masm/stubs/wtf/Noncopyable.h b/src/3rdparty/masm/stubs/wtf/Noncopyable.h
new file mode 100644
index 0000000000..d3d1eed6d1
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/Noncopyable.h
@@ -0,0 +1,48 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef NONCOPYABLE_H
+#define NONCOPYABLE_H
+
+#include <qglobal.h>
+
+#define WTF_MAKE_NONCOPYABLE(x) Q_DISABLE_COPY(x)
+
+#endif // NONCOPYABLE_H
diff --git a/src/3rdparty/masm/stubs/wtf/OwnPtr.h b/src/3rdparty/masm/stubs/wtf/OwnPtr.h
new file mode 100644
index 0000000000..31d2f1efa3
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/OwnPtr.h
@@ -0,0 +1,46 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef OWNPTR_H
+#define OWNPTR_H
+
+#include "PassOwnPtr.h"
+
+#endif // OWNPTR_H
diff --git a/src/3rdparty/masm/stubs/wtf/PassOwnPtr.h b/src/3rdparty/masm/stubs/wtf/PassOwnPtr.h
new file mode 100644
index 0000000000..601d278c16
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/PassOwnPtr.h
@@ -0,0 +1,120 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef PASSOWNPTR_H
+#define PASSOWNPTR_H
+
+#include <qscopedpointer.h>
+
+template <typename T> class PassOwnPtr;
+template <typename PtrType> PassOwnPtr<PtrType> adoptPtr(PtrType*);
+
+template <typename T>
+struct OwnPtr : public QScopedPointer<T>
+{
+ OwnPtr() {}
+ OwnPtr(const PassOwnPtr<T> &ptr)
+ : QScopedPointer<T>(ptr.leakRef())
+ {}
+
+ OwnPtr(const OwnPtr<T>& other)
+ : QScopedPointer<T>(const_cast<OwnPtr<T> &>(other).take())
+ {}
+
+ OwnPtr& operator=(const OwnPtr<T>& other)
+ {
+ this->reset(const_cast<OwnPtr<T> &>(other).take());
+ return *this;
+ }
+
+ T* get() const { return this->data(); }
+
+ PassOwnPtr<T> release()
+ {
+ return adoptPtr(this->take());
+ }
+};
+
+template <typename T>
+class PassOwnPtr {
+public:
+ PassOwnPtr() {}
+
+ PassOwnPtr(T* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ PassOwnPtr(const PassOwnPtr<T>& other)
+ : m_ptr(other.leakRef())
+ {
+ }
+
+ PassOwnPtr(const OwnPtr<T>& other)
+ : m_ptr(other.take())
+ {
+ }
+
+ ~PassOwnPtr()
+ {
+ }
+
+ T* operator->() const { return m_ptr.data(); }
+
+ T* leakRef() const { return m_ptr.take(); }
+
+private:
+ template <typename PtrType> friend PassOwnPtr<PtrType> adoptPtr(PtrType*);
+
+ PassOwnPtr<T>& operator=(const PassOwnPtr<T>&)
+ {}
+ mutable QScopedPointer<T> m_ptr;
+};
+
+template <typename T>
+PassOwnPtr<T> adoptPtr(T* ptr)
+{
+ PassOwnPtr<T> result;
+ result.m_ptr.reset(ptr);
+ return result;
+}
+
+
+#endif // PASSOWNPTR_H
diff --git a/src/3rdparty/masm/stubs/wtf/PassRefPtr.h b/src/3rdparty/masm/stubs/wtf/PassRefPtr.h
new file mode 100644
index 0000000000..d97be1c330
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/PassRefPtr.h
@@ -0,0 +1,101 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef PASSREFPTR_H
+#define PASSREFPTR_H
+
+template <typename T> class RefPtr;
+
+template <typename T>
+class PassRefPtr {
+public:
+ PassRefPtr() : m_ptr(0) {}
+
+ PassRefPtr(T* ptr)
+ : m_ptr(ptr)
+ {
+ if (m_ptr)
+ m_ptr->ref();
+ }
+
+ PassRefPtr(const PassRefPtr<T>& other)
+ : m_ptr(other.leakRef())
+ {
+ }
+
+ PassRefPtr(const RefPtr<T>& other)
+ : m_ptr(other.get())
+ {
+ if (m_ptr)
+ m_ptr->ref();
+ }
+
+ ~PassRefPtr()
+ {
+ if (m_ptr)
+ m_ptr->deref();
+ }
+
+ T* operator->() const { return m_ptr; }
+
+ T* leakRef() const
+ {
+ T* result = m_ptr;
+ m_ptr = 0;
+ return result;
+ }
+
+private:
+ PassRefPtr<T>& operator=(const PassRefPtr<T>&)
+ {}
+
+ template <typename PtrType> friend PassRefPtr<PtrType> adoptRef(PtrType*);
+ mutable T* m_ptr;
+};
+
+template <typename T>
+PassRefPtr<T> adoptRef(T* ptr)
+{
+ PassRefPtr<T> result;
+ result.m_ptr = ptr;
+ return result;
+}
+
+#endif // PASSREFPTR_H
diff --git a/src/3rdparty/masm/stubs/wtf/RefCounted.h b/src/3rdparty/masm/stubs/wtf/RefCounted.h
new file mode 100644
index 0000000000..4fc9ad9074
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/RefCounted.h
@@ -0,0 +1,70 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef REFCOUNTED_H
+#define REFCOUNTED_H
+
+#include "PassRefPtr.h"
+
+template <typename Base>
+class RefCounted {
+public:
+ RefCounted() : m_refCount(1) {}
+ ~RefCounted()
+ {
+ deref();
+ }
+
+ void ref()
+ {
+ ++m_refCount;
+ }
+
+ void deref()
+ {
+ if (!--m_refCount)
+ delete static_cast<Base*>(this);
+ }
+
+protected:
+ int m_refCount;
+};
+
+#endif // REFCOUNTED_H
diff --git a/src/3rdparty/masm/stubs/wtf/RefPtr.h b/src/3rdparty/masm/stubs/wtf/RefPtr.h
new file mode 100644
index 0000000000..929b493b4b
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/RefPtr.h
@@ -0,0 +1,93 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef REFPTR_H
+#define REFPTR_H
+
+#include "PassRefPtr.h"
+
+template <typename T>
+class RefPtr {
+public:
+ RefPtr() : m_ptr(0) {}
+ RefPtr(const RefPtr<T> &other)
+ : m_ptr(other.m_ptr)
+ {
+ if (m_ptr)
+ m_ptr->ref();
+ }
+
+ RefPtr<T>& operator=(const RefPtr<T>& other)
+ {
+ if (other.m_ptr)
+ other.m_ptr->ref();
+ if (m_ptr)
+ m_ptr->deref();
+ m_ptr = other.m_ptr;
+ return *this;
+ }
+
+ RefPtr(const PassRefPtr<T>& other)
+ : m_ptr(other.leakRef())
+ {
+ }
+
+ ~RefPtr()
+ {
+ if (m_ptr)
+ m_ptr->deref();
+ }
+
+ T* operator->() const { return m_ptr; }
+ T* get() const { return m_ptr; }
+ bool operator!() const { return !m_ptr; }
+
+ PassRefPtr<T> release()
+ {
+ T* ptr = m_ptr;
+ m_ptr = 0;
+ return adoptRef(ptr);
+ }
+
+private:
+ T* m_ptr;
+};
+
+#endif // REFPTR_H
diff --git a/src/3rdparty/masm/stubs/wtf/TypeTraits.h b/src/3rdparty/masm/stubs/wtf/TypeTraits.h
new file mode 100644
index 0000000000..9b626a7a53
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/TypeTraits.h
@@ -0,0 +1,58 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef TYPETRAITS_H
+#define TYPETRAITS_H
+
+namespace WTF {
+
+template <typename A, typename B>
+struct IsSameType {
+ static const bool value = false;
+};
+
+template <typename A>
+struct IsSameType<A, A> {
+ static const bool value = true;
+};
+
+}
+
+#endif // TYPETRAITS_H
diff --git a/src/3rdparty/masm/stubs/wtf/UnusedParam.h b/src/3rdparty/masm/stubs/wtf/UnusedParam.h
new file mode 100644
index 0000000000..a676bdf303
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/UnusedParam.h
@@ -0,0 +1,48 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef UNUSEDPARAM_H
+#define UNUSEDPARAM_H
+
+#include <qglobal.h>
+
+#define UNUSED_PARAM(x) Q_UNUSED(x)
+
+#endif // UNUSEDPARAM_H
diff --git a/src/3rdparty/masm/stubs/wtf/Vector.h b/src/3rdparty/masm/stubs/wtf/Vector.h
new file mode 100644
index 0000000000..39742d8ab0
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/Vector.h
@@ -0,0 +1,104 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef VECTOR_H
+#define VECTOR_H
+
+#include <vector>
+#include <wtf/Assertions.h>
+#include <wtf/NotFound.h>
+#include <qalgorithms.h>
+
+enum WTF_UnusedOverflowMode {
+ UnsafeVectorOverflow
+};
+
+namespace WTF {
+
+template <typename T, int capacity = 1, int overflowMode = UnsafeVectorOverflow>
+class Vector : public std::vector<T> {
+public:
+ Vector() {}
+ Vector(int initialSize) : std::vector<T>(initialSize) {}
+
+ inline void append(const T& value)
+ { this->push_back(value); }
+
+ inline void append(const Vector<T>& vector)
+ {
+ this->insert(this->end(), vector.begin(), vector.end());
+ }
+
+ using std::vector<T>::insert;
+
+ inline void reserveInitialCapacity(size_t size) { this->reserve(size); }
+
+ inline void insert(size_t position, T value)
+ { this->insert(this->begin() + position, value); }
+
+ inline void grow(size_t size)
+ { this->resize(size); }
+
+ inline void shrink(size_t size)
+ { this->erase(this->begin() + size, this->end()); }
+
+ inline void shrinkToFit()
+ { this->shrink_to_fit(); }
+
+ inline void remove(size_t position)
+ { this->erase(this->begin() + position); }
+
+ inline bool isEmpty() const { return this->empty(); }
+
+ inline T &last() { return *(this->begin() + this->size() - 1); }
+};
+
+template <typename T, int capacity>
+void deleteAllValues(const Vector<T, capacity> &vector)
+{
+ qDeleteAll(vector);
+}
+
+}
+
+using WTF::Vector;
+using WTF::deleteAllValues;
+
+#endif // VECTOR_H
diff --git a/src/3rdparty/masm/stubs/wtf/text/CString.h b/src/3rdparty/masm/stubs/wtf/text/CString.h
new file mode 100644
index 0000000000..c9a65e5c0b
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/text/CString.h
@@ -0,0 +1,44 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef CSTRING_H
+#define CSTRING_H
+
+#endif // CSTRING_H
diff --git a/src/3rdparty/masm/stubs/wtf/text/WTFString.h b/src/3rdparty/masm/stubs/wtf/text/WTFString.h
new file mode 100644
index 0000000000..d157dc7adc
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/text/WTFString.h
@@ -0,0 +1,75 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef WTFSTRING_H
+#define WTFSTRING_H
+
+#include <QString>
+#include <wtf/ASCIICType.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace WTF {
+
+class String : public QString
+{
+public:
+ String(const QString& s) : QString(s) {}
+ bool is8Bit() const { return false; }
+ const unsigned char *characters8() const { return 0; }
+ const UChar *characters16() const { return reinterpret_cast<const UChar*>(constData()); }
+
+ template <typename T>
+ const T* getCharacters() const;
+
+};
+
+template <>
+inline const unsigned char* String::getCharacters<unsigned char>() const { return characters8(); }
+template <>
+inline const UChar* String::getCharacters<UChar>() const { return characters16(); }
+
+}
+
+// Don't import WTF::String into the global namespace to avoid conflicts with QQmlJS::VM::String
+namespace JSC {
+ using WTF::String;
+}
+
+#endif // WTFSTRING_H
diff --git a/src/3rdparty/masm/stubs/wtf/unicode/Unicode.h b/src/3rdparty/masm/stubs/wtf/unicode/Unicode.h
new file mode 100644
index 0000000000..9e7427e8ac
--- /dev/null
+++ b/src/3rdparty/masm/stubs/wtf/unicode/Unicode.h
@@ -0,0 +1,59 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef UNICODE_H
+#define UNICODE_H
+
+#include <QChar>
+
+typedef unsigned char LChar;
+typedef unsigned short UChar;
+
+namespace Unicode {
+ inline UChar toLower(UChar ch) {
+ return QChar::toLower(ch);
+ }
+
+ inline UChar toUpper(UChar ch) {
+ return QChar::toUpper(ch);
+ }
+}
+
+#endif // UNICODE_H
diff --git a/src/3rdparty/masm/wtf/ASCIICType.h b/src/3rdparty/masm/wtf/ASCIICType.h
new file mode 100644
index 0000000000..18e108e1bf
--- /dev/null
+++ b/src/3rdparty/masm/wtf/ASCIICType.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_ASCIICType_h
+#define WTF_ASCIICType_h
+
+#include <wtf/Assertions.h>
+
+// The behavior of many of the functions in the <ctype.h> header is dependent
+// on the current locale. But in the WebKit project, all uses of those functions
+// are in code processing something that's not locale-specific. These equivalents
+// for some of the <ctype.h> functions are named more explicitly, not dependent
+// on the C library locale, and we should also optimize them as needed.
+
+// All functions return false or leave the character unchanged if passed a character
+// that is outside the range 0-7F. So they can be used on Unicode strings or
+// characters if the intent is to do processing only if the character is ASCII.
+
+namespace WTF {
+
+template<typename CharType> inline bool isASCII(CharType c)
+{
+ return !(c & ~0x7F);
+}
+
+template<typename CharType> inline bool isASCIIAlpha(CharType c)
+{
+ return (c | 0x20) >= 'a' && (c | 0x20) <= 'z';
+}
+
+template<typename CharType> inline bool isASCIIDigit(CharType c)
+{
+ return c >= '0' && c <= '9';
+}
+
+template<typename CharType> inline bool isASCIIAlphanumeric(CharType c)
+{
+ return isASCIIDigit(c) || isASCIIAlpha(c);
+}
+
+template<typename CharType> inline bool isASCIIHexDigit(CharType c)
+{
+ return isASCIIDigit(c) || ((c | 0x20) >= 'a' && (c | 0x20) <= 'f');
+}
+
+template<typename CharType> inline bool isASCIILower(CharType c)
+{
+ return c >= 'a' && c <= 'z';
+}
+
+template<typename CharType> inline bool isASCIIOctalDigit(CharType c)
+{
+ return (c >= '0') & (c <= '7');
+}
+
+template<typename CharType> inline bool isASCIIPrintable(CharType c)
+{
+ return c >= ' ' && c <= '~';
+}
+
+/*
+ Statistics from a run of Apple's page load test for callers of isASCIISpace:
+
+ character count
+ --------- -----
+ non-spaces 689383
+ 20 space 294720
+ 0A \n 89059
+ 09 \t 28320
+ 0D \r 0
+ 0C \f 0
+ 0B \v 0
+ */
+template<typename CharType> inline bool isASCIISpace(CharType c)
+{
+ return c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
+}
+
+template<typename CharType> inline bool isASCIIUpper(CharType c)
+{
+ return c >= 'A' && c <= 'Z';
+}
+
+template<typename CharType> inline CharType toASCIILower(CharType c)
+{
+ return c | ((c >= 'A' && c <= 'Z') << 5);
+}
+
+template<typename CharType> inline CharType toASCIILowerUnchecked(CharType character)
+{
+ // This function can be used for comparing any input character
+ // to a lowercase English character. The isASCIIAlphaCaselessEqual
+ // below should be used for regular comparison of ASCII alpha
+ // characters, but switch statements in CSS tokenizer require
+ // direct use of this function.
+ return character | 0x20;
+}
+
+template<typename CharType> inline CharType toASCIIUpper(CharType c)
+{
+ return c & ~((c >= 'a' && c <= 'z') << 5);
+}
+
+template<typename CharType> inline int toASCIIHexValue(CharType c)
+{
+ ASSERT(isASCIIHexDigit(c));
+ return c < 'A' ? c - '0' : (c - 'A' + 10) & 0xF;
+}
+
+template<typename CharType> inline int toASCIIHexValue(CharType upperValue, CharType lowerValue)
+{
+ ASSERT(isASCIIHexDigit(upperValue) && isASCIIHexDigit(lowerValue));
+ return ((toASCIIHexValue(upperValue) << 4) & 0xF0) | toASCIIHexValue(lowerValue);
+}
+
+inline char lowerNibbleToASCIIHexDigit(char c)
+{
+ char nibble = c & 0xF;
+ return nibble < 10 ? '0' + nibble : 'A' + nibble - 10;
+}
+
+inline char upperNibbleToASCIIHexDigit(char c)
+{
+ char nibble = (c >> 4) & 0xF;
+ return nibble < 10 ? '0' + nibble : 'A' + nibble - 10;
+}
+
+template<typename CharType> inline bool isASCIIAlphaCaselessEqual(CharType cssCharacter, char character)
+{
+ // This function compares a (preferrably) constant ASCII
+ // lowercase letter to any input character.
+ ASSERT(character >= 'a' && character <= 'z');
+ return LIKELY(toASCIILowerUnchecked(cssCharacter) == character);
+}
+
+}
+
+using WTF::isASCII;
+using WTF::isASCIIAlpha;
+using WTF::isASCIIAlphanumeric;
+using WTF::isASCIIDigit;
+using WTF::isASCIIHexDigit;
+using WTF::isASCIILower;
+using WTF::isASCIIOctalDigit;
+using WTF::isASCIIPrintable;
+using WTF::isASCIISpace;
+using WTF::isASCIIUpper;
+using WTF::toASCIIHexValue;
+using WTF::toASCIILower;
+using WTF::toASCIILowerUnchecked;
+using WTF::toASCIIUpper;
+using WTF::lowerNibbleToASCIIHexDigit;
+using WTF::upperNibbleToASCIIHexDigit;
+using WTF::isASCIIAlphaCaselessEqual;
+
+#endif
diff --git a/src/3rdparty/masm/wtf/Assertions.h b/src/3rdparty/masm/wtf/Assertions.h
new file mode 100644
index 0000000000..6263e50ed9
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Assertions.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright (C) 2003, 2006, 2007 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Assertions_h
+#define WTF_Assertions_h
+
+/*
+ no namespaces because this file has to be includable from C and Objective-C
+
+ Note, this file uses many GCC extensions, but it should be compatible with
+ C, Objective C, C++, and Objective C++.
+
+ For non-debug builds, everything is disabled by default.
+ Defining any of the symbols explicitly prevents this from having any effect.
+
+ MSVC7 note: variadic macro support was added in MSVC8, so for now we disable
+ those macros in MSVC7. For more info, see the MSDN document on variadic
+ macros here:
+
+ http://msdn2.microsoft.com/en-us/library/ms177415(VS.80).aspx
+*/
+
+#include <wtf/Platform.h>
+
+#include <stddef.h>
+
+#if !COMPILER(MSVC)
+#include <inttypes.h>
+#endif
+
+#ifdef NDEBUG
+/* Disable ASSERT* macros in release mode. */
+#define ASSERTIONS_DISABLED_DEFAULT 1
+#else
+#define ASSERTIONS_DISABLED_DEFAULT 0
+#endif
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define HAVE_VARIADIC_MACRO 0
+#else
+#define HAVE_VARIADIC_MACRO 1
+#endif
+
+#ifndef BACKTRACE_DISABLED
+#define BACKTRACE_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef ASSERT_DISABLED
+#define ASSERT_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef ASSERT_MSG_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define ASSERT_MSG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define ASSERT_MSG_DISABLED 1
+#endif
+#endif
+
+#ifndef ASSERT_ARG_DISABLED
+#define ASSERT_ARG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#endif
+
+#ifndef FATAL_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define FATAL_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define FATAL_DISABLED 1
+#endif
+#endif
+
+#ifndef ERROR_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define ERROR_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define ERROR_DISABLED 1
+#endif
+#endif
+
+#ifndef LOG_DISABLED
+#if HAVE(VARIADIC_MACRO)
+#define LOG_DISABLED ASSERTIONS_DISABLED_DEFAULT
+#else
+#define LOG_DISABLED 1
+#endif
+#endif
+
+#if COMPILER(GCC)
+#define WTF_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#define WTF_PRETTY_FUNCTION __FUNCTION__
+#endif
+
+/* WTF logging functions can process %@ in the format string to log a NSObject* but the printf format attribute
+ emits a warning when %@ is used in the format string. Until <rdar://problem/5195437> is resolved we can't include
+ the attribute when being used from Objective-C code in case it decides to use %@. */
+#if COMPILER(GCC) && !defined(__OBJC__)
+#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments)))
+#else
+#define WTF_ATTRIBUTE_PRINTF(formatStringArgument, extraArguments)
+#endif
+
+/* These helper functions are always declared, but not necessarily always defined if the corresponding function is disabled. */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum { WTFLogChannelOff, WTFLogChannelOn } WTFLogChannelState;
+
+typedef struct {
+ unsigned mask;
+ const char *defaultName;
+ WTFLogChannelState state;
+} WTFLogChannel;
+
+WTF_EXPORT_PRIVATE void WTFReportAssertionFailure(const char* file, int line, const char* function, const char* assertion);
+WTF_EXPORT_PRIVATE void WTFReportAssertionFailureWithMessage(const char* file, int line, const char* function, const char* assertion, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
+WTF_EXPORT_PRIVATE void WTFReportArgumentAssertionFailure(const char* file, int line, const char* function, const char* argName, const char* assertion);
+WTF_EXPORT_PRIVATE void WTFReportFatalError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5);
+WTF_EXPORT_PRIVATE void WTFReportError(const char* file, int line, const char* function, const char* format, ...) WTF_ATTRIBUTE_PRINTF(4, 5);
+WTF_EXPORT_PRIVATE void WTFLog(WTFLogChannel*, const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+WTF_EXPORT_PRIVATE void WTFLogVerbose(const char* file, int line, const char* function, WTFLogChannel*, const char* format, ...) WTF_ATTRIBUTE_PRINTF(5, 6);
+WTF_EXPORT_PRIVATE void WTFLogAlways(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
+
+WTF_EXPORT_PRIVATE void WTFGetBacktrace(void** stack, int* size);
+WTF_EXPORT_PRIVATE void WTFReportBacktrace();
+WTF_EXPORT_PRIVATE void WTFPrintBacktrace(void** stack, int size);
+
+typedef void (*WTFCrashHookFunction)();
+WTF_EXPORT_PRIVATE void WTFSetCrashHook(WTFCrashHookFunction);
+WTF_EXPORT_PRIVATE void WTFInvokeCrashHook();
+WTF_EXPORT_PRIVATE void WTFInstallReportBacktraceOnCrashHook();
+
+#ifdef __cplusplus
+}
+#endif
+
+/* CRASH() - Raises a fatal error resulting in program termination and triggering either the debugger or the crash reporter.
+
+ Use CRASH() in response to known, unrecoverable errors like out-of-memory.
+ Macro is enabled in both debug and release mode.
+ To test for unknown errors and verify assumptions, use ASSERT instead, to avoid impacting performance in release builds.
+
+ Signals are ignored by the crash reporter on OS X so we must do better.
+*/
+#ifndef CRASH
+#if COMPILER(CLANG)
+#define CRASH() \
+ (WTFReportBacktrace(), \
+ WTFInvokeCrashHook(), \
+ (*(int *)(uintptr_t)0xbbadbeef = 0), \
+ __builtin_trap())
+#else
+#define CRASH() \
+ (WTFReportBacktrace(), \
+ WTFInvokeCrashHook(), \
+ (*(int *)(uintptr_t)0xbbadbeef = 0), \
+ ((void(*)())0)() /* More reliable, but doesn't say BBADBEEF */ \
+ )
+#endif
+#endif
+
+#if COMPILER(CLANG)
+#define NO_RETURN_DUE_TO_CRASH NO_RETURN
+#else
+#define NO_RETURN_DUE_TO_CRASH
+#endif
+
+
+/* BACKTRACE
+
+ Print a backtrace to the same location as ASSERT messages.
+*/
+
+#if BACKTRACE_DISABLED
+
+#define BACKTRACE() ((void)0)
+
+#else
+
+#define BACKTRACE() do { \
+ WTFReportBacktrace(); \
+} while(false)
+
+#endif
+
+/* ASSERT, ASSERT_NOT_REACHED, ASSERT_UNUSED
+
+ These macros are compiled out of release builds.
+ Expressions inside them are evaluated in debug builds only.
+*/
+
+#if OS(WINCE)
+/* FIXME: We include this here only to avoid a conflict with the ASSERT macro. */
+#include <windows.h>
+#undef min
+#undef max
+#undef ERROR
+#endif
+
+#if OS(WINDOWS)
+/* FIXME: Change to use something other than ASSERT to avoid this conflict with the underlying platform */
+#undef ASSERT
+#endif
+
+#if ASSERT_DISABLED
+
+#define ASSERT(assertion) ((void)0)
+#define ASSERT_AT(assertion, file, line, function) ((void)0)
+#define ASSERT_NOT_REACHED() ((void)0)
+#define NO_RETURN_DUE_TO_ASSERT
+
+#if COMPILER(INTEL) && !OS(WINDOWS) || COMPILER(RVCT)
+template<typename T>
+inline void assertUnused(T& x) { (void)x; }
+#define ASSERT_UNUSED(variable, assertion) (assertUnused(variable))
+#else
+#define ASSERT_UNUSED(variable, assertion) ((void)variable)
+#endif
+
+#else
+
+#define ASSERT(assertion) \
+ (!(assertion) ? \
+ (WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \
+ CRASH()) : \
+ (void)0)
+
+#define ASSERT_AT(assertion, file, line, function) \
+ (!(assertion) ? \
+ (WTFReportAssertionFailure(file, line, function, #assertion), \
+ CRASH()) : \
+ (void)0)
+
+#define ASSERT_NOT_REACHED() do { \
+ WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 0); \
+ CRASH(); \
+} while (0)
+
+#define ASSERT_UNUSED(variable, assertion) ASSERT(assertion)
+
+#define NO_RETURN_DUE_TO_ASSERT NO_RETURN_DUE_TO_CRASH
+
+#endif
+
+/* ASSERT_WITH_SECURITY_IMPLICATION
+
+ Failure of this assertion indicates a possible security vulnerability.
+ Class of vulnerabilities that it tests include bad casts, out of bounds
+ accesses, use-after-frees, etc. Please file a bug using the security
+ template - https://bugs.webkit.org/enter_bug.cgi?product=Security.
+
+*/
+#ifdef ADDRESS_SANITIZER
+
+#define ASSERT_WITH_SECURITY_IMPLICATION(assertion) \
+ (!(assertion) ? \
+ (WTFReportAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion), \
+ CRASH()) : \
+ (void)0)
+
+#else
+
+#define ASSERT_WITH_SECURITY_IMPLICATION(assertion) ASSERT(assertion)
+
+#endif
+
+/* ASSERT_WITH_MESSAGE */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define ASSERT_WITH_MESSAGE(assertion) ((void)0)
+#elif ASSERT_MSG_DISABLED
+#define ASSERT_WITH_MESSAGE(assertion, ...) ((void)0)
+#else
+#define ASSERT_WITH_MESSAGE(assertion, ...) do \
+ if (!(assertion)) { \
+ WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \
+ CRASH(); \
+ } \
+while (0)
+#endif
+
+/* ASSERT_WITH_MESSAGE_UNUSED */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion) ((void)0)
+#elif ASSERT_MSG_DISABLED
+#if COMPILER(INTEL) && !OS(WINDOWS) || COMPILER(RVCT)
+template<typename T>
+inline void assertWithMessageUnused(T& x) { (void)x; }
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) (assertWithMessageUnused(variable))
+#else
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) ((void)variable)
+#endif
+#else
+#define ASSERT_WITH_MESSAGE_UNUSED(variable, assertion, ...) do \
+ if (!(assertion)) { \
+ WTFReportAssertionFailureWithMessage(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion, __VA_ARGS__); \
+ CRASH(); \
+ } \
+while (0)
+#endif
+
+
+/* ASSERT_ARG */
+
+#if ASSERT_ARG_DISABLED
+
+#define ASSERT_ARG(argName, assertion) ((void)0)
+
+#else
+
+#define ASSERT_ARG(argName, assertion) do \
+ if (!(assertion)) { \
+ WTFReportArgumentAssertionFailure(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, #argName, #assertion); \
+ CRASH(); \
+ } \
+while (0)
+
+#endif
+
+/* COMPILE_ASSERT */
+#ifndef COMPILE_ASSERT
+#if COMPILER_SUPPORTS(C_STATIC_ASSERT)
+/* Unlike static_assert below, this also works in plain C code. */
+#define COMPILE_ASSERT(exp, name) _Static_assert((exp), #name)
+#elif COMPILER_SUPPORTS(CXX_STATIC_ASSERT)
+#define COMPILE_ASSERT(exp, name) static_assert((exp), #name)
+#else
+#define COMPILE_ASSERT(exp, name) typedef int dummy##name [(exp) ? 1 : -1]
+#endif
+#endif
+
+/* FATAL */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define FATAL() ((void)0)
+#elif FATAL_DISABLED
+#define FATAL(...) ((void)0)
+#else
+#define FATAL(...) do { \
+ WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__); \
+ CRASH(); \
+} while (0)
+#endif
+
+/* LOG_ERROR */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define LOG_ERROR() ((void)0)
+#elif ERROR_DISABLED
+#define LOG_ERROR(...) ((void)0)
+#else
+#define LOG_ERROR(...) WTFReportError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, __VA_ARGS__)
+#endif
+
+/* LOG */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define LOG() ((void)0)
+#elif LOG_DISABLED
+#define LOG(channel, ...) ((void)0)
+#else
+#define LOG(channel, ...) WTFLog(&JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
+#define JOIN_LOG_CHANNEL_WITH_PREFIX(prefix, channel) JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel)
+#define JOIN_LOG_CHANNEL_WITH_PREFIX_LEVEL_2(prefix, channel) prefix ## channel
+#endif
+
+/* LOG_VERBOSE */
+
+#if COMPILER(MSVC7_OR_LOWER)
+#define LOG_VERBOSE(channel) ((void)0)
+#elif LOG_DISABLED
+#define LOG_VERBOSE(channel, ...) ((void)0)
+#else
+#define LOG_VERBOSE(channel, ...) WTFLogVerbose(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, &JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
+#endif
+
+/* UNREACHABLE_FOR_PLATFORM */
+
+#if COMPILER(CLANG)
+// This would be a macro except that its use of #pragma works best around
+// a function. Hence it uses macro naming convention.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmissing-noreturn"
+static inline void UNREACHABLE_FOR_PLATFORM()
+{
+ ASSERT_NOT_REACHED();
+}
+#pragma clang diagnostic pop
+#else
+#define UNREACHABLE_FOR_PLATFORM() ASSERT_NOT_REACHED()
+#endif
+
+#if ASSERT_DISABLED
+#define RELEASE_ASSERT(assertion) (UNLIKELY(!(assertion)) ? (CRASH()) : (void)0)
+#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) RELEASE_ASSERT(assertion)
+#define RELEASE_ASSERT_NOT_REACHED() CRASH()
+#else
+#define RELEASE_ASSERT(assertion) ASSERT(assertion)
+#define RELEASE_ASSERT_WITH_MESSAGE(assertion, ...) ASSERT_WITH_MESSAGE(assertion, __VA_ARGS__)
+#define RELEASE_ASSERT_NOT_REACHED() ASSERT_NOT_REACHED()
+#endif
+
+#endif /* WTF_Assertions_h */
diff --git a/src/3rdparty/masm/wtf/Atomics.h b/src/3rdparty/masm/wtf/Atomics.h
new file mode 100644
index 0000000000..df5abec81d
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Atomics.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Note: The implementations of InterlockedIncrement and InterlockedDecrement are based
+ * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. The license
+ * is virtually identical to the Apple license above but is included here for completeness.
+ *
+ * Boost Software License - Version 1.0 - August 17th, 2003
+ *
+ * Permission is hereby granted, free of charge, to any person or organization
+ * obtaining a copy of the software and accompanying documentation covered by
+ * this license (the "Software") to use, reproduce, display, distribute,
+ * execute, and transmit the Software, and to prepare derivative works of the
+ * Software, and to permit third-parties to whom the Software is furnished to
+ * do so, all subject to the following:
+ *
+ * The copyright notices in the Software and this entire statement, including
+ * the above license grant, this restriction and the following disclaimer,
+ * must be included in all copies of the Software, in whole or in part, and
+ * all derivative works of the Software, unless such copies or derivative
+ * works are solely in the form of machine-executable object code generated by
+ * a source language processor.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+ * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef Atomics_h
+#define Atomics_h
+
+#include <wtf/Platform.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/UnusedParam.h>
+
+#if OS(WINDOWS)
+#include <windows.h>
+#elif OS(QNX)
+#include <atomic.h>
+#endif
+
+namespace WTF {
+
+#if OS(WINDOWS)
+#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1
+
+#if OS(WINCE)
+inline int atomicIncrement(int* addend) { return InterlockedIncrement(reinterpret_cast<long*>(addend)); }
+inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); }
+#elif COMPILER(MINGW) || COMPILER(MSVC7_OR_LOWER)
+inline int atomicIncrement(int* addend) { return InterlockedIncrement(reinterpret_cast<long*>(addend)); }
+inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); }
+
+inline int64_t atomicIncrement(int64_t* addend) { return InterlockedIncrement64(reinterpret_cast<long long*>(addend)); }
+inline int64_t atomicDecrement(int64_t* addend) { return InterlockedDecrement64(reinterpret_cast<long long*>(addend)); }
+#else
+inline int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); }
+inline int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); }
+
+inline int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); }
+inline int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); }
+#endif
+
+#elif OS(QNX)
+#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1
+
+// Note, atomic_{add, sub}_value() return the previous value of addend's content.
+inline int atomicIncrement(int volatile* addend) { return static_cast<int>(atomic_add_value(reinterpret_cast<unsigned volatile*>(addend), 1)) + 1; }
+inline int atomicDecrement(int volatile* addend) { return static_cast<int>(atomic_sub_value(reinterpret_cast<unsigned volatile*>(addend), 1)) - 1; }
+
+#elif COMPILER(GCC) && !CPU(SPARC64) // sizeof(_Atomic_word) != sizeof(int) on sparc64 gcc
+#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1
+
+inline int atomicIncrement(int volatile* addend) { return __sync_add_and_fetch(addend, 1); }
+inline int atomicDecrement(int volatile* addend) { return __sync_sub_and_fetch(addend, 1); }
+
+inline int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); }
+inline int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); }
+
+#endif
+
+#if OS(WINDOWS)
+inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
+{
+#if OS(WINCE)
+ return InterlockedCompareExchange(reinterpret_cast<LONG*>(const_cast<unsigned*>(location)), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
+#else
+ return InterlockedCompareExchange(reinterpret_cast<LONG volatile*>(location), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
+#endif
+}
+
+inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* newValue)
+{
+ return InterlockedCompareExchangePointer(location, newValue, expected) == expected;
+}
+#else // OS(WINDOWS) --> not windows
+#if COMPILER(GCC) && !COMPILER(CLANG) // Work around a gcc bug
+inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
+#else
+inline bool weakCompareAndSwap(unsigned* location, unsigned expected, unsigned newValue)
+#endif
+{
+#if ENABLE(COMPARE_AND_SWAP)
+#if CPU(X86) || CPU(X86_64)
+ unsigned char result;
+ asm volatile(
+ "lock; cmpxchgl %3, %2\n\t"
+ "sete %1"
+ : "+a"(expected), "=q"(result), "+m"(*location)
+ : "r"(newValue)
+ : "memory"
+ );
+#elif CPU(ARM_THUMB2)
+ unsigned tmp;
+ unsigned result;
+ asm volatile(
+ "movw %1, #1\n\t"
+ "ldrex %2, %0\n\t"
+ "cmp %3, %2\n\t"
+ "bne.n 0f\n\t"
+ "strex %1, %4, %0\n\t"
+ "0:"
+ : "+Q"(*location), "=&r"(result), "=&r"(tmp)
+ : "r"(expected), "r"(newValue)
+ : "memory");
+ result = !result;
+#else
+#error "Bad architecture for compare and swap."
+#endif
+ return result;
+#else
+ UNUSED_PARAM(location);
+ UNUSED_PARAM(expected);
+ UNUSED_PARAM(newValue);
+ CRASH();
+ return false;
+#endif
+}
+
+inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* newValue)
+{
+#if ENABLE(COMPARE_AND_SWAP)
+#if CPU(X86_64)
+ bool result;
+ asm volatile(
+ "lock; cmpxchgq %3, %2\n\t"
+ "sete %1"
+ : "+a"(expected), "=q"(result), "+m"(*location)
+ : "r"(newValue)
+ : "memory"
+ );
+ return result;
+#else
+ return weakCompareAndSwap(bitwise_cast<unsigned*>(location), bitwise_cast<unsigned>(expected), bitwise_cast<unsigned>(newValue));
+#endif
+#else // ENABLE(COMPARE_AND_SWAP)
+ UNUSED_PARAM(location);
+ UNUSED_PARAM(expected);
+ UNUSED_PARAM(newValue);
+ CRASH();
+ return 0;
+#endif // ENABLE(COMPARE_AND_SWAP)
+}
+#endif // OS(WINDOWS) (end of the not-windows case)
+
+inline bool weakCompareAndSwapUIntPtr(volatile uintptr_t* location, uintptr_t expected, uintptr_t newValue)
+{
+ return weakCompareAndSwap(reinterpret_cast<void*volatile*>(location), reinterpret_cast<void*>(expected), reinterpret_cast<void*>(newValue));
+}
+
+#if CPU(ARM_THUMB2)
+
+inline void memoryBarrierAfterLock()
+{
+ asm volatile("dmb" ::: "memory");
+}
+
+inline void memoryBarrierBeforeUnlock()
+{
+ asm volatile("dmb" ::: "memory");
+}
+
+#else
+
+inline void memoryBarrierAfterLock() { }
+inline void memoryBarrierBeforeUnlock() { }
+
+#endif
+
+} // namespace WTF
+
+#if USE(LOCKFREE_THREADSAFEREFCOUNTED)
+using WTF::atomicDecrement;
+using WTF::atomicIncrement;
+#endif
+
+#endif // Atomics_h
diff --git a/src/3rdparty/masm/wtf/BumpPointerAllocator.h b/src/3rdparty/masm/wtf/BumpPointerAllocator.h
new file mode 100644
index 0000000000..3b2cfd974a
--- /dev/null
+++ b/src/3rdparty/masm/wtf/BumpPointerAllocator.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BumpPointerAllocator_h
+#define BumpPointerAllocator_h
+
+#include <algorithm>
+#include <wtf/PageAllocation.h>
+#include <wtf/PageBlock.h>
+
+namespace WTF {
+
+#define MINIMUM_BUMP_POOL_SIZE 0x1000
+
+class BumpPointerPool {
+public:
+ // ensureCapacity will check whether the current pool has capacity to
+ // allocate 'size' bytes of memory If it does not, it will attempt to
+ // allocate a new pool (which will be added to this one in a chain).
+ //
+ // If allocation fails (out of memory) this method will return null.
+ // If the return value is non-null, then callers should update any
+ // references they have to this current (possibly full) BumpPointerPool
+ // to instead point to the newly returned BumpPointerPool.
+ BumpPointerPool* ensureCapacity(size_t size)
+ {
+ void* allocationEnd = static_cast<char*>(m_current) + size;
+ ASSERT(allocationEnd > m_current); // check for overflow
+ if (allocationEnd <= static_cast<void*>(this))
+ return this;
+ return ensureCapacityCrossPool(this, size);
+ }
+
+ // alloc should only be called after calling ensureCapacity; as such
+ // alloc will never fail.
+ void* alloc(size_t size)
+ {
+ void* current = m_current;
+ void* allocationEnd = static_cast<char*>(current) + size;
+ ASSERT(allocationEnd > current); // check for overflow
+ ASSERT(allocationEnd <= static_cast<void*>(this));
+ m_current = allocationEnd;
+ return current;
+ }
+
+ // The dealloc method releases memory allocated using alloc. Memory
+ // must be released in a LIFO fashion, e.g. if the client calls alloc
+ // four times, returning pointer A, B, C, D, then the only valid order
+ // in which these may be deallocaed is D, C, B, A.
+ //
+ // The client may optionally skip some deallocations. In the example
+ // above, it would be valid to only explicitly dealloc C, A (D being
+ // dealloced along with C, B along with A).
+ //
+ // If pointer was not allocated from this pool (or pools) then dealloc
+ // will CRASH(). Callers should update any references they have to
+ // this current BumpPointerPool to instead point to the returned
+ // BumpPointerPool.
+ BumpPointerPool* dealloc(void* position)
+ {
+ if ((position >= m_start) && (position <= static_cast<void*>(this))) {
+ ASSERT(position <= m_current);
+ m_current = position;
+ return this;
+ }
+ return deallocCrossPool(this, position);
+ }
+
+private:
+ // Placement operator new, returns the last 'size' bytes of allocation for use as this.
+ void* operator new(size_t size, const PageAllocation& allocation)
+ {
+ ASSERT(size < allocation.size());
+ return reinterpret_cast<char*>(reinterpret_cast<intptr_t>(allocation.base()) + allocation.size()) - size;
+ }
+
+ BumpPointerPool(const PageAllocation& allocation)
+ : m_current(allocation.base())
+ , m_start(allocation.base())
+ , m_next(0)
+ , m_previous(0)
+ , m_allocation(allocation)
+ {
+ }
+
+ static BumpPointerPool* create(size_t minimumCapacity = 0)
+ {
+ // Add size of BumpPointerPool object, check for overflow.
+ minimumCapacity += sizeof(BumpPointerPool);
+ if (minimumCapacity < sizeof(BumpPointerPool))
+ return 0;
+
+ size_t poolSize = std::max(static_cast<size_t>(MINIMUM_BUMP_POOL_SIZE), WTF::pageSize());
+ while (poolSize < minimumCapacity) {
+ poolSize <<= 1;
+ // The following if check relies on MINIMUM_BUMP_POOL_SIZE being a power of 2!
+ ASSERT(!(MINIMUM_BUMP_POOL_SIZE & (MINIMUM_BUMP_POOL_SIZE - 1)));
+ if (!poolSize)
+ return 0;
+ }
+
+ PageAllocation allocation = PageAllocation::allocate(poolSize);
+ if (!!allocation)
+ return new (allocation) BumpPointerPool(allocation);
+ return 0;
+ }
+
+ void shrink()
+ {
+ ASSERT(!m_previous);
+ m_current = m_start;
+ while (m_next) {
+ BumpPointerPool* nextNext = m_next->m_next;
+ m_next->destroy();
+ m_next = nextNext;
+ }
+ }
+
+ void destroy()
+ {
+ m_allocation.deallocate();
+ }
+
+ static BumpPointerPool* ensureCapacityCrossPool(BumpPointerPool* previousPool, size_t size)
+ {
+ // The pool passed should not have capacity, so we'll start with the next one.
+ ASSERT(previousPool);
+ ASSERT((static_cast<char*>(previousPool->m_current) + size) > previousPool->m_current); // check for overflow
+ ASSERT((static_cast<char*>(previousPool->m_current) + size) > static_cast<void*>(previousPool));
+ BumpPointerPool* pool = previousPool->m_next;
+
+ while (true) {
+ if (!pool) {
+ // We've run to the end; allocate a new pool.
+ pool = BumpPointerPool::create(size);
+ previousPool->m_next = pool;
+ pool->m_previous = previousPool;
+ return pool;
+ }
+
+ //
+ void* current = pool->m_current;
+ void* allocationEnd = static_cast<char*>(current) + size;
+ ASSERT(allocationEnd > current); // check for overflow
+ if (allocationEnd <= static_cast<void*>(pool))
+ return pool;
+ }
+ }
+
+ static BumpPointerPool* deallocCrossPool(BumpPointerPool* pool, void* position)
+ {
+ // Should only be called if position is not in the current pool.
+ ASSERT((position < pool->m_start) || (position > static_cast<void*>(pool)));
+
+ while (true) {
+ // Unwind the current pool to the start, move back in the chain to the previous pool.
+ pool->m_current = pool->m_start;
+ pool = pool->m_previous;
+
+ // position was nowhere in the chain!
+ if (!pool)
+ CRASH();
+
+ if ((position >= pool->m_start) && (position <= static_cast<void*>(pool))) {
+ ASSERT(position <= pool->m_current);
+ pool->m_current = position;
+ return pool;
+ }
+ }
+ }
+
+ void* m_current;
+ void* m_start;
+ BumpPointerPool* m_next;
+ BumpPointerPool* m_previous;
+ PageAllocation m_allocation;
+
+ friend class BumpPointerAllocator;
+};
+
+// A BumpPointerAllocator manages a set of BumpPointerPool objects, which
+// can be used for LIFO (stack like) allocation.
+//
+// To begin allocating using this class call startAllocator(). The result
+// of this method will be null if the initial pool allocation fails, or a
+// pointer to a BumpPointerPool object that can be used to perform
+// allocations. Whilst running no memory will be released until
+// stopAllocator() is called. At this point all allocations made through
+// this allocator will be reaped, and underlying memory may be freed.
+//
+// (In practice we will still hold on to the initial pool to allow allocation
+// to be quickly restared, but aditional pools will be freed).
+//
+// This allocator is non-renetrant, it is encumbant on the clients to ensure
+// startAllocator() is not called again until stopAllocator() has been called.
+class BumpPointerAllocator {
+public:
+ BumpPointerAllocator()
+ : m_head(0)
+ {
+ }
+
+ ~BumpPointerAllocator()
+ {
+ if (m_head)
+ m_head->destroy();
+ }
+
+ BumpPointerPool* startAllocator()
+ {
+ if (!m_head)
+ m_head = BumpPointerPool::create();
+ return m_head;
+ }
+
+ void stopAllocator()
+ {
+ if (m_head)
+ m_head->shrink();
+ }
+
+private:
+ BumpPointerPool* m_head;
+};
+
+}
+
+using WTF::BumpPointerAllocator;
+
+#endif // BumpPointerAllocator_h
diff --git a/src/3rdparty/masm/wtf/CheckedArithmetic.h b/src/3rdparty/masm/wtf/CheckedArithmetic.h
new file mode 100644
index 0000000000..dd4acbb9b5
--- /dev/null
+++ b/src/3rdparty/masm/wtf/CheckedArithmetic.h
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CheckedArithmetic_h
+#define CheckedArithmetic_h
+
+#include <wtf/Assertions.h>
+#include <wtf/EnumClass.h>
+#include <wtf/TypeTraits.h>
+
+#include <limits>
+#include <stdint.h>
+
+/* Checked<T>
+ *
+ * This class provides a mechanism to perform overflow-safe integer arithmetic
+ * without having to manually ensure that you have all the required bounds checks
+ * directly in your code.
+ *
+ * There are two modes of operation:
+ * - The default is Checked<T, CrashOnOverflow>, and crashes at the point
+ * and overflow has occurred.
+ * - The alternative is Checked<T, RecordOverflow>, which uses an additional
+ * byte of storage to track whether an overflow has occurred, subsequent
+ * unchecked operations will crash if an overflow has occured
+ *
+ * It is possible to provide a custom overflow handler, in which case you need
+ * to support these functions:
+ * - void overflowed();
+ * This function is called when an operation has produced an overflow.
+ * - bool hasOverflowed();
+ * This function must return true if overflowed() has been called on an
+ * instance and false if it has not.
+ * - void clearOverflow();
+ * Used to reset overflow tracking when a value is being overwritten with
+ * a new value.
+ *
+ * Checked<T> works for all integer types, with the following caveats:
+ * - Mixing signedness of operands is only supported for types narrower than
+ * 64bits.
+ * - It does have a performance impact, so tight loops may want to be careful
+ * when using it.
+ *
+ */
+
+namespace WTF {
+
+ENUM_CLASS(CheckedState)
+{
+ DidOverflow,
+ DidNotOverflow
+} ENUM_CLASS_END(CheckedState);
+
+class CrashOnOverflow {
+public:
+ static NO_RETURN_DUE_TO_CRASH void overflowed()
+ {
+ CRASH();
+ }
+
+ void clearOverflow() { }
+
+public:
+ bool hasOverflowed() const { return false; }
+};
+
+class RecordOverflow {
+protected:
+ RecordOverflow()
+ : m_overflowed(false)
+ {
+ }
+
+ void overflowed()
+ {
+ m_overflowed = true;
+ }
+
+ void clearOverflow()
+ {
+ m_overflowed = false;
+ }
+
+public:
+ bool hasOverflowed() const { return m_overflowed; }
+
+private:
+ unsigned char m_overflowed;
+};
+
+template <typename T, class OverflowHandler = CrashOnOverflow> class Checked;
+template <typename T> struct RemoveChecked;
+template <typename T> struct RemoveChecked<Checked<T> >;
+
+template <typename Target, typename Source, bool targetSigned = std::numeric_limits<Target>::is_signed, bool sourceSigned = std::numeric_limits<Source>::is_signed> struct BoundsChecker;
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, false> {
+ static bool inBounds(Source value)
+ {
+ // Same signedness so implicit type conversion will always increase precision
+ // to widest type
+ return value <= std::numeric_limits<Target>::max();
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, true> {
+ static bool inBounds(Source value)
+ {
+ // Same signedness so implicit type conversion will always increase precision
+ // to widest type
+ return std::numeric_limits<Target>::min() <= value && value <= std::numeric_limits<Target>::max();
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, false, true> {
+ static bool inBounds(Source value)
+ {
+ // Target is unsigned so any value less than zero is clearly unsafe
+ if (value < 0)
+ return false;
+ // If our (unsigned) Target is the same or greater width we can
+ // convert value to type Target without losing precision
+ if (sizeof(Target) >= sizeof(Source))
+ return static_cast<Target>(value) <= std::numeric_limits<Target>::max();
+ // The signed Source type has greater precision than the target so
+ // max(Target) -> Source will widen.
+ return value <= static_cast<Source>(std::numeric_limits<Target>::max());
+ }
+};
+
+template <typename Target, typename Source> struct BoundsChecker<Target, Source, true, false> {
+ static bool inBounds(Source value)
+ {
+ // Signed target with an unsigned source
+ if (sizeof(Target) <= sizeof(Source))
+ return value <= static_cast<Source>(std::numeric_limits<Target>::max());
+ // Target is Wider than Source so we're guaranteed to fit any value in
+ // unsigned Source
+ return true;
+ }
+};
+
+template <typename Target, typename Source, bool CanElide = IsSameType<Target, Source>::value || (sizeof(Target) > sizeof(Source)) > struct BoundsCheckElider;
+template <typename Target, typename Source> struct BoundsCheckElider<Target, Source, true> {
+ static bool inBounds(Source) { return true; }
+};
+template <typename Target, typename Source> struct BoundsCheckElider<Target, Source, false> : public BoundsChecker<Target, Source> {
+};
+
+template <typename Target, typename Source> static inline bool isInBounds(Source value)
+{
+ return BoundsCheckElider<Target, Source>::inBounds(value);
+}
+
+template <typename T> struct RemoveChecked {
+ typedef T CleanType;
+ static const CleanType DefaultValue = 0;
+};
+
+template <typename T> struct RemoveChecked<Checked<T, CrashOnOverflow> > {
+ typedef typename RemoveChecked<T>::CleanType CleanType;
+ static const CleanType DefaultValue = 0;
+};
+
+template <typename T> struct RemoveChecked<Checked<T, RecordOverflow> > {
+ typedef typename RemoveChecked<T>::CleanType CleanType;
+ static const CleanType DefaultValue = 0;
+};
+
+// The ResultBase and SignednessSelector are used to workaround typeof not being
+// available in MSVC
+template <typename U, typename V, bool uIsBigger = (sizeof(U) > sizeof(V)), bool sameSize = (sizeof(U) == sizeof(V))> struct ResultBase;
+template <typename U, typename V> struct ResultBase<U, V, true, false> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct ResultBase<U, V, false, false> {
+ typedef V ResultType;
+};
+
+template <typename U> struct ResultBase<U, U, false, true> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V, bool uIsSigned = std::numeric_limits<U>::is_signed, bool vIsSigned = std::numeric_limits<V>::is_signed> struct SignednessSelector;
+template <typename U, typename V> struct SignednessSelector<U, V, true, true> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct SignednessSelector<U, V, false, false> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct SignednessSelector<U, V, true, false> {
+ typedef V ResultType;
+};
+
+template <typename U, typename V> struct SignednessSelector<U, V, false, true> {
+ typedef U ResultType;
+};
+
+template <typename U, typename V> struct ResultBase<U, V, false, true> {
+ typedef typename SignednessSelector<U, V>::ResultType ResultType;
+};
+
+template <typename U, typename V> struct Result : ResultBase<typename RemoveChecked<U>::CleanType, typename RemoveChecked<V>::CleanType> {
+};
+
+template <typename LHS, typename RHS, typename ResultType = typename Result<LHS, RHS>::ResultType,
+ bool lhsSigned = std::numeric_limits<LHS>::is_signed, bool rhsSigned = std::numeric_limits<RHS>::is_signed> struct ArithmeticOperations;
+
+template <typename LHS, typename RHS, typename ResultType> struct ArithmeticOperations<LHS, RHS, ResultType, true, true> {
+ // LHS and RHS are signed types
+
+ // Helper function
+ static inline bool signsMatch(LHS lhs, RHS rhs)
+ {
+ return (lhs ^ rhs) >= 0;
+ }
+
+ static inline bool add(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (signsMatch(lhs, rhs)) {
+ if (lhs >= 0) {
+ if ((std::numeric_limits<ResultType>::max() - rhs) < lhs)
+ return false;
+ } else {
+ ResultType temp = lhs - std::numeric_limits<ResultType>::min();
+ if (rhs < -temp)
+ return false;
+ }
+ } // if the signs do not match this operation can't overflow
+ result = lhs + rhs;
+ return true;
+ }
+
+ static inline bool sub(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (!signsMatch(lhs, rhs)) {
+ if (lhs >= 0) {
+ if (lhs > std::numeric_limits<ResultType>::max() + rhs)
+ return false;
+ } else {
+ if (rhs > std::numeric_limits<ResultType>::max() + lhs)
+ return false;
+ }
+ } // if the signs match this operation can't overflow
+ result = lhs - rhs;
+ return true;
+ }
+
+ static inline bool multiply(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (signsMatch(lhs, rhs)) {
+ if (lhs >= 0) {
+ if (lhs && (std::numeric_limits<ResultType>::max() / lhs) < rhs)
+ return false;
+ } else {
+ if (static_cast<ResultType>(lhs) == std::numeric_limits<ResultType>::min() || static_cast<ResultType>(rhs) == std::numeric_limits<ResultType>::min())
+ return false;
+ if ((std::numeric_limits<ResultType>::max() / -lhs) < -rhs)
+ return false;
+ }
+ } else {
+ if (lhs < 0) {
+ if (rhs && lhs < (std::numeric_limits<ResultType>::min() / rhs))
+ return false;
+ } else {
+ if (lhs && rhs < (std::numeric_limits<ResultType>::min() / lhs))
+ return false;
+ }
+ }
+ result = lhs * rhs;
+ return true;
+ }
+
+ static inline bool equals(LHS lhs, RHS rhs) { return lhs == rhs; }
+
+};
+
+template <typename LHS, typename RHS, typename ResultType> struct ArithmeticOperations<LHS, RHS, ResultType, false, false> {
+ // LHS and RHS are unsigned types so bounds checks are nice and easy
+ static inline bool add(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ ResultType temp = lhs + rhs;
+ if (temp < lhs)
+ return false;
+ result = temp;
+ return true;
+ }
+
+ static inline bool sub(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ ResultType temp = lhs - rhs;
+ if (temp > lhs)
+ return false;
+ result = temp;
+ return true;
+ }
+
+ static inline bool multiply(LHS lhs, RHS rhs, ResultType& result) WARN_UNUSED_RETURN
+ {
+ if (!lhs || !rhs) {
+ result = 0;
+ return true;
+ }
+ if (std::numeric_limits<ResultType>::max() / lhs < rhs)
+ return false;
+ result = lhs * rhs;
+ return true;
+ }
+
+ static inline bool equals(LHS lhs, RHS rhs) { return lhs == rhs; }
+
+};
+
+template <typename ResultType> struct ArithmeticOperations<int, unsigned, ResultType, true, false> {
+ static inline bool add(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ int64_t temp = lhs + rhs;
+ if (temp < std::numeric_limits<ResultType>::min())
+ return false;
+ if (temp > std::numeric_limits<ResultType>::max())
+ return false;
+ result = static_cast<ResultType>(temp);
+ return true;
+ }
+
+ static inline bool sub(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ int64_t temp = lhs - rhs;
+ if (temp < std::numeric_limits<ResultType>::min())
+ return false;
+ if (temp > std::numeric_limits<ResultType>::max())
+ return false;
+ result = static_cast<ResultType>(temp);
+ return true;
+ }
+
+ static inline bool multiply(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ int64_t temp = lhs * rhs;
+ if (temp < std::numeric_limits<ResultType>::min())
+ return false;
+ if (temp > std::numeric_limits<ResultType>::max())
+ return false;
+ result = static_cast<ResultType>(temp);
+ return true;
+ }
+
+ static inline bool equals(int lhs, unsigned rhs)
+ {
+ return static_cast<int64_t>(lhs) == static_cast<int64_t>(rhs);
+ }
+};
+
+template <typename ResultType> struct ArithmeticOperations<unsigned, int, ResultType, false, true> {
+ static inline bool add(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::add(rhs, lhs, result);
+ }
+
+ static inline bool sub(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::sub(lhs, rhs, result);
+ }
+
+ static inline bool multiply(int64_t lhs, int64_t rhs, ResultType& result)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::multiply(rhs, lhs, result);
+ }
+
+ static inline bool equals(unsigned lhs, int rhs)
+ {
+ return ArithmeticOperations<int, unsigned, ResultType>::equals(rhs, lhs);
+ }
+};
+
+template <typename U, typename V, typename R> static inline bool safeAdd(U lhs, V rhs, R& result)
+{
+ return ArithmeticOperations<U, V, R>::add(lhs, rhs, result);
+}
+
+template <typename U, typename V, typename R> static inline bool safeSub(U lhs, V rhs, R& result)
+{
+ return ArithmeticOperations<U, V, R>::sub(lhs, rhs, result);
+}
+
+template <typename U, typename V, typename R> static inline bool safeMultiply(U lhs, V rhs, R& result)
+{
+ return ArithmeticOperations<U, V, R>::multiply(lhs, rhs, result);
+}
+
+template <typename U, typename V> static inline bool safeEquals(U lhs, V rhs)
+{
+ return ArithmeticOperations<U, V>::equals(lhs, rhs);
+}
+
+enum ResultOverflowedTag { ResultOverflowed };
+
+// FIXME: Needed to workaround http://llvm.org/bugs/show_bug.cgi?id=10801
+static inline bool workAroundClangBug() { return true; }
+
+template <typename T, class OverflowHandler> class Checked : public OverflowHandler {
+public:
+ template <typename _T, class _OverflowHandler> friend class Checked;
+ Checked()
+ : m_value(0)
+ {
+ }
+
+ Checked(ResultOverflowedTag)
+ : m_value(0)
+ {
+ // FIXME: Remove this when clang fixes http://llvm.org/bugs/show_bug.cgi?id=10801
+ if (workAroundClangBug())
+ this->overflowed();
+ }
+
+ template <typename U> Checked(U value)
+ {
+ if (!isInBounds<T>(value))
+ this->overflowed();
+ m_value = static_cast<T>(value);
+ }
+
+ template <typename V> Checked(const Checked<T, V>& rhs)
+ : m_value(rhs.m_value)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ }
+
+ template <typename U> Checked(const Checked<U, OverflowHandler>& rhs)
+ : OverflowHandler(rhs)
+ {
+ if (!isInBounds<T>(rhs.m_value))
+ this->overflowed();
+ m_value = static_cast<T>(rhs.m_value);
+ }
+
+ template <typename U, typename V> Checked(const Checked<U, V>& rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ if (!isInBounds<T>(rhs.m_value))
+ this->overflowed();
+ m_value = static_cast<T>(rhs.m_value);
+ }
+
+ const Checked& operator=(Checked rhs)
+ {
+ this->clearOverflow();
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ m_value = static_cast<T>(rhs.m_value);
+ return *this;
+ }
+
+ template <typename U> const Checked& operator=(U value)
+ {
+ return *this = Checked(value);
+ }
+
+ template <typename U, typename V> const Checked& operator=(const Checked<U, V>& rhs)
+ {
+ return *this = Checked(rhs);
+ }
+
+ // prefix
+ const Checked& operator++()
+ {
+ if (m_value == std::numeric_limits<T>::max())
+ this->overflowed();
+ m_value++;
+ return *this;
+ }
+
+ const Checked& operator--()
+ {
+ if (m_value == std::numeric_limits<T>::min())
+ this->overflowed();
+ m_value--;
+ return *this;
+ }
+
+ // postfix operators
+ const Checked operator++(int)
+ {
+ if (m_value == std::numeric_limits<T>::max())
+ this->overflowed();
+ return Checked(m_value++);
+ }
+
+ const Checked operator--(int)
+ {
+ if (m_value == std::numeric_limits<T>::min())
+ this->overflowed();
+ return Checked(m_value--);
+ }
+
+ // Boolean operators
+ bool operator!() const
+ {
+ if (this->hasOverflowed())
+ CRASH();
+ return !m_value;
+ }
+
+ typedef void* (Checked::*UnspecifiedBoolType);
+ operator UnspecifiedBoolType*() const
+ {
+ if (this->hasOverflowed())
+ CRASH();
+ return (m_value) ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0;
+ }
+
+ // Value accessors. unsafeGet() will crash if there's been an overflow.
+ T unsafeGet() const
+ {
+ if (this->hasOverflowed())
+ CRASH();
+ return m_value;
+ }
+
+ inline CheckedState safeGet(T& value) const WARN_UNUSED_RETURN
+ {
+ value = m_value;
+ if (this->hasOverflowed())
+ return CheckedState::DidOverflow;
+ return CheckedState::DidNotOverflow;
+ }
+
+ // Mutating assignment
+ template <typename U> const Checked operator+=(U rhs)
+ {
+ if (!safeAdd(m_value, rhs, m_value))
+ this->overflowed();
+ return *this;
+ }
+
+ template <typename U> const Checked operator-=(U rhs)
+ {
+ if (!safeSub(m_value, rhs, m_value))
+ this->overflowed();
+ return *this;
+ }
+
+ template <typename U> const Checked operator*=(U rhs)
+ {
+ if (!safeMultiply(m_value, rhs, m_value))
+ this->overflowed();
+ return *this;
+ }
+
+ const Checked operator*=(double rhs)
+ {
+ double result = rhs * m_value;
+ // Handle +/- infinity and NaN
+ if (!(std::numeric_limits<T>::min() <= result && std::numeric_limits<T>::max() >= result))
+ this->overflowed();
+ m_value = (T)result;
+ return *this;
+ }
+
+ const Checked operator*=(float rhs)
+ {
+ return *this *= (double)rhs;
+ }
+
+ template <typename U, typename V> const Checked operator+=(Checked<U, V> rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ return *this += rhs.m_value;
+ }
+
+ template <typename U, typename V> const Checked operator-=(Checked<U, V> rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ return *this -= rhs.m_value;
+ }
+
+ template <typename U, typename V> const Checked operator*=(Checked<U, V> rhs)
+ {
+ if (rhs.hasOverflowed())
+ this->overflowed();
+ return *this *= rhs.m_value;
+ }
+
+ // Equality comparisons
+ template <typename V> bool operator==(Checked<T, V> rhs)
+ {
+ return unsafeGet() == rhs.unsafeGet();
+ }
+
+ template <typename U> bool operator==(U rhs)
+ {
+ if (this->hasOverflowed())
+ this->overflowed();
+ return safeEquals(m_value, rhs);
+ }
+
+ template <typename U, typename V> const Checked operator==(Checked<U, V> rhs)
+ {
+ return unsafeGet() == Checked(rhs.unsafeGet());
+ }
+
+ template <typename U> bool operator!=(U rhs)
+ {
+ return !(*this == rhs);
+ }
+
+private:
+ // Disallow implicit conversion of floating point to integer types
+ Checked(float);
+ Checked(double);
+ void operator=(float);
+ void operator=(double);
+ void operator+=(float);
+ void operator+=(double);
+ void operator-=(float);
+ void operator-=(double);
+ T m_value;
+};
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator+(Checked<U, OverflowHandler> lhs, Checked<V, OverflowHandler> rhs)
+{
+ U x = 0;
+ V y = 0;
+ bool overflowed = lhs.safeGet(x) == CheckedState::DidOverflow || rhs.safeGet(y) == CheckedState::DidOverflow;
+ typename Result<U, V>::ResultType result = 0;
+ overflowed |= !safeAdd(x, y, result);
+ if (overflowed)
+ return ResultOverflowed;
+ return result;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator-(Checked<U, OverflowHandler> lhs, Checked<V, OverflowHandler> rhs)
+{
+ U x = 0;
+ V y = 0;
+ bool overflowed = lhs.safeGet(x) == CheckedState::DidOverflow || rhs.safeGet(y) == CheckedState::DidOverflow;
+ typename Result<U, V>::ResultType result = 0;
+ overflowed |= !safeSub(x, y, result);
+ if (overflowed)
+ return ResultOverflowed;
+ return result;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator*(Checked<U, OverflowHandler> lhs, Checked<V, OverflowHandler> rhs)
+{
+ U x = 0;
+ V y = 0;
+ bool overflowed = lhs.safeGet(x) == CheckedState::DidOverflow || rhs.safeGet(y) == CheckedState::DidOverflow;
+ typename Result<U, V>::ResultType result = 0;
+ overflowed |= !safeMultiply(x, y, result);
+ if (overflowed)
+ return ResultOverflowed;
+ return result;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator+(Checked<U, OverflowHandler> lhs, V rhs)
+{
+ return lhs + Checked<V, OverflowHandler>(rhs);
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator-(Checked<U, OverflowHandler> lhs, V rhs)
+{
+ return lhs - Checked<V, OverflowHandler>(rhs);
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator*(Checked<U, OverflowHandler> lhs, V rhs)
+{
+ return lhs * Checked<V, OverflowHandler>(rhs);
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator+(U lhs, Checked<V, OverflowHandler> rhs)
+{
+ return Checked<U, OverflowHandler>(lhs) + rhs;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator-(U lhs, Checked<V, OverflowHandler> rhs)
+{
+ return Checked<U, OverflowHandler>(lhs) - rhs;
+}
+
+template <typename U, typename V, typename OverflowHandler> static inline Checked<typename Result<U, V>::ResultType, OverflowHandler> operator*(U lhs, Checked<V, OverflowHandler> rhs)
+{
+ return Checked<U, OverflowHandler>(lhs) * rhs;
+}
+
+}
+
+using WTF::Checked;
+using WTF::CheckedState;
+using WTF::RecordOverflow;
+
+#endif
diff --git a/src/3rdparty/masm/wtf/Compiler.h b/src/3rdparty/masm/wtf/Compiler.h
new file mode 100644
index 0000000000..b886f37151
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Compiler.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Compiler_h
+#define WTF_Compiler_h
+
+/* COMPILER() - the compiler being used to build the project */
+#define COMPILER(WTF_FEATURE) (defined WTF_COMPILER_##WTF_FEATURE && WTF_COMPILER_##WTF_FEATURE)
+
+/* COMPILER_SUPPORTS() - whether the compiler being used to build the project supports the given feature. */
+#define COMPILER_SUPPORTS(WTF_COMPILER_FEATURE) (defined WTF_COMPILER_SUPPORTS_##WTF_COMPILER_FEATURE && WTF_COMPILER_SUPPORTS_##WTF_COMPILER_FEATURE)
+
+/* COMPILER_QUIRK() - whether the compiler being used to build the project requires a given quirk. */
+#define COMPILER_QUIRK(WTF_COMPILER_QUIRK) (defined WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK && WTF_COMPILER_QUIRK_##WTF_COMPILER_QUIRK)
+
+/* ==== COMPILER() - the compiler being used to build the project ==== */
+
+/* COMPILER(CLANG) - Clang */
+#if defined(__clang__)
+#define WTF_COMPILER_CLANG 1
+
+#ifndef __has_extension
+#define __has_extension __has_feature /* Compatibility with older versions of clang */
+#endif
+
+#define CLANG_PRAGMA(PRAGMA) _Pragma(PRAGMA)
+
+/* Specific compiler features */
+#define WTF_COMPILER_SUPPORTS_CXX_VARIADIC_TEMPLATES __has_extension(cxx_variadic_templates)
+
+/* There is a bug in clang that comes with Xcode 4.2 where AtomicStrings can't be implicitly converted to Strings
+ in the presence of move constructors and/or move assignment operators. This bug has been fixed in Xcode 4.3 clang, so we
+ check for both cxx_rvalue_references as well as the unrelated cxx_nonstatic_member_init feature which we know was added in 4.3 */
+#define WTF_COMPILER_SUPPORTS_CXX_RVALUE_REFERENCES __has_extension(cxx_rvalue_references) && __has_extension(cxx_nonstatic_member_init)
+
+#define WTF_COMPILER_SUPPORTS_CXX_DELETED_FUNCTIONS __has_extension(cxx_deleted_functions)
+#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR __has_feature(cxx_nullptr)
+#define WTF_COMPILER_SUPPORTS_CXX_EXPLICIT_CONVERSIONS __has_feature(cxx_explicit_conversions)
+#define WTF_COMPILER_SUPPORTS_BLOCKS __has_feature(blocks)
+#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT __has_extension(c_static_assert)
+#define WTF_COMPILER_SUPPORTS_CXX_STATIC_ASSERT __has_extension(cxx_static_assert)
+#define WTF_COMPILER_SUPPORTS_CXX_OVERRIDE_CONTROL __has_extension(cxx_override_control)
+#define WTF_COMPILER_SUPPORTS_HAS_TRIVIAL_DESTRUCTOR __has_extension(has_trivial_destructor)
+#define WTF_COMPILER_SUPPORTS_CXX_STRONG_ENUMS __has_extension(cxx_strong_enums)
+
+#endif
+
+#ifndef CLANG_PRAGMA
+#define CLANG_PRAGMA(PRAGMA)
+#endif
+
+/* COMPILER(MSVC) - Microsoft Visual C++ */
+/* COMPILER(MSVC7_OR_LOWER) - Microsoft Visual C++ 2003 or lower*/
+/* COMPILER(MSVC9_OR_LOWER) - Microsoft Visual C++ 2008 or lower*/
+#if defined(_MSC_VER)
+#define WTF_COMPILER_MSVC 1
+#if _MSC_VER < 1400
+#define WTF_COMPILER_MSVC7_OR_LOWER 1
+#elif _MSC_VER < 1600
+#define WTF_COMPILER_MSVC9_OR_LOWER 1
+#endif
+
+/* Specific compiler features */
+#if !COMPILER(CLANG) && _MSC_VER >= 1600
+#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR 1
+#endif
+
+#if !COMPILER(CLANG)
+#define WTF_COMPILER_SUPPORTS_CXX_OVERRIDE_CONTROL 1
+#define WTF_COMPILER_QUIRK_FINAL_IS_CALLED_SEALED 1
+#endif
+
+#endif
+
+/* COMPILER(RVCT) - ARM RealView Compilation Tools */
+#if defined(__CC_ARM) || defined(__ARMCC__)
+#define WTF_COMPILER_RVCT 1
+#define RVCT_VERSION_AT_LEAST(major, minor, patch, build) (__ARMCC_VERSION >= (major * 100000 + minor * 10000 + patch * 1000 + build))
+#else
+/* Define this for !RVCT compilers, just so we can write things like RVCT_VERSION_AT_LEAST(3, 0, 0, 0). */
+#define RVCT_VERSION_AT_LEAST(major, minor, patch, build) 0
+#endif
+
+/* COMPILER(GCCE) - GNU Compiler Collection for Embedded */
+#if defined(__GCCE__)
+#define WTF_COMPILER_GCCE 1
+#define GCCE_VERSION (__GCCE__ * 10000 + __GCCE_MINOR__ * 100 + __GCCE_PATCHLEVEL__)
+#define GCCE_VERSION_AT_LEAST(major, minor, patch) (GCCE_VERSION >= (major * 10000 + minor * 100 + patch))
+#endif
+
+/* COMPILER(GCC) - GNU Compiler Collection */
+/* --gnu option of the RVCT compiler also defines __GNUC__ */
+#if defined(__GNUC__) && !COMPILER(RVCT)
+#define WTF_COMPILER_GCC 1
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION_AT_LEAST(major, minor, patch) (GCC_VERSION >= (major * 10000 + minor * 100 + patch))
+#else
+/* Define this for !GCC compilers, just so we can write things like GCC_VERSION_AT_LEAST(4, 1, 0). */
+#define GCC_VERSION_AT_LEAST(major, minor, patch) 0
+#endif
+
+/* Specific compiler features */
+#if COMPILER(GCC) && !COMPILER(CLANG)
+#if GCC_VERSION_AT_LEAST(4, 8, 0)
+#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
+#endif
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+/* C11 support */
+#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT 1
+#endif
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(__cplusplus) && __cplusplus >= 201103L)
+/* C++11 support */
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_RVALUE_REFERENCES 1
+#define WTF_COMPILER_SUPPORTS_CXX_STATIC_ASSERT 1
+#define WTF_COMPILER_SUPPORTS_CXX_VARIADIC_TEMPLATES 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 4, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_DELETED_FUNCTIONS 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 5, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_EXPLICIT_CONVERSIONS 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 6, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR 1
+/* Strong enums should work from gcc 4.4, but don't seem to support some operators */
+#define WTF_COMPILER_SUPPORTS_CXX_STRONG_ENUMS 1
+#endif
+#if GCC_VERSION_AT_LEAST(4, 7, 0)
+#define WTF_COMPILER_SUPPORTS_CXX_OVERRIDE_CONTROL 1
+#endif
+#endif /* defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(__cplusplus) && __cplusplus >= 201103L) */
+#endif /* COMPILER(GCC) */
+
+/* COMPILER(MINGW) - MinGW GCC */
+/* COMPILER(MINGW64) - mingw-w64 GCC - only used as additional check to exclude mingw.org specific functions */
+#if defined(__MINGW32__)
+#define WTF_COMPILER_MINGW 1
+#include <_mingw.h> /* private MinGW header */
+ #if defined(__MINGW64_VERSION_MAJOR) /* best way to check for mingw-w64 vs mingw.org */
+ #define WTF_COMPILER_MINGW64 1
+ #endif /* __MINGW64_VERSION_MAJOR */
+#endif /* __MINGW32__ */
+
+/* COMPILER(INTEL) - Intel C++ Compiler */
+#if defined(__INTEL_COMPILER)
+#define WTF_COMPILER_INTEL 1
+#endif
+
+/* COMPILER(SUNCC) */
+#if defined(__SUNPRO_CC) || defined(__SUNPRO_C)
+#define WTF_COMPILER_SUNCC 1
+#endif
+
+/* ==== Compiler features ==== */
+
+
+/* ALWAYS_INLINE */
+
+#ifndef ALWAYS_INLINE
+#if COMPILER(GCC) && defined(NDEBUG) && !COMPILER(MINGW)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif (COMPILER(MSVC) || COMPILER(RVCT)) && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+#endif
+
+
+/* NEVER_INLINE */
+
+#ifndef NEVER_INLINE
+#if COMPILER(GCC)
+#define NEVER_INLINE __attribute__((__noinline__))
+#elif COMPILER(RVCT)
+#define NEVER_INLINE __declspec(noinline)
+#else
+#define NEVER_INLINE
+#endif
+#endif
+
+
+/* UNLIKELY */
+
+#ifndef UNLIKELY
+#if COMPILER(GCC) || (COMPILER(RVCT) && defined(__GNUC__))
+#define UNLIKELY(x) __builtin_expect((x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif
+#endif
+
+
+/* LIKELY */
+
+#ifndef LIKELY
+#if COMPILER(GCC) || (COMPILER(RVCT) && defined(__GNUC__))
+#define LIKELY(x) __builtin_expect((x), 1)
+#else
+#define LIKELY(x) (x)
+#endif
+#endif
+
+
+/* NO_RETURN */
+
+
+#ifndef NO_RETURN
+#if COMPILER(GCC)
+#define NO_RETURN __attribute((__noreturn__))
+#elif COMPILER(MSVC) || COMPILER(RVCT)
+#define NO_RETURN __declspec(noreturn)
+#else
+#define NO_RETURN
+#endif
+#endif
+
+
+/* NO_RETURN_WITH_VALUE */
+
+#ifndef NO_RETURN_WITH_VALUE
+#if !COMPILER(MSVC)
+#define NO_RETURN_WITH_VALUE NO_RETURN
+#else
+#define NO_RETURN_WITH_VALUE
+#endif
+#endif
+
+
+/* WARN_UNUSED_RETURN */
+
+#if COMPILER(GCC)
+#define WARN_UNUSED_RETURN __attribute__ ((warn_unused_result))
+#else
+#define WARN_UNUSED_RETURN
+#endif
+
+/* OVERRIDE and FINAL */
+
+#if COMPILER_SUPPORTS(CXX_OVERRIDE_CONTROL)
+#define OVERRIDE override
+
+#if COMPILER_QUIRK(FINAL_IS_CALLED_SEALED)
+#define FINAL sealed
+#else
+#define FINAL final
+#endif
+
+#else
+#define OVERRIDE
+#define FINAL
+#endif
+
+/* REFERENCED_FROM_ASM */
+
+#ifndef REFERENCED_FROM_ASM
+#if COMPILER(GCC)
+#define REFERENCED_FROM_ASM __attribute__((used))
+#else
+#define REFERENCED_FROM_ASM
+#endif
+#endif
+
+/* OBJC_CLASS */
+
+#ifndef OBJC_CLASS
+#ifdef __OBJC__
+#define OBJC_CLASS @class
+#else
+#define OBJC_CLASS class
+#endif
+#endif
+
+/* ABI */
+#if defined(__ARM_EABI__) || defined(__EABI__)
+#define WTF_COMPILER_SUPPORTS_EABI 1
+#endif
+
+#endif /* WTF_Compiler_h */
diff --git a/src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h b/src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h
new file mode 100644
index 0000000000..2262b6c3b3
--- /dev/null
+++ b/src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008 Torch Mobile Inc. All rights reserved. (http://www.torchmobile.com/)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_CryptographicallyRandomNumber_h
+#define WTF_CryptographicallyRandomNumber_h
+
+#include <stdint.h>
+
+namespace WTF {
+
+#if USE(OS_RANDOMNESS)
+WTF_EXPORT_PRIVATE uint32_t cryptographicallyRandomNumber();
+WTF_EXPORT_PRIVATE void cryptographicallyRandomValues(void* buffer, size_t length);
+#endif
+
+}
+
+#if USE(OS_RANDOMNESS)
+using WTF::cryptographicallyRandomNumber;
+using WTF::cryptographicallyRandomValues;
+#endif
+
+#endif
diff --git a/src/3rdparty/masm/wtf/DataLog.h b/src/3rdparty/masm/wtf/DataLog.h
new file mode 100644
index 0000000000..0bd8efe727
--- /dev/null
+++ b/src/3rdparty/masm/wtf/DataLog.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DataLog_h
+#define DataLog_h
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <wtf/FilePrintStream.h>
+#include <wtf/Platform.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE FilePrintStream& dataFile();
+
+WTF_EXPORT_PRIVATE void dataLogFV(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(1, 0);
+WTF_EXPORT_PRIVATE void dataLogF(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
+WTF_EXPORT_PRIVATE void dataLogFString(const char*);
+
+template<typename T>
+void dataLog(const T& value)
+{
+ dataFile().print(value);
+}
+
+template<typename T1, typename T2>
+void dataLog(const T1& value1, const T2& value2)
+{
+ dataFile().print(value1, value2);
+}
+
+template<typename T1, typename T2, typename T3>
+void dataLog(const T1& value1, const T2& value2, const T3& value3)
+{
+ dataFile().print(value1, value2, value3);
+}
+
+template<typename T1, typename T2, typename T3, typename T4>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4)
+{
+ dataFile().print(value1, value2, value3, value4);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5)
+{
+ dataFile().print(value1, value2, value3, value4, value5);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12);
+}
+
+template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13>
+void dataLog(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13)
+{
+ dataFile().print(value1, value2, value3, value4, value5, value6, value7, value8, value9, value10, value11, value12, value13);
+}
+
+} // namespace WTF
+
+using WTF::dataLog;
+using WTF::dataLogF;
+using WTF::dataLogFString;
+
+#endif // DataLog_h
+
diff --git a/src/3rdparty/masm/wtf/DynamicAnnotations.h b/src/3rdparty/masm/wtf/DynamicAnnotations.h
new file mode 100644
index 0000000000..38acce35e6
--- /dev/null
+++ b/src/3rdparty/masm/wtf/DynamicAnnotations.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2011 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_DynamicAnnotations_h
+#define WTF_DynamicAnnotations_h
+
+/* This file defines dynamic annotations for use with dynamic analysis
+ * tool such as ThreadSanitizer, Valgrind, etc.
+ *
+ * Dynamic annotation is a source code annotation that affects
+ * the generated code (that is, the annotation is not a comment).
+ * Each such annotation is attached to a particular
+ * instruction and/or to a particular object (address) in the program.
+ *
+ * By using dynamic annotations a developer can give more details to the dynamic
+ * analysis tool to improve its precision.
+ *
+ * In C/C++ program the annotations are represented as C macros.
+ * With the default build flags, these macros are empty, hence don't affect
+ * performance of a compiled binary.
+ * If dynamic annotations are enabled, they just call no-op functions.
+ * The dynamic analysis tools can intercept these functions and replace them
+ * with their own implementations.
+ *
+ * See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations for more information.
+ */
+
+#if USE(DYNAMIC_ANNOTATIONS)
+/* Tell data race detector that we're not interested in reports on the given address range. */
+#define WTF_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) WTFAnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
+#define WTF_ANNOTATE_BENIGN_RACE(pointer, description) WTFAnnotateBenignRaceSized(__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+/* Annotations for user-defined synchronization mechanisms.
+ * These annotations can be used to define happens-before arcs in user-defined
+ * synchronization mechanisms: the race detector will infer an arc from
+ * the former to the latter when they share the same argument pointer.
+ *
+ * The most common case requiring annotations is atomic reference counting:
+ * bool deref() {
+ * ANNOTATE_HAPPENS_BEFORE(&m_refCount);
+ * if (!atomicDecrement(&m_refCount)) {
+ * // m_refCount is now 0
+ * ANNOTATE_HAPPENS_AFTER(&m_refCount);
+ * // "return true; happens-after each atomicDecrement of m_refCount"
+ * return true;
+ * }
+ * return false;
+ * }
+ */
+#define WTF_ANNOTATE_HAPPENS_BEFORE(address) WTFAnnotateHappensBefore(__FILE__, __LINE__, address)
+#define WTF_ANNOTATE_HAPPENS_AFTER(address) WTFAnnotateHappensAfter(__FILE__, __LINE__, address)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Don't use these directly, use the above macros instead. */
+void WTFAnnotateBenignRaceSized(const char* file, int line, const volatile void* memory, long size, const char* description);
+void WTFAnnotateHappensBefore(const char* file, int line, const volatile void* address);
+void WTFAnnotateHappensAfter(const char* file, int line, const volatile void* address);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#else // USE(DYNAMIC_ANNOTATIONS)
+/* These macros are empty when dynamic annotations are not enabled so you can
+ * use them without affecting the performance of release binaries. */
+#define WTF_ANNOTATE_BENIGN_RACE_SIZED(address, size, description)
+#define WTF_ANNOTATE_BENIGN_RACE(pointer, description)
+#define WTF_ANNOTATE_HAPPENS_BEFORE(address)
+#define WTF_ANNOTATE_HAPPENS_AFTER(address)
+#endif // USE(DYNAMIC_ANNOTATIONS)
+
+#endif // WTF_DynamicAnnotations_h
diff --git a/src/3rdparty/masm/wtf/EnumClass.h b/src/3rdparty/masm/wtf/EnumClass.h
new file mode 100644
index 0000000000..a5729b3b97
--- /dev/null
+++ b/src/3rdparty/masm/wtf/EnumClass.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_EnumClass_h
+#define WTF_EnumClass_h
+
+#include <wtf/Compiler.h>
+
+namespace WTF {
+
+// How to define a type safe enum list using the ENUM_CLASS macros?
+// ===============================================================
+// To get an enum list like this:
+//
+// enum class MyEnums {
+// Value1,
+// Value2,
+// ...
+// ValueN
+// };
+//
+// ... write this:
+//
+// ENUM_CLASS(MyEnums) {
+// Value1,
+// Value2,
+// ...
+// ValueN
+// } ENUM_CLASS_END(MyEnums);
+//
+// The ENUM_CLASS macros will use C++11's enum class if the compiler supports it.
+// Otherwise, it will use the EnumClass template below.
+
+#if COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+
+#define ENUM_CLASS(__enumName) \
+ enum class __enumName
+
+#define ENUM_CLASS_END(__enumName)
+
+#else // !COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+
+// How to define a type safe enum list using the EnumClass template?
+// ================================================================
+// Definition should be a struct that encapsulates an enum list.
+// The enum list should be names Enums.
+//
+// Here's an example of how to define a type safe enum named MyEnum using
+// the EnumClass template:
+//
+// struct MyEnumDefinition {
+// enum Enums {
+// ValueDefault,
+// Value1,
+// ...
+// ValueN
+// };
+// };
+// typedef EnumClass<MyEnumDefinition, MyEnumDefinition::ValueDefault> MyEnum;
+//
+// With that, you can now use MyEnum enum values as follow:
+//
+// MyEnum value1; // value1 is assigned MyEnum::ValueDefault by default.
+// MyEnum value2 = MyEnum::Value1; // value2 is assigned MyEnum::Value1;
+
+template <typename Definition>
+class EnumClass : public Definition {
+ typedef enum Definition::Enums Value;
+public:
+ ALWAYS_INLINE EnumClass() { }
+ ALWAYS_INLINE EnumClass(Value value) : m_value(value) { }
+
+ ALWAYS_INLINE Value value() const { return m_value; }
+
+ ALWAYS_INLINE bool operator==(const EnumClass other) { return m_value == other.m_value; }
+ ALWAYS_INLINE bool operator!=(const EnumClass other) { return m_value != other.m_value; }
+ ALWAYS_INLINE bool operator<(const EnumClass other) { return m_value < other.m_value; }
+ ALWAYS_INLINE bool operator<=(const EnumClass other) { return m_value <= other.m_value; }
+ ALWAYS_INLINE bool operator>(const EnumClass other) { return m_value > other.m_value; }
+ ALWAYS_INLINE bool operator>=(const EnumClass other) { return m_value >= other.m_value; }
+
+ ALWAYS_INLINE bool operator==(const Value value) { return m_value == value; }
+ ALWAYS_INLINE bool operator!=(const Value value) { return m_value != value; }
+ ALWAYS_INLINE bool operator<(const Value value) { return m_value < value; }
+ ALWAYS_INLINE bool operator<=(const Value value) { return m_value <= value; }
+ ALWAYS_INLINE bool operator>(const Value value) { return m_value > value; }
+ ALWAYS_INLINE bool operator>=(const Value value) { return m_value >= value; }
+
+ ALWAYS_INLINE operator Value() { return m_value; }
+
+private:
+ Value m_value;
+};
+
+#define ENUM_CLASS(__enumName) \
+ struct __enumName ## Definition { \
+ enum Enums
+
+#define ENUM_CLASS_END(__enumName) \
+ ; \
+ }; \
+ typedef EnumClass< __enumName ## Definition > __enumName
+
+#endif // !COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+
+} // namespace WTF
+
+#if !COMPILER_SUPPORTS(CXX_STRONG_ENUMS)
+using WTF::EnumClass;
+#endif
+
+#endif // WTF_EnumClass_h
diff --git a/src/3rdparty/masm/wtf/FeatureDefines.h b/src/3rdparty/masm/wtf/FeatureDefines.h
new file mode 100644
index 0000000000..afad174658
--- /dev/null
+++ b/src/3rdparty/masm/wtf/FeatureDefines.h
@@ -0,0 +1,874 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ * Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.
+ * Copyright (C) 2013 Samsung Electronics. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_FeatureDefines_h
+#define WTF_FeatureDefines_h
+
+/* Use this file to list _all_ ENABLE() macros. Define the macros to be one of the following values:
+ * - "0" disables the feature by default. The feature can still be enabled for a specific port or environment.
+ * - "1" enables the feature by default. The feature can still be disabled for a specific port or environment.
+ *
+ * The feature defaults in this file are only taken into account if the (port specific) build system
+ * has not enabled or disabled a particular feature.
+ *
+ * Use this file to define ENABLE() macros only. Do not use this file to define USE() or macros !
+ *
+ * Only define a macro if it was not defined before - always check for !defined first.
+ *
+ * Keep the file sorted by the name of the defines. As an exception you can change the order
+ * to allow interdependencies between the default values.
+ *
+ * Below are a few potential commands to take advantage of this file running from the Source/WTF directory
+ *
+ * Get the list of feature defines: grep -o "ENABLE_\(\w\+\)" wtf/FeatureDefines.h | sort | uniq
+ * Get the list of features enabled by default for a PLATFORM(XXX): gcc -E -dM -I. -DWTF_PLATFORM_XXX "wtf/Platform.h" | grep "ENABLE_\w\+ 1" | cut -d' ' -f2 | sort
+ */
+
+/* FIXME: Move out the PLATFORM specific rules into platform specific files. */
+
+/* --------- Apple IOS (but not MAC) port --------- */
+/* PLATFORM(IOS) is a specialization of PLATFORM(MAC). */
+/* PLATFORM(MAC) is always enabled when PLATFORM(IOS) is enabled. */
+#if PLATFORM(IOS)
+
+#if !defined(ENABLE_8BIT_TEXTRUN)
+#define ENABLE_8BIT_TEXTRUN 1
+#endif
+
+#if !defined(ENABLE_CONTEXT_MENUS)
+#define ENABLE_CONTEXT_MENUS 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_SET)
+#define ENABLE_CSS_IMAGE_SET 1
+#endif
+
+#if !defined(ENABLE_DRAG_SUPPORT)
+#define ENABLE_DRAG_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 1
+#endif
+
+#if !defined(ENABLE_ICONDATABASE)
+#define ENABLE_ICONDATABASE 0
+#endif
+
+#if !defined(ENABLE_NETSCAPE_PLUGIN_API)
+#define ENABLE_NETSCAPE_PLUGIN_API 0
+#endif
+
+#if !defined(ENABLE_ORIENTATION_EVENTS)
+#define ENABLE_ORIENTATION_EVENTS 1
+#endif
+
+#if !defined(ENABLE_REPAINT_THROTTLING)
+#define ENABLE_REPAINT_THROTTLING 1
+#endif
+
+#if !defined(ENABLE_TEXT_CARET)
+#define ENABLE_TEXT_CARET 0
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#if !defined(ENABLE_VIEW_MODE_CSS_MEDIA)
+#define ENABLE_VIEW_MODE_CSS_MEDIA 0
+#endif
+
+#if !defined(ENABLE_WEBGL)
+#define ENABLE_WEBGL 1
+#endif
+
+#endif /* PLATFORM(IOS) */
+
+/* --------- Apple MAC port (not IOS) --------- */
+#if PLATFORM(MAC) && !PLATFORM(IOS)
+
+#if !defined(ENABLE_8BIT_TEXTRUN)
+#define ENABLE_8BIT_TEXTRUN 1
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_SET)
+#define ENABLE_CSS_IMAGE_SET 1
+#endif
+
+#if !defined(ENABLE_DASHBOARD_SUPPORT)
+#define ENABLE_DASHBOARD_SUPPORT 1
+#endif
+
+#if !defined(ENABLE_DELETION_UI)
+#define ENABLE_DELETION_UI 1
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
+#if !defined(ENABLE_ENCRYPTED_MEDIA)
+#define ENABLE_ENCRYPTED_MEDIA 1
+#endif
+#if !defined(ENABLE_ENCRYPTED_MEDIA_V2)
+#define ENABLE_ENCRYPTED_MEDIA_V2 1
+#endif
+#endif
+
+#if !defined(ENABLE_FULLSCREEN_API)
+#define ENABLE_FULLSCREEN_API 1
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#if !defined(ENABLE_GESTURE_EVENTS)
+#define ENABLE_GESTURE_EVENTS 1
+#endif
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#if !defined(ENABLE_RUBBER_BANDING)
+#define ENABLE_RUBBER_BANDING 1
+#endif
+#endif
+
+#if !defined(ENABLE_SMOOTH_SCROLLING)
+#define ENABLE_SMOOTH_SCROLLING 1
+#endif
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+#if !defined(ENABLE_THREADED_SCROLLING)
+#define ENABLE_THREADED_SCROLLING 1
+#endif
+#endif
+
+#if ENABLE(VIDEO)
+#if !defined(ENABLE_VIDEO_TRACK)
+#define ENABLE_VIDEO_TRACK 1
+#endif
+#endif
+
+#if !defined(ENABLE_VIEW_MODE_CSS_MEDIA)
+#define ENABLE_VIEW_MODE_CSS_MEDIA 0
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#if !defined(ENABLE_WEB_AUDIO)
+#define ENABLE_WEB_AUDIO 1
+#endif
+
+#if !defined(ENABLE_CURSOR_VISIBILITY)
+#define ENABLE_CURSOR_VISIBILITY 1
+#endif
+
+#endif /* PLATFORM(MAC) && !PLATFORM(IOS) */
+
+/* --------- Apple Windows port --------- */
+#if PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)
+
+#if !defined(ENABLE_FULLSCREEN_API)
+#define ENABLE_FULLSCREEN_API 1
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#endif /* PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO) */
+
+/* --------- WinCE port --------- */
+/* WinCE port is a specialization of PLATFORM(WIN). */
+/* PLATFORM(WIN) is always enabled when building for the WinCE port. */
+#if PLATFORM(WIN) && OS(WINCE)
+
+#if !defined(ENABLE_DRAG_SUPPORT)
+#define ENABLE_DRAG_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_FTPDIR)
+#define ENABLE_FTPDIR 0
+#endif
+
+#if !defined(ENABLE_INSPECTOR)
+#define ENABLE_INSPECTOR 0
+#endif
+
+#endif /* PLATFORM(WIN) && OS(WINCE) */
+
+/* --------- Windows CAIRO port --------- */
+/* PLATFORM(WIN_CAIRO) is a specialization of PLATFORM(WIN). */
+/* PLATFORM(WIN) is always enabled when PLATFORM(WIN_CAIRO) is enabled. */
+#if PLATFORM(WIN_CAIRO)
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+
+#endif /* PLATFORM(WIN_CAIRO) */
+
+/* --------- WX port (Mac OS and Windows) --------- */
+#if PLATFORM(WX)
+
+#if OS(DARWIN)
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 1
+#endif
+#endif
+
+#if OS(UNIX)
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+#endif
+
+#endif /* PLATFORM(WX) */
+
+/* --------- EFL port (Unix) --------- */
+#if PLATFORM(EFL)
+
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+
+#if !defined(ENABLE_SUBPIXEL_LAYOUT)
+#define ENABLE_SUBPIXEL_LAYOUT 1
+#endif
+
+#endif /* PLATFORM(EFL) */
+
+/* --------- Gtk port (Unix, Windows, Mac) --------- */
+#if PLATFORM(GTK)
+
+#if OS(UNIX)
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+#endif
+
+#endif /* PLATFORM(GTK) */
+
+/* --------- Qt port (Unix, Windows, Mac, WinCE) --------- */
+#if PLATFORM(QT)
+
+#if OS(UNIX)
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 1
+#endif
+#endif
+
+#endif /* PLATFORM(QT) */
+
+/* --------- Blackberry port (QNX) --------- */
+#if PLATFORM(BLACKBERRY)
+
+#if !defined(ENABLE_BLACKBERRY_CREDENTIAL_PERSIST)
+#define ENABLE_BLACKBERRY_CREDENTIAL_PERSIST 1
+#endif
+
+#endif /* PLATFORM(BLACKBERRY) */
+
+/* ENABLE macro defaults for WebCore */
+/* Do not use PLATFORM() tests in this section ! */
+
+#if !defined(ENABLE_3D_RENDERING)
+#define ENABLE_3D_RENDERING 0
+#endif
+
+#if !defined(ENABLE_8BIT_TEXTRUN)
+#define ENABLE_8BIT_TEXTRUN 0
+#endif
+
+#if !defined(ENABLE_ACCELERATED_2D_CANVAS)
+#define ENABLE_ACCELERATED_2D_CANVAS 0
+#endif
+
+#if !defined(ENABLE_ACCELERATED_OVERFLOW_SCROLLING)
+#define ENABLE_ACCELERATED_OVERFLOW_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_BATTERY_STATUS)
+#define ENABLE_BATTERY_STATUS 0
+#endif
+
+#if !defined(ENABLE_BLOB)
+#define ENABLE_BLOB 0
+#endif
+
+#if !defined(ENABLE_CALENDAR_PICKER)
+#define ENABLE_CALENDAR_PICKER 0
+#endif
+
+#if !defined(ENABLE_CANVAS_PATH)
+#define ENABLE_CANVAS_PATH 1
+#endif
+
+#if !defined(ENABLE_CANVAS_PROXY)
+#define ENABLE_CANVAS_PROXY 0
+#endif
+
+#if !defined(ENABLE_CHANNEL_MESSAGING)
+#define ENABLE_CHANNEL_MESSAGING 1
+#endif
+
+#if !defined(ENABLE_CONTEXT_MENUS)
+#define ENABLE_CONTEXT_MENUS 1
+#endif
+
+#if !defined(ENABLE_CSP_NEXT)
+#define ENABLE_CSP_NEXT 0
+#endif
+
+#if !defined(ENABLE_CSS3_CONDITIONAL_RULES)
+#define ENABLE_CSS3_CONDITIONAL_RULES 0
+#endif
+
+#if !defined(ENABLE_CSS3_TEXT)
+#define ENABLE_CSS3_TEXT 0
+#endif
+
+#if !defined(ENABLE_CSS_BOX_DECORATION_BREAK)
+#define ENABLE_CSS_BOX_DECORATION_BREAK 1
+#endif
+
+#if !defined(ENABLE_CSS_DEVICE_ADAPTATION)
+#define ENABLE_CSS_DEVICE_ADAPTATION 0
+#endif
+
+#if !defined(ENABLE_CSS_COMPOSITING)
+#define ENABLE_CSS_COMPOSITING 0
+#endif
+
+#if !defined(ENABLE_CSS_FILTERS)
+#define ENABLE_CSS_FILTERS 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_ORIENTATION)
+#define ENABLE_CSS_IMAGE_ORIENTATION 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_RESOLUTION)
+#define ENABLE_CSS_IMAGE_RESOLUTION 0
+#endif
+
+#if !defined(ENABLE_CSS_IMAGE_SET)
+#define ENABLE_CSS_IMAGE_SET 0
+#endif
+
+#if !defined(ENABLE_CSS_SHADERS)
+#define ENABLE_CSS_SHADERS 0
+#endif
+
+#if !defined(ENABLE_CSS_STICKY_POSITION)
+#define ENABLE_CSS_STICKY_POSITION 0
+#endif
+
+#if !defined(ENABLE_CSS_TRANSFORMS_ANIMATIONS_TRANSITIONS_UNPREFIXED)
+#define ENABLE_CSS_TRANSFORMS_ANIMATIONS_TRANSITIONS_UNPREFIXED 0
+#endif
+
+#if !defined(ENABLE_CSS_VARIABLES)
+#define ENABLE_CSS_VARIABLES 0
+#endif
+
+#if !defined(ENABLE_CUSTOM_SCHEME_HANDLER)
+#define ENABLE_CUSTOM_SCHEME_HANDLER 0
+#endif
+
+#if !defined(ENABLE_DASHBOARD_SUPPORT)
+#define ENABLE_DASHBOARD_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_DATALIST_ELEMENT)
+#define ENABLE_DATALIST_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_DATA_TRANSFER_ITEMS)
+#define ENABLE_DATA_TRANSFER_ITEMS 0
+#endif
+
+#if !defined(ENABLE_DELETION_UI)
+#define ENABLE_DELETION_UI 0
+#endif
+
+#if !defined(ENABLE_DETAILS_ELEMENT)
+#define ENABLE_DETAILS_ELEMENT 1
+#endif
+
+#if !defined(ENABLE_DEVICE_ORIENTATION)
+#define ENABLE_DEVICE_ORIENTATION 0
+#endif
+
+#if !defined(ENABLE_DIALOG_ELEMENT)
+#define ENABLE_DIALOG_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_DIRECTORY_UPLOAD)
+#define ENABLE_DIRECTORY_UPLOAD 0
+#endif
+
+#if !defined(ENABLE_DOWNLOAD_ATTRIBUTE)
+#define ENABLE_DOWNLOAD_ATTRIBUTE 0
+#endif
+
+#if !defined(ENABLE_DRAGGABLE_REGION)
+#define ENABLE_DRAGGABLE_REGION 0
+#endif
+
+#if !defined(ENABLE_DRAG_SUPPORT)
+#define ENABLE_DRAG_SUPPORT 1
+#endif
+
+#if !defined(ENABLE_ENCRYPTED_MEDIA)
+#define ENABLE_ENCRYPTED_MEDIA 0
+#endif
+
+#if !defined(ENABLE_ENCRYPTED_MEDIA_V2)
+#define ENABLE_ENCRYPTED_MEDIA_V2 0
+#endif
+
+#if !defined(ENABLE_FAST_MOBILE_SCROLLING)
+#define ENABLE_FAST_MOBILE_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_FILE_SYSTEM)
+#define ENABLE_FILE_SYSTEM 0
+#endif
+
+#if !defined(ENABLE_FILTERS)
+#define ENABLE_FILTERS 0
+#endif
+
+#if !defined(ENABLE_FONT_LOAD_EVENTS)
+#define ENABLE_FONT_LOAD_EVENTS 0
+#endif
+
+#if !defined(ENABLE_FTPDIR)
+#define ENABLE_FTPDIR 1
+#endif
+
+#if !defined(ENABLE_FULLSCREEN_API)
+#define ENABLE_FULLSCREEN_API 0
+#endif
+
+#if !defined(ENABLE_GAMEPAD)
+#define ENABLE_GAMEPAD 0
+#endif
+
+#if !defined(ENABLE_GEOLOCATION)
+#define ENABLE_GEOLOCATION 0
+#endif
+
+#if !defined(ENABLE_GESTURE_EVENTS)
+#define ENABLE_GESTURE_EVENTS 0
+#endif
+
+#if !defined(ENABLE_GLIB_SUPPORT)
+#define ENABLE_GLIB_SUPPORT 0
+#endif
+
+#if !defined(ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING)
+#define ENABLE_HIDDEN_PAGE_DOM_TIMER_THROTTLING 0
+#endif
+
+#if !defined(ENABLE_HIGH_DPI_CANVAS)
+#define ENABLE_HIGH_DPI_CANVAS 0
+#endif
+
+#if !defined(ENABLE_ICONDATABASE)
+#define ENABLE_ICONDATABASE 1
+#endif
+
+#if !defined(ENABLE_IFRAME_SEAMLESS)
+#define ENABLE_IFRAME_SEAMLESS 1
+#endif
+
+#if !defined(ENABLE_IMAGE_DECODER_DOWN_SAMPLING)
+#define ENABLE_IMAGE_DECODER_DOWN_SAMPLING 0
+#endif
+
+#if !defined(ENABLE_INDEXED_DATABASE)
+#define ENABLE_INDEXED_DATABASE 0
+#endif
+
+#if !defined(ENABLE_INPUT_MULTIPLE_FIELDS_UI)
+#define ENABLE_INPUT_MULTIPLE_FIELDS_UI 0
+#endif
+
+#if !defined(ENABLE_INPUT_SPEECH)
+#define ENABLE_INPUT_SPEECH 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_COLOR)
+#define ENABLE_INPUT_TYPE_COLOR 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_DATE)
+#define ENABLE_INPUT_TYPE_DATE 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_DATETIME_INCOMPLETE)
+#define ENABLE_INPUT_TYPE_DATETIME_INCOMPLETE 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_DATETIMELOCAL)
+#define ENABLE_INPUT_TYPE_DATETIMELOCAL 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_MONTH)
+#define ENABLE_INPUT_TYPE_MONTH 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_TIME)
+#define ENABLE_INPUT_TYPE_TIME 0
+#endif
+
+#if !defined(ENABLE_INPUT_TYPE_WEEK)
+#define ENABLE_INPUT_TYPE_WEEK 0
+#endif
+
+#if ENABLE(INPUT_TYPE_DATE) || ENABLE(INPUT_TYPE_DATETIME_INCOMPLETE) || ENABLE(INPUT_TYPE_DATETIMELOCAL) || ENABLE(INPUT_TYPE_MONTH) || ENABLE(INPUT_TYPE_TIME) || ENABLE(INPUT_TYPE_WEEK)
+#if !defined(ENABLE_DATE_AND_TIME_INPUT_TYPES)
+#define ENABLE_DATE_AND_TIME_INPUT_TYPES 1
+#endif
+#endif
+
+#if !defined(ENABLE_INSPECTOR)
+#define ENABLE_INSPECTOR 1
+#endif
+
+#if !defined(ENABLE_JAVASCRIPT_DEBUGGER)
+#define ENABLE_JAVASCRIPT_DEBUGGER 1
+#endif
+
+#if !defined(ENABLE_JAVASCRIPT_I18N_API)
+#define ENABLE_JAVASCRIPT_I18N_API 0
+#endif
+
+#if !defined(ENABLE_LEGACY_CSS_VENDOR_PREFIXES)
+#define ENABLE_LEGACY_CSS_VENDOR_PREFIXES 0
+#endif
+
+#if !defined(ENABLE_LEGACY_NOTIFICATIONS)
+#define ENABLE_LEGACY_NOTIFICATIONS 0
+#endif
+
+#if !defined(ENABLE_LEGACY_VENDOR_PREFIXES)
+#define ENABLE_LEGACY_VENDOR_PREFIXES 0
+#endif
+
+#if !defined(ENABLE_LEGACY_VIEWPORT_ADAPTION)
+#define ENABLE_LEGACY_VIEWPORT_ADAPTION 0
+#endif
+
+#if !defined(ENABLE_LINK_PREFETCH)
+#define ENABLE_LINK_PREFETCH 0
+#endif
+
+#if !defined(ENABLE_LINK_PRERENDER)
+#define ENABLE_LINK_PRERENDER 0
+#endif
+
+#if !defined(ENABLE_MATHML)
+#define ENABLE_MATHML 1
+#endif
+
+#if !defined(ENABLE_MEDIA_CAPTURE)
+#define ENABLE_MEDIA_CAPTURE 0
+#endif
+
+#if !defined(ENABLE_MEDIA_SOURCE)
+#define ENABLE_MEDIA_SOURCE 0
+#endif
+
+#if !defined(ENABLE_MEDIA_STATISTICS)
+#define ENABLE_MEDIA_STATISTICS 0
+#endif
+
+#if !defined(ENABLE_MEDIA_STREAM)
+#define ENABLE_MEDIA_STREAM 0
+#endif
+
+#if !defined(ENABLE_METER_ELEMENT)
+#define ENABLE_METER_ELEMENT 1
+#endif
+
+#if !defined(ENABLE_MHTML)
+#define ENABLE_MHTML 0
+#endif
+
+#if !defined(ENABLE_MICRODATA)
+#define ENABLE_MICRODATA 0
+#endif
+
+#if !defined(ENABLE_MOUSE_CURSOR_SCALE)
+#define ENABLE_MOUSE_CURSOR_SCALE 0
+#endif
+
+#if !defined(ENABLE_NAVIGATOR_CONTENT_UTILS)
+#define ENABLE_NAVIGATOR_CONTENT_UTILS 0
+#endif
+
+#if !defined(ENABLE_NETSCAPE_PLUGIN_API)
+#define ENABLE_NETSCAPE_PLUGIN_API 1
+#endif
+
+#if !defined(ENABLE_NETSCAPE_PLUGIN_METADATA_CACHE)
+#define ENABLE_NETSCAPE_PLUGIN_METADATA_CACHE 0
+#endif
+
+#if !defined(ENABLE_NETWORK_INFO)
+#define ENABLE_NETWORK_INFO 0
+#endif
+
+#if !defined(ENABLE_NOTIFICATIONS)
+#define ENABLE_NOTIFICATIONS 0
+#endif
+
+#if !defined(ENABLE_OBJECT_MARK_LOGGING)
+#define ENABLE_OBJECT_MARK_LOGGING 0
+#endif
+
+#if !defined(ENABLE_OPENCL)
+#define ENABLE_OPENCL 0
+#endif
+
+#if !defined(ENABLE_OPENTYPE_VERTICAL)
+#define ENABLE_OPENTYPE_VERTICAL 0
+#endif
+
+#if !defined(ENABLE_ORIENTATION_EVENTS)
+#define ENABLE_ORIENTATION_EVENTS 0
+#endif
+
+#if !defined(ENABLE_PAGE_POPUP)
+#define ENABLE_PAGE_POPUP 0
+#endif
+
+#if !defined(ENABLE_PAGE_VISIBILITY_API)
+#define ENABLE_PAGE_VISIBILITY_API 0
+#endif
+
+#if OS(WINDOWS)
+#if !defined(ENABLE_PAN_SCROLLING)
+#define ENABLE_PAN_SCROLLING 1
+#endif
+#endif
+
+#if !defined(ENABLE_PARSED_STYLE_SHEET_CACHING)
+#define ENABLE_PARSED_STYLE_SHEET_CACHING 1
+#endif
+
+#if !defined(ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH)
+#define ENABLE_PLUGIN_PACKAGE_SIMPLE_HASH 0
+#endif
+
+#if !defined(ENABLE_PLUGIN_PROXY_FOR_VIDEO)
+#define ENABLE_PLUGIN_PROXY_FOR_VIDEO 0
+#endif
+
+#if !defined(ENABLE_POINTER_LOCK)
+#define ENABLE_POINTER_LOCK 0
+#endif
+
+#if !defined(ENABLE_PROGRESS_ELEMENT)
+#define ENABLE_PROGRESS_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_PROXIMITY_EVENTS)
+#define ENABLE_PROXIMITY_EVENTS 0
+#endif
+
+#if !defined(ENABLE_QUOTA)
+#define ENABLE_QUOTA 0
+#endif
+
+#if !defined(ENABLE_REPAINT_THROTTLING)
+#define ENABLE_REPAINT_THROTTLING 0
+#endif
+
+#if !defined(ENABLE_REQUEST_ANIMATION_FRAME)
+#define ENABLE_REQUEST_ANIMATION_FRAME 0
+#endif
+
+#if !defined(ENABLE_REQUEST_AUTOCOMPLETE)
+#define ENABLE_REQUEST_AUTOCOMPLETE 0
+#endif
+
+#if !defined(ENABLE_RUBBER_BANDING)
+#define ENABLE_RUBBER_BANDING 0
+#endif
+
+#if !defined(ENABLE_SATURATED_LAYOUT_ARITHMETIC)
+#define ENABLE_SATURATED_LAYOUT_ARITHMETIC 0
+#endif
+
+#if !defined(ENABLE_SCRIPTED_SPEECH)
+#define ENABLE_SCRIPTED_SPEECH 0
+#endif
+
+#if !defined(ENABLE_SHADOW_DOM)
+#define ENABLE_SHADOW_DOM 0
+#endif
+
+#if !defined(ENABLE_SHARED_WORKERS)
+#define ENABLE_SHARED_WORKERS 0
+#endif
+
+#if !defined(ENABLE_SMOOTH_SCROLLING)
+#define ENABLE_SMOOTH_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_SPEECH_SYNTHESIS)
+#define ENABLE_SPEECH_SYNTHESIS 0
+#endif
+
+#if !defined(ENABLE_SPELLCHECK)
+#define ENABLE_SPELLCHECK 0
+#endif
+
+#if !defined(ENABLE_SQL_DATABASE)
+#define ENABLE_SQL_DATABASE 1
+#endif
+
+#if !defined(ENABLE_STYLE_SCOPED)
+#define ENABLE_STYLE_SCOPED 0
+#endif
+
+#if !defined(ENABLE_SUBPIXEL_LAYOUT)
+#define ENABLE_SUBPIXEL_LAYOUT 0
+#endif
+
+#if !defined(ENABLE_SVG)
+#define ENABLE_SVG 1
+#endif
+
+#if ENABLE(SVG)
+#if !defined(ENABLE_SVG_FONTS)
+#define ENABLE_SVG_FONTS 1
+#endif
+#endif
+
+#if !defined(ENABLE_TEMPLATE_ELEMENT)
+#define ENABLE_TEMPLATE_ELEMENT 0
+#endif
+
+#if !defined(ENABLE_TEXT_AUTOSIZING)
+#define ENABLE_TEXT_AUTOSIZING 0
+#endif
+
+#if !defined(ENABLE_TEXT_CARET)
+#define ENABLE_TEXT_CARET 1
+#endif
+
+#if !defined(ENABLE_THREADED_HTML_PARSER)
+#define ENABLE_THREADED_HTML_PARSER 0
+#endif
+
+#if !defined(ENABLE_THREADED_SCROLLING)
+#define ENABLE_THREADED_SCROLLING 0
+#endif
+
+#if !defined(ENABLE_TOUCH_EVENTS)
+#define ENABLE_TOUCH_EVENTS 0
+#endif
+
+#if !defined(ENABLE_TOUCH_ICON_LOADING)
+#define ENABLE_TOUCH_ICON_LOADING 0
+#endif
+
+#if !defined(ENABLE_VIBRATION)
+#define ENABLE_VIBRATION 0
+#endif
+
+#if !defined(ENABLE_VIDEO)
+#define ENABLE_VIDEO 0
+#endif
+
+#if !defined(ENABLE_VIDEO_TRACK)
+#define ENABLE_VIDEO_TRACK 0
+#endif
+
+#if !defined(ENABLE_VIEWPORT)
+#define ENABLE_VIEWPORT 0
+#endif
+
+#if !defined(ENABLE_VIEWSOURCE_ATTRIBUTE)
+#define ENABLE_VIEWSOURCE_ATTRIBUTE 1
+#endif
+
+#if !defined(ENABLE_VIEW_MODE_CSS_MEDIA)
+#define ENABLE_VIEW_MODE_CSS_MEDIA 1
+#endif
+
+#if !defined(ENABLE_WEBGL)
+#define ENABLE_WEBGL 0
+#endif
+
+#if !defined(ENABLE_WEB_ARCHIVE)
+#define ENABLE_WEB_ARCHIVE 0
+#endif
+
+#if !defined(ENABLE_WEB_AUDIO)
+#define ENABLE_WEB_AUDIO 0
+#endif
+
+#if !defined(ENABLE_WEB_SOCKETS)
+#define ENABLE_WEB_SOCKETS 1
+#endif
+
+#if !defined(ENABLE_WEB_TIMING)
+#define ENABLE_WEB_TIMING 0
+#endif
+
+#if !defined(ENABLE_WORKERS)
+#define ENABLE_WORKERS 0
+#endif
+
+#if !defined(ENABLE_XHR_TIMEOUT)
+#define ENABLE_XHR_TIMEOUT 0
+#endif
+
+#if !defined(ENABLE_XSLT)
+#define ENABLE_XSLT 1
+#endif
+
+/* Asserts, invariants for macro definitions */
+
+#if ENABLE(SATURATED_LAYOUT_ARITHMETIC) && !ENABLE(SUBPIXEL_LAYOUT)
+#error "ENABLE(SATURATED_LAYOUT_ARITHMETIC) requires ENABLE(SUBPIXEL_LAYOUT)"
+#endif
+
+#if ENABLE(SVG_FONTS) && !ENABLE(SVG)
+#error "ENABLE(SVG_FONTS) requires ENABLE(SVG)"
+#endif
+
+#if ENABLE(VIDEO_TRACK) && !ENABLE(VIDEO)
+#error "ENABLE(VIDEO_TRACK) requires ENABLE(VIDEO)"
+#endif
+
+#endif /* WTF_FeatureDefines_h */
diff --git a/src/3rdparty/masm/wtf/FilePrintStream.cpp b/src/3rdparty/masm/wtf/FilePrintStream.cpp
new file mode 100644
index 0000000000..b5ab25e0bf
--- /dev/null
+++ b/src/3rdparty/masm/wtf/FilePrintStream.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FilePrintStream.h"
+
+namespace WTF {
+
+FilePrintStream::FilePrintStream(FILE* file, AdoptionMode adoptionMode)
+ : m_file(file)
+ , m_adoptionMode(adoptionMode)
+{
+}
+
+FilePrintStream::~FilePrintStream()
+{
+ if (m_adoptionMode == Borrow)
+ return;
+ fclose(m_file);
+}
+
+PassOwnPtr<FilePrintStream> FilePrintStream::open(const char* filename, const char* mode)
+{
+ FILE* file = fopen(filename, mode);
+ if (!file)
+ return PassOwnPtr<FilePrintStream>();
+
+ return adoptPtr(new FilePrintStream(file));
+}
+
+void FilePrintStream::vprintf(const char* format, va_list argList)
+{
+ vfprintf(m_file, format, argList);
+}
+
+void FilePrintStream::flush()
+{
+ fflush(m_file);
+}
+
+} // namespace WTF
+
diff --git a/src/3rdparty/masm/wtf/FilePrintStream.h b/src/3rdparty/masm/wtf/FilePrintStream.h
new file mode 100644
index 0000000000..bdeab4c479
--- /dev/null
+++ b/src/3rdparty/masm/wtf/FilePrintStream.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FilePrintStream_h
+#define FilePrintStream_h
+
+#include <stdio.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+class FilePrintStream : public PrintStream {
+public:
+ enum AdoptionMode {
+ Adopt,
+ Borrow
+ };
+
+ FilePrintStream(FILE*, AdoptionMode = Adopt);
+ virtual ~FilePrintStream();
+
+ static PassOwnPtr<FilePrintStream> open(const char* filename, const char* mode);
+
+ FILE* file() { return m_file; }
+
+ void vprintf(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(2, 0);
+ void flush();
+
+private:
+ FILE* m_file;
+ AdoptionMode m_adoptionMode;
+};
+
+} // namespace WTF
+
+using WTF::FilePrintStream;
+
+#endif // FilePrintStream_h
+
diff --git a/src/3rdparty/masm/wtf/Locker.h b/src/3rdparty/masm/wtf/Locker.h
new file mode 100644
index 0000000000..c465b99ea4
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Locker.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef Locker_h
+#define Locker_h
+
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+template <typename T> class Locker {
+ WTF_MAKE_NONCOPYABLE(Locker);
+public:
+ Locker(T& lockable) : m_lockable(lockable) { m_lockable.lock(); }
+ ~Locker() { m_lockable.unlock(); }
+private:
+ T& m_lockable;
+};
+
+}
+
+using WTF::Locker;
+
+#endif
diff --git a/src/3rdparty/masm/wtf/MathExtras.h b/src/3rdparty/masm/wtf/MathExtras.h
new file mode 100644
index 0000000000..b70e468dfa
--- /dev/null
+++ b/src/3rdparty/masm/wtf/MathExtras.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_MathExtras_h
+#define WTF_MathExtras_h
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <limits>
+#include <stdint.h>
+#include <stdlib.h>
+#include <wtf/StdLibExtras.h>
+
+#if OS(SOLARIS)
+#include <ieeefp.h>
+#endif
+
+#if OS(OPENBSD)
+#include <sys/types.h>
+#include <machine/ieee.h>
+#endif
+
+#if OS(QNX)
+// FIXME: Look into a way to have cmath import its functions into both the standard and global
+// namespace. For now, we include math.h since the QNX cmath header only imports its functions
+// into the standard namespace.
+#include <math.h>
+// These macros from math.h conflict with the real functions in the std namespace.
+#undef signbit
+#undef isnan
+#undef isinf
+#undef isfinite
+#endif
+
+#ifndef M_PI
+const double piDouble = 3.14159265358979323846;
+const float piFloat = 3.14159265358979323846f;
+#else
+const double piDouble = M_PI;
+const float piFloat = static_cast<float>(M_PI);
+#endif
+
+#ifndef M_PI_2
+const double piOverTwoDouble = 1.57079632679489661923;
+const float piOverTwoFloat = 1.57079632679489661923f;
+#else
+const double piOverTwoDouble = M_PI_2;
+const float piOverTwoFloat = static_cast<float>(M_PI_2);
+#endif
+
+#ifndef M_PI_4
+const double piOverFourDouble = 0.785398163397448309616;
+const float piOverFourFloat = 0.785398163397448309616f;
+#else
+const double piOverFourDouble = M_PI_4;
+const float piOverFourFloat = static_cast<float>(M_PI_4);
+#endif
+
+#if OS(DARWIN)
+
+// Work around a bug in the Mac OS X libc where ceil(-0.1) return +0.
+inline double wtf_ceil(double x) { return copysign(ceil(x), x); }
+
+#define ceil(x) wtf_ceil(x)
+
+#endif
+
+#if OS(SOLARIS)
+
+namespace std {
+
+#ifndef isfinite
+inline bool isfinite(double x) { return finite(x) && !isnand(x); }
+#endif
+#ifndef signbit
+inline bool signbit(double x) { return copysign(1.0, x) < 0; }
+#endif
+#ifndef isinf
+inline bool isinf(double x) { return !finite(x) && !isnand(x); }
+#endif
+
+} // namespace std
+
+#endif
+
+#if OS(OPENBSD)
+
+namespace std {
+
+#ifndef isfinite
+inline bool isfinite(double x) { return finite(x); }
+#endif
+#ifndef signbit
+inline bool signbit(double x) { struct ieee_double *p = (struct ieee_double *)&x; return p->dbl_sign; }
+#endif
+
+} // namespace std
+
+#endif
+
+#if COMPILER(MSVC)
+
+// We must not do 'num + 0.5' or 'num - 0.5' because they can cause precision loss.
+static double round(double num)
+{
+ double integer = ceil(num);
+ if (num > 0)
+ return integer - num > 0.5 ? integer - 1.0 : integer;
+ return integer - num >= 0.5 ? integer - 1.0 : integer;
+}
+static float roundf(float num)
+{
+ float integer = ceilf(num);
+ if (num > 0)
+ return integer - num > 0.5f ? integer - 1.0f : integer;
+ return integer - num >= 0.5f ? integer - 1.0f : integer;
+}
+inline long long llround(double num) { return static_cast<long long>(round(num)); }
+inline long long llroundf(float num) { return static_cast<long long>(roundf(num)); }
+inline long lround(double num) { return static_cast<long>(round(num)); }
+inline long lroundf(float num) { return static_cast<long>(roundf(num)); }
+inline double trunc(double num) { return num > 0 ? floor(num) : ceil(num); }
+
+#endif
+
+#if COMPILER(GCC) && OS(QNX)
+// The stdlib on QNX doesn't contain long abs(long). See PR #104666.
+inline long long abs(long num) { return labs(num); }
+#endif
+
+#if COMPILER(MSVC)
+// MSVC's math.h does not currently supply log2 or log2f.
+inline double log2(double num)
+{
+ // This constant is roughly M_LN2, which is not provided by default on Windows.
+ return log(num) / 0.693147180559945309417232121458176568;
+}
+
+inline float log2f(float num)
+{
+ // This constant is roughly M_LN2, which is not provided by default on Windows.
+ return logf(num) / 0.693147180559945309417232121458176568f;
+}
+#endif
+
+#if COMPILER(MSVC)
+// The 64bit version of abs() is already defined in stdlib.h which comes with VC10
+#if COMPILER(MSVC9_OR_LOWER)
+inline long long abs(long long num) { return _abs64(num); }
+#endif
+
+namespace std {
+
+inline bool isinf(double num) { return !_finite(num) && !_isnan(num); }
+inline bool isnan(double num) { return !!_isnan(num); }
+inline bool isfinite(double x) { return _finite(x); }
+inline bool signbit(double num) { return _copysign(1.0, num) < 0; }
+
+} // namespace std
+
+inline double nextafter(double x, double y) { return _nextafter(x, y); }
+inline float nextafterf(float x, float y) { return x > y ? x - FLT_EPSILON : x + FLT_EPSILON; }
+
+inline double copysign(double x, double y) { return _copysign(x, y); }
+
+// Work around a bug in Win, where atan2(+-infinity, +-infinity) yields NaN instead of specific values.
+inline double wtf_atan2(double x, double y)
+{
+ double posInf = std::numeric_limits<double>::infinity();
+ double negInf = -std::numeric_limits<double>::infinity();
+ double nan = std::numeric_limits<double>::quiet_NaN();
+
+ double result = nan;
+
+ if (x == posInf && y == posInf)
+ result = piOverFourDouble;
+ else if (x == posInf && y == negInf)
+ result = 3 * piOverFourDouble;
+ else if (x == negInf && y == posInf)
+ result = -piOverFourDouble;
+ else if (x == negInf && y == negInf)
+ result = -3 * piOverFourDouble;
+ else
+ result = ::atan2(x, y);
+
+ return result;
+}
+
+// Work around a bug in the Microsoft CRT, where fmod(x, +-infinity) yields NaN instead of x.
+inline double wtf_fmod(double x, double y) { return (!std::isinf(x) && std::isinf(y)) ? x : fmod(x, y); }
+
+// Work around a bug in the Microsoft CRT, where pow(NaN, 0) yields NaN instead of 1.
+inline double wtf_pow(double x, double y) { return y == 0 ? 1 : pow(x, y); }
+
+#define atan2(x, y) wtf_atan2(x, y)
+#define fmod(x, y) wtf_fmod(x, y)
+#define pow(x, y) wtf_pow(x, y)
+
+// MSVC's math functions do not bring lrint.
+inline long int lrint(double flt)
+{
+ int64_t intgr;
+#if CPU(X86)
+ __asm {
+ fld flt
+ fistp intgr
+ };
+#else
+ ASSERT(std::isfinite(flt));
+ double rounded = round(flt);
+ intgr = static_cast<int64_t>(rounded);
+ // If the fractional part is exactly 0.5, we need to check whether
+ // the rounded result is even. If it is not we need to add 1 to
+ // negative values and subtract one from positive values.
+ if ((fabs(intgr - flt) == 0.5) & intgr)
+ intgr -= ((intgr >> 62) | 1); // 1 with the sign of result, i.e. -1 or 1.
+#endif
+ return static_cast<long int>(intgr);
+}
+
+#endif // COMPILER(MSVC)
+
+inline double deg2rad(double d) { return d * piDouble / 180.0; }
+inline double rad2deg(double r) { return r * 180.0 / piDouble; }
+inline double deg2grad(double d) { return d * 400.0 / 360.0; }
+inline double grad2deg(double g) { return g * 360.0 / 400.0; }
+inline double turn2deg(double t) { return t * 360.0; }
+inline double deg2turn(double d) { return d / 360.0; }
+inline double rad2grad(double r) { return r * 200.0 / piDouble; }
+inline double grad2rad(double g) { return g * piDouble / 200.0; }
+
+inline float deg2rad(float d) { return d * piFloat / 180.0f; }
+inline float rad2deg(float r) { return r * 180.0f / piFloat; }
+inline float deg2grad(float d) { return d * 400.0f / 360.0f; }
+inline float grad2deg(float g) { return g * 360.0f / 400.0f; }
+inline float turn2deg(float t) { return t * 360.0f; }
+inline float deg2turn(float d) { return d / 360.0f; }
+inline float rad2grad(float r) { return r * 200.0f / piFloat; }
+inline float grad2rad(float g) { return g * piFloat / 200.0f; }
+
+// std::numeric_limits<T>::min() returns the smallest positive value for floating point types
+template<typename T> inline T defaultMinimumForClamp() { return std::numeric_limits<T>::min(); }
+template<> inline float defaultMinimumForClamp() { return -std::numeric_limits<float>::max(); }
+template<> inline double defaultMinimumForClamp() { return -std::numeric_limits<double>::max(); }
+template<typename T> inline T defaultMaximumForClamp() { return std::numeric_limits<T>::max(); }
+
+template<typename T> inline T clampTo(double value, T min = defaultMinimumForClamp<T>(), T max = defaultMaximumForClamp<T>())
+{
+ if (value >= static_cast<double>(max))
+ return max;
+ if (value <= static_cast<double>(min))
+ return min;
+ return static_cast<T>(value);
+}
+template<> inline long long int clampTo(double, long long int, long long int); // clampTo does not support long long ints.
+
+inline int clampToInteger(double value)
+{
+ return clampTo<int>(value);
+}
+
+inline float clampToFloat(double value)
+{
+ return clampTo<float>(value);
+}
+
+inline int clampToPositiveInteger(double value)
+{
+ return clampTo<int>(value, 0);
+}
+
+inline int clampToInteger(float value)
+{
+ return clampTo<int>(value);
+}
+
+inline int clampToInteger(unsigned x)
+{
+ const unsigned intMax = static_cast<unsigned>(std::numeric_limits<int>::max());
+
+ if (x >= intMax)
+ return std::numeric_limits<int>::max();
+ return static_cast<int>(x);
+}
+
+inline bool isWithinIntRange(float x)
+{
+ return x > static_cast<float>(std::numeric_limits<int>::min()) && x < static_cast<float>(std::numeric_limits<int>::max());
+}
+
+template<typename T> inline bool hasOneBitSet(T value)
+{
+ return !((value - 1) & value) && value;
+}
+
+template<typename T> inline bool hasZeroOrOneBitsSet(T value)
+{
+ return !((value - 1) & value);
+}
+
+template<typename T> inline bool hasTwoOrMoreBitsSet(T value)
+{
+ return !hasZeroOrOneBitsSet(value);
+}
+
+template <typename T> inline unsigned getLSBSet(T value)
+{
+ unsigned result = 0;
+
+ while (value >>= 1)
+ ++result;
+
+ return result;
+}
+
+template<typename T> inline T timesThreePlusOneDividedByTwo(T value)
+{
+ // Mathematically equivalent to:
+ // (value * 3 + 1) / 2;
+ // or:
+ // (unsigned)ceil(value * 1.5));
+ // This form is not prone to internal overflow.
+ return value + (value >> 1) + (value & 1);
+}
+
+#ifndef UINT64_C
+#if COMPILER(MSVC)
+#define UINT64_C(c) c ## ui64
+#else
+#define UINT64_C(c) c ## ull
+#endif
+#endif
+
+#if COMPILER(MINGW64) && (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
+inline double wtf_pow(double x, double y)
+{
+ // MinGW-w64 has a custom implementation for pow.
+ // This handles certain special cases that are different.
+ if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
+ double f;
+ if (modf(y, &f) != 0.0)
+ return ((x == 0.0) ^ (y > 0.0)) ? std::numeric_limits<double>::infinity() : 0.0;
+ }
+
+ if (x == 2.0) {
+ int yInt = static_cast<int>(y);
+ if (y == yInt)
+ return ldexp(1.0, yInt);
+ }
+
+ return pow(x, y);
+}
+#define pow(x, y) wtf_pow(x, y)
+#endif // COMPILER(MINGW64) && (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
+
+
+// decompose 'number' to its sign, exponent, and mantissa components.
+// The result is interpreted as:
+// (sign ? -1 : 1) * pow(2, exponent) * (mantissa / (1 << 52))
+inline void decomposeDouble(double number, bool& sign, int32_t& exponent, uint64_t& mantissa)
+{
+ ASSERT(std::isfinite(number));
+
+ sign = std::signbit(number);
+
+ uint64_t bits = WTF::bitwise_cast<uint64_t>(number);
+ exponent = (static_cast<int32_t>(bits >> 52) & 0x7ff) - 0x3ff;
+ mantissa = bits & 0xFFFFFFFFFFFFFull;
+
+ // Check for zero/denormal values; if so, adjust the exponent,
+ // if not insert the implicit, omitted leading 1 bit.
+ if (exponent == -0x3ff)
+ exponent = mantissa ? -0x3fe : 0;
+ else
+ mantissa |= 0x10000000000000ull;
+}
+
+// Calculate d % 2^{64}.
+inline void doubleToInteger(double d, unsigned long long& value)
+{
+ if (std::isnan(d) || std::isinf(d))
+ value = 0;
+ else {
+ // -2^{64} < fmodValue < 2^{64}.
+ double fmodValue = fmod(trunc(d), std::numeric_limits<unsigned long long>::max() + 1.0);
+ if (fmodValue >= 0) {
+ // 0 <= fmodValue < 2^{64}.
+ // 0 <= value < 2^{64}. This cast causes no loss.
+ value = static_cast<unsigned long long>(fmodValue);
+ } else {
+ // -2^{64} < fmodValue < 0.
+ // 0 < fmodValueInUnsignedLongLong < 2^{64}. This cast causes no loss.
+ unsigned long long fmodValueInUnsignedLongLong = static_cast<unsigned long long>(-fmodValue);
+ // -1 < (std::numeric_limits<unsigned long long>::max() - fmodValueInUnsignedLongLong) < 2^{64} - 1.
+ // 0 < value < 2^{64}.
+ value = std::numeric_limits<unsigned long long>::max() - fmodValueInUnsignedLongLong + 1;
+ }
+ }
+}
+
+namespace WTF {
+
+// From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+inline uint32_t roundUpToPowerOfTwo(uint32_t v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
+
+inline unsigned fastLog2(unsigned i)
+{
+ unsigned log2 = 0;
+ if (i & (i - 1))
+ log2 += 1;
+ if (i >> 16)
+ log2 += 16, i >>= 16;
+ if (i >> 8)
+ log2 += 8, i >>= 8;
+ if (i >> 4)
+ log2 += 4, i >>= 4;
+ if (i >> 2)
+ log2 += 2, i >>= 2;
+ if (i >> 1)
+ log2 += 1;
+ return log2;
+}
+
+} // namespace WTF
+
+#endif // #ifndef WTF_MathExtras_h
diff --git a/src/3rdparty/masm/wtf/NotFound.h b/src/3rdparty/masm/wtf/NotFound.h
new file mode 100644
index 0000000000..4263bcecab
--- /dev/null
+++ b/src/3rdparty/masm/wtf/NotFound.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef NotFound_h
+#define NotFound_h
+
+namespace WTF {
+
+ const size_t notFound = static_cast<size_t>(-1);
+
+} // namespace WTF
+
+using WTF::notFound;
+
+#endif // NotFound_h
diff --git a/src/3rdparty/masm/wtf/NullPtr.h b/src/3rdparty/masm/wtf/NullPtr.h
new file mode 100644
index 0000000000..98c05140d8
--- /dev/null
+++ b/src/3rdparty/masm/wtf/NullPtr.h
@@ -0,0 +1,56 @@
+/*
+
+Copyright (C) 2010 Apple Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef NullPtr_h
+#define NullPtr_h
+
+// For compilers and standard libraries that do not yet include it, this adds the
+// nullptr_t type and nullptr object. They are defined in the same namespaces they
+// would be in compiler and library that had the support.
+
+#include <ciso646>
+
+#if COMPILER_SUPPORTS(CXX_NULLPTR) || defined(_LIBCPP_VERSION)
+
+#include <cstddef>
+
+// libstdc++ supports nullptr_t starting with gcc 4.6.
+#if defined(__GLIBCXX__) && __GLIBCXX__ < 20110325
+namespace std {
+typedef decltype(nullptr) nullptr_t;
+}
+#endif
+
+#else
+
+namespace std {
+class WTF_EXPORT_PRIVATE nullptr_t { };
+}
+extern WTF_EXPORT_PRIVATE std::nullptr_t nullptr;
+
+#endif
+
+#endif
diff --git a/src/3rdparty/masm/wtf/OSAllocator.h b/src/3rdparty/masm/wtf/OSAllocator.h
new file mode 100644
index 0000000000..a12a467497
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocator.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef OSAllocator_h
+#define OSAllocator_h
+
+#include <algorithm>
+#include <wtf/UnusedParam.h>
+#include <wtf/VMTags.h>
+
+namespace WTF {
+
+class OSAllocator {
+public:
+ enum Usage {
+ UnknownUsage = -1,
+ FastMallocPages = VM_TAG_FOR_TCMALLOC_MEMORY,
+ JSGCHeapPages = VM_TAG_FOR_COLLECTOR_MEMORY,
+ JSVMStackPages = VM_TAG_FOR_REGISTERFILE_MEMORY,
+ JSJITCodePages = VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY,
+ };
+
+ // These methods are symmetric; reserveUncommitted allocates VM in an uncommitted state,
+ // releaseDecommitted should be called on a region of VM allocated by a single reservation,
+ // the memory must all currently be in a decommitted state.
+ static void* reserveUncommitted(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false, bool includesGuardPages = false);
+ WTF_EXPORT_PRIVATE static void releaseDecommitted(void*, size_t);
+
+ // These methods are symmetric; they commit or decommit a region of VM (uncommitted VM should
+ // never be accessed, since the OS may not have attached physical memory for these regions).
+ // Clients should only call commit on uncommitted regions and decommit on committed regions.
+ static void commit(void*, size_t, bool writable, bool executable);
+ static void decommit(void*, size_t);
+
+    // These methods are symmetric; reserveAndCommit allocates VM in a committed state,
+ // decommitAndRelease should be called on a region of VM allocated by a single reservation,
+ // the memory must all currently be in a committed state.
+ WTF_EXPORT_PRIVATE static void* reserveAndCommit(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false, bool includesGuardPages = false);
+ static void decommitAndRelease(void* base, size_t size);
+
+ // These methods are akin to reserveAndCommit/decommitAndRelease, above - however rather than
+ // committing/decommitting the entire region additional parameters allow a subregion to be
+ // specified.
+ static void* reserveAndCommit(size_t reserveSize, size_t commitSize, Usage = UnknownUsage, bool writable = true, bool executable = false);
+ static void decommitAndRelease(void* releaseBase, size_t releaseSize, void* decommitBase, size_t decommitSize);
+
+ // Reallocate an existing, committed allocation.
+    // The prior allocation must be fully committed, and the new size will also be fully committed.
+ // This interface is provided since it may be possible to optimize this operation on some platforms.
+ template<typename T>
+ static T* reallocateCommitted(T*, size_t oldSize, size_t newSize, Usage = UnknownUsage, bool writable = true, bool executable = false);
+};
+
+inline void* OSAllocator::reserveAndCommit(size_t reserveSize, size_t commitSize, Usage usage, bool writable, bool executable)
+{
+ void* base = reserveUncommitted(reserveSize, usage, writable, executable);
+ commit(base, commitSize, writable, executable);
+ return base;
+}
+
+inline void OSAllocator::decommitAndRelease(void* releaseBase, size_t releaseSize, void* decommitBase, size_t decommitSize)
+{
+ ASSERT(decommitBase >= releaseBase && (static_cast<char*>(decommitBase) + decommitSize) <= (static_cast<char*>(releaseBase) + releaseSize));
+#if OS(WINCE)
+ // On most platforms we can actually skip this final decommit; releasing the VM will
+ // implicitly decommit any physical memory in the region. This is not true on WINCE.
+ decommit(decommitBase, decommitSize);
+#else
+ UNUSED_PARAM(decommitBase);
+ UNUSED_PARAM(decommitSize);
+#endif
+ releaseDecommitted(releaseBase, releaseSize);
+}
+
+inline void OSAllocator::decommitAndRelease(void* base, size_t size)
+{
+ decommitAndRelease(base, size, base, size);
+}
+
+template<typename T>
+inline T* OSAllocator::reallocateCommitted(T* oldBase, size_t oldSize, size_t newSize, Usage usage, bool writable, bool executable)
+{
+ void* newBase = reserveAndCommit(newSize, usage, writable, executable);
+ memcpy(newBase, oldBase, std::min(oldSize, newSize));
+ decommitAndRelease(oldBase, oldSize);
+ return static_cast<T*>(newBase);
+}
+
+} // namespace WTF
+
+using WTF::OSAllocator;
+
+#endif // OSAllocator_h
diff --git a/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp b/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp
new file mode 100644
index 0000000000..7b2a55c6b6
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocatorPosix.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "OSAllocator.h"
+
+#if OS(UNIX)
+
+#include "PageAllocation.h"
+#include <errno.h>
+#include <sys/mman.h>
+#include <wtf/Assertions.h>
+#include <wtf/UnusedParam.h>
+
+namespace WTF {
+
+void* OSAllocator::reserveUncommitted(size_t bytes, Usage usage, bool writable, bool executable, bool includesGuardPages)
+{
+#if OS(QNX)
+ // Reserve memory with PROT_NONE and MAP_LAZY so it isn't committed now.
+ void* result = mmap(0, bytes, PROT_NONE, MAP_LAZY | MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (result == MAP_FAILED)
+ CRASH();
+#elif OS(LINUX)
+ UNUSED_PARAM(usage);
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+ UNUSED_PARAM(includesGuardPages);
+
+ void* result = mmap(0, bytes, PROT_NONE, MAP_NORESERVE | MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (result == MAP_FAILED)
+ CRASH();
+ madvise(result, bytes, MADV_DONTNEED);
+#else
+ void* result = reserveAndCommit(bytes, usage, writable, executable, includesGuardPages);
+#if HAVE(MADV_FREE_REUSE)
+ // To support the "reserve then commit" model, we have to initially decommit.
+ while (madvise(result, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#endif
+
+#endif // OS(QNX)
+
+ return result;
+}
+
+void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bool executable, bool includesGuardPages)
+{
+ // All POSIX reservations start out logically committed.
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+
+ int flags = MAP_PRIVATE | MAP_ANON;
+#if PLATFORM(IOS)
+ if (executable)
+ flags |= MAP_JIT;
+#endif
+
+#if OS(DARWIN)
+ int fd = usage;
+#else
+ UNUSED_PARAM(usage);
+ int fd = -1;
+#endif
+
+ void* result = 0;
+#if (OS(DARWIN) && CPU(X86_64))
+ if (executable) {
+ ASSERT(includesGuardPages);
+ // Cook up an address to allocate at, using the following recipe:
+ // 17 bits of zero, stay in userspace kids.
+ // 26 bits of randomness for ASLR.
+ // 21 bits of zero, at least stay aligned within one level of the pagetables.
+ //
+ // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
+ // for now instead of 2^26 bits of ASLR lets stick with 25 bits of randomization plus
+ // 2^24, which should put up somewhere in the middle of userspace (in the address range
+ // 0x200000000000 .. 0x5fffffffffff).
+ intptr_t randomLocation = 0;
+ randomLocation = arc4random() & ((1 << 25) - 1);
+ randomLocation += (1 << 24);
+ randomLocation <<= 21;
+ result = reinterpret_cast<void*>(randomLocation);
+ }
+#endif
+
+ result = mmap(result, bytes, protection, flags, fd, 0);
+ if (result == MAP_FAILED) {
+#if ENABLE(LLINT)
+ if (executable)
+ result = 0;
+ else
+#endif
+ CRASH();
+ }
+ if (result && includesGuardPages) {
+ // We use mmap to remap the guardpages rather than using mprotect as
+ // mprotect results in multiple references to the code region. This
+ // breaks the madvise based mechanism we use to return physical memory
+ // to the OS.
+ mmap(result, pageSize(), PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, fd, 0);
+ mmap(static_cast<char*>(result) + bytes - pageSize(), pageSize(), PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, fd, 0);
+ }
+ return result;
+}
+
+void OSAllocator::commit(void* address, size_t bytes, bool writable, bool executable)
+{
+#if OS(QNX)
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+ if (MAP_FAILED == mmap(address, bytes, protection, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0))
+ CRASH();
+#elif OS(LINUX)
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+ if (mprotect(address, bytes, protection))
+ CRASH();
+ madvise(address, bytes, MADV_WILLNEED);
+#elif HAVE(MADV_FREE_REUSE)
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+ while (madvise(address, bytes, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+#else
+ // Non-MADV_FREE_REUSE reservations automatically commit on demand.
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(bytes);
+ UNUSED_PARAM(writable);
+ UNUSED_PARAM(executable);
+#endif
+}
+
+void OSAllocator::decommit(void* address, size_t bytes)
+{
+#if OS(QNX)
+ // Use PROT_NONE and MAP_LAZY to decommit the pages.
+ mmap(address, bytes, PROT_NONE, MAP_FIXED | MAP_LAZY | MAP_PRIVATE | MAP_ANON, -1, 0);
+#elif OS(LINUX)
+ madvise(address, bytes, MADV_DONTNEED);
+ if (mprotect(address, bytes, PROT_NONE))
+ CRASH();
+#elif HAVE(MADV_FREE_REUSE)
+ while (madvise(address, bytes, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_FREE)
+ while (madvise(address, bytes, MADV_FREE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_DONTNEED)
+ while (madvise(address, bytes, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+#else
+ UNUSED_PARAM(address);
+ UNUSED_PARAM(bytes);
+#endif
+}
+
+void OSAllocator::releaseDecommitted(void* address, size_t bytes)
+{
+ int result = munmap(address, bytes);
+ if (result == -1)
+ CRASH();
+}
+
+} // namespace WTF
+
+#endif // OS(UNIX)
diff --git a/src/3rdparty/masm/wtf/OSAllocatorWin.cpp b/src/3rdparty/masm/wtf/OSAllocatorWin.cpp
new file mode 100644
index 0000000000..78300dc715
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocatorWin.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "OSAllocator.h"
+
+#if OS(WINDOWS)
+
+#include "windows.h"
+#include <wtf/Assertions.h>
+
+namespace WTF {
+
+static inline DWORD protection(bool writable, bool executable)
+{
+ return executable ?
+ (writable ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ) :
+ (writable ? PAGE_READWRITE : PAGE_READONLY);
+}
+
+void* OSAllocator::reserveUncommitted(size_t bytes, Usage, bool writable, bool executable, bool)
+{
+ void* result = VirtualAlloc(0, bytes, MEM_RESERVE, protection(writable, executable));
+ if (!result)
+ CRASH();
+ return result;
+}
+
+void* OSAllocator::reserveAndCommit(size_t bytes, Usage, bool writable, bool executable, bool)
+{
+ void* result = VirtualAlloc(0, bytes, MEM_RESERVE | MEM_COMMIT, protection(writable, executable));
+ if (!result)
+ CRASH();
+ return result;
+}
+
+void OSAllocator::commit(void* address, size_t bytes, bool writable, bool executable)
+{
+ void* result = VirtualAlloc(address, bytes, MEM_COMMIT, protection(writable, executable));
+ if (!result)
+ CRASH();
+}
+
+void OSAllocator::decommit(void* address, size_t bytes)
+{
+ bool result = VirtualFree(address, bytes, MEM_DECOMMIT);
+ if (!result)
+ CRASH();
+}
+
+void OSAllocator::releaseDecommitted(void* address, size_t bytes)
+{
+ // According to http://msdn.microsoft.com/en-us/library/aa366892(VS.85).aspx,
+ // dwSize must be 0 if dwFreeType is MEM_RELEASE.
+ bool result = VirtualFree(address, 0, MEM_RELEASE);
+ if (!result)
+ CRASH();
+}
+
+} // namespace WTF
+
+#endif // OS(WINDOWS)
diff --git a/src/3rdparty/masm/wtf/PageAllocation.h b/src/3rdparty/masm/wtf/PageAllocation.h
new file mode 100644
index 0000000000..18d31880c0
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageAllocation.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageAllocation_h
+#define PageAllocation_h
+
+#include <wtf/Assertions.h>
+#include <wtf/OSAllocator.h>
+#include <wtf/PageBlock.h>
+#include <wtf/UnusedParam.h>
+#include <wtf/VMTags.h>
+#include <algorithm>
+
+#if OS(DARWIN)
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#if OS(WINDOWS)
+#include <malloc.h>
+#include <windows.h>
+#endif
+
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#endif
+
+#if HAVE(MMAP)
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+namespace WTF {
+
+/*
+ PageAllocation
+
+ The PageAllocation class provides a cross-platform memory allocation interface
+ with similar capabilities to posix mmap/munmap. Memory is allocated by calling
+ PageAllocation::allocate, and deallocated by calling deallocate on the
+ PageAllocation object. The PageAllocation holds the allocation's base pointer
+ and size.
+
+ The allocate method is passed the size required (which must be a multiple of
+ the system page size, which can be accessed using PageAllocation::pageSize).
+    Callers may also optionally provide a flag indicating the usage (for use by
+ system memory usage tracking tools, where implemented), and boolean values
+ specifying the required protection (defaulting to writable, non-executable).
+*/
+
+class PageAllocation : private PageBlock {
+public:
+ PageAllocation()
+ {
+ }
+
+ using PageBlock::size;
+ using PageBlock::base;
+
+#ifndef __clang__
+ using PageBlock::operator bool;
+#else
+ // FIXME: This is a workaround for <rdar://problem/8876150>, wherein Clang incorrectly emits an access
+ // control warning when a client tries to use operator bool exposed above via "using PageBlock::operator bool".
+ operator bool() const { return PageBlock::operator bool(); }
+#endif
+
+ static PageAllocation allocate(size_t size, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(size));
+ return PageAllocation(OSAllocator::reserveAndCommit(size, usage, writable, executable), size);
+ }
+
+ void deallocate()
+ {
+ // Clear base & size before calling release; if this is *inside* allocation
+        // then we won't be able to clear them after deallocating the memory.
+ PageAllocation tmp;
+ std::swap(tmp, *this);
+
+ ASSERT(tmp);
+ ASSERT(!*this);
+
+ OSAllocator::decommitAndRelease(tmp.base(), tmp.size());
+ }
+
+private:
+ PageAllocation(void* base, size_t size)
+ : PageBlock(base, size, false)
+ {
+ }
+};
+
+} // namespace WTF
+
+using WTF::PageAllocation;
+
+#endif // PageAllocation_h
diff --git a/src/3rdparty/masm/wtf/PageAllocationAligned.cpp b/src/3rdparty/masm/wtf/PageAllocationAligned.cpp
new file mode 100644
index 0000000000..bdb976b1b7
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageAllocationAligned.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PageAllocationAligned.h"
+
+namespace WTF {
+
+PageAllocationAligned PageAllocationAligned::allocate(size_t size, size_t alignment, OSAllocator::Usage usage, bool writable)
+{
+ ASSERT(isPageAligned(size));
+ ASSERT(isPageAligned(alignment));
+ ASSERT(isPowerOfTwo(alignment));
+ ASSERT(size >= alignment);
+ size_t alignmentMask = alignment - 1;
+
+#if OS(DARWIN)
+ int flags = VM_FLAGS_ANYWHERE;
+ if (usage != OSAllocator::UnknownUsage)
+ flags |= usage;
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+
+ vm_address_t address = 0;
+ vm_map(current_task(), &address, size, alignmentMask, flags, MEMORY_OBJECT_NULL, 0, FALSE, protection, PROT_READ | PROT_WRITE, VM_INHERIT_DEFAULT);
+ return PageAllocationAligned(reinterpret_cast<void*>(address), size);
+#else
+ size_t alignmentDelta = alignment - pageSize();
+
+    // Reserve with sufficient additional VM to correctly align.
+ size_t reservationSize = size + alignmentDelta;
+ void* reservationBase = OSAllocator::reserveUncommitted(reservationSize, usage, writable, false);
+
+ // Select an aligned region within the reservation and commit.
+ void* alignedBase = reinterpret_cast<uintptr_t>(reservationBase) & alignmentMask
+ ? reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(reservationBase) & ~alignmentMask) + alignment)
+ : reservationBase;
+ OSAllocator::commit(alignedBase, size, writable, false);
+
+ return PageAllocationAligned(alignedBase, size, reservationBase, reservationSize);
+#endif
+}
+
+void PageAllocationAligned::deallocate()
+{
+ // Clear base & size before calling release; if this is *inside* allocation
+    // then we won't be able to clear them after deallocating the memory.
+ PageAllocationAligned tmp;
+ std::swap(tmp, *this);
+
+ ASSERT(tmp);
+ ASSERT(!*this);
+
+#if OS(DARWIN)
+ vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(tmp.base()), tmp.size());
+#else
+ ASSERT(tmp.m_reservation.contains(tmp.base(), tmp.size()));
+ OSAllocator::decommitAndRelease(tmp.m_reservation.base(), tmp.m_reservation.size(), tmp.base(), tmp.size());
+#endif
+}
+
+} // namespace WTF
diff --git a/src/3rdparty/masm/wtf/PageAllocationAligned.h b/src/3rdparty/masm/wtf/PageAllocationAligned.h
new file mode 100644
index 0000000000..211a61b8b5
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageAllocationAligned.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageAllocationAligned_h
+#define PageAllocationAligned_h
+
+#include <wtf/OSAllocator.h>
+#include <wtf/PageReservation.h>
+
+namespace WTF {
+
+class PageAllocationAligned : private PageBlock {
+public:
+ PageAllocationAligned()
+ {
+ }
+
+ using PageBlock::operator bool;
+ using PageBlock::size;
+ using PageBlock::base;
+
+ static PageAllocationAligned allocate(size_t size, size_t alignment, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true);
+
+ void deallocate();
+
+private:
+#if OS(DARWIN)
+ PageAllocationAligned(void* base, size_t size)
+ : PageBlock(base, size, false)
+ {
+ }
+#else
+ PageAllocationAligned(void* base, size_t size, void* reservationBase, size_t reservationSize)
+ : PageBlock(base, size, false)
+ , m_reservation(reservationBase, reservationSize, false)
+ {
+ }
+
+ PageBlock m_reservation;
+#endif
+};
+
+
+} // namespace WTF
+
+using WTF::PageAllocationAligned;
+
+#endif // PageAllocationAligned_h
diff --git a/src/3rdparty/masm/wtf/PageBlock.cpp b/src/3rdparty/masm/wtf/PageBlock.cpp
new file mode 100644
index 0000000000..8bbd7eb600
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageBlock.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PageBlock.h"
+
+#if OS(UNIX)
+#include <unistd.h>
+#endif
+
+#if OS(WINDOWS)
+#include <malloc.h>
+#include <windows.h>
+#endif
+
+namespace WTF {
+
+static size_t s_pageSize;
+static size_t s_pageMask;
+
+#if OS(UNIX)
+
+inline size_t systemPageSize()
+{
+ return getpagesize();
+}
+
+#elif OS(WINDOWS)
+
+inline size_t systemPageSize()
+{
+ static size_t size = 0;
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ size = system_info.dwPageSize;
+ return size;
+}
+
+#endif
+
+size_t pageSize()
+{
+ if (!s_pageSize)
+ s_pageSize = systemPageSize();
+ ASSERT(isPowerOfTwo(s_pageSize));
+ return s_pageSize;
+}
+
+size_t pageMask()
+{
+ if (!s_pageMask)
+ s_pageMask = ~(pageSize() - 1);
+ return s_pageMask;
+}
+
+} // namespace WTF
diff --git a/src/3rdparty/masm/wtf/PageBlock.h b/src/3rdparty/masm/wtf/PageBlock.h
new file mode 100644
index 0000000000..56e5570178
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageBlock.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageBlock_h
+#define PageBlock_h
+
+namespace WTF {
+
+WTF_EXPORT_PRIVATE size_t pageSize();
+WTF_EXPORT_PRIVATE size_t pageMask();
+inline bool isPageAligned(void* address) { return !(reinterpret_cast<intptr_t>(address) & (pageSize() - 1)); }
+inline bool isPageAligned(size_t size) { return !(size & (pageSize() - 1)); }
+inline bool isPowerOfTwo(size_t size) { return !(size & (size - 1)); }
+
+class PageBlock {
+public:
+ PageBlock();
+ PageBlock(const PageBlock&);
+ PageBlock(void*, size_t, bool hasGuardPages);
+
+ void* base() const { return m_base; }
+ size_t size() const { return m_size; }
+
+ operator bool() const { return !!m_realBase; }
+
+ bool contains(void* containedBase, size_t containedSize)
+ {
+ return containedBase >= m_base
+ && (static_cast<char*>(containedBase) + containedSize) <= (static_cast<char*>(m_base) + m_size);
+ }
+
+private:
+ void* m_realBase;
+ void* m_base;
+ size_t m_size;
+};
+
+inline PageBlock::PageBlock()
+ : m_realBase(0)
+ , m_base(0)
+ , m_size(0)
+{
+}
+
+inline PageBlock::PageBlock(const PageBlock& other)
+ : m_realBase(other.m_realBase)
+ , m_base(other.m_base)
+ , m_size(other.m_size)
+{
+}
+
+inline PageBlock::PageBlock(void* base, size_t size, bool hasGuardPages)
+ : m_realBase(base)
+ , m_base(static_cast<char*>(base) + ((base && hasGuardPages) ? pageSize() : 0))
+ , m_size(size)
+{
+}
+
+} // namespace WTF
+
+using WTF::pageSize;
+using WTF::isPageAligned;
+using WTF::isPageAligned;
+using WTF::isPowerOfTwo;
+
+#endif // PageBlock_h
diff --git a/src/3rdparty/masm/wtf/PageReservation.h b/src/3rdparty/masm/wtf/PageReservation.h
new file mode 100644
index 0000000000..77783ebcc4
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PageReservation.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageReservation_h
+#define PageReservation_h
+
+#include <wtf/PageAllocation.h>
+
+namespace WTF {
+
+/*
+ PageReservation
+
+ Like PageAllocation, the PageReservation class provides a cross-platform memory
+ allocation interface, but with a set of capabilities more similar to that of
+ VirtualAlloc than posix mmap. PageReservation can be used to allocate virtual
+ memory without committing physical memory pages using PageReservation::reserve.
+    Following a call to reserve all memory in the region is in a decommitted state,
+ in which the memory should not be used (accessing the memory may cause a fault).
+
+    Before using memory it must be committed by calling commit, which is passed start
+    and size values (both of which require system page size granularity). Once the
+    committed memory is no longer needed 'decommit' may be called to return the
+    memory to its decommitted state. Commit should only be called on memory that is
+    currently decommitted, and decommit should only be called on memory regions that
+    are currently committed. All memory should be decommitted before the reservation
+    is deallocated. Values in memory may not be retained across a pair of calls if
+    the region of memory is decommitted and then committed again.
+
+    Memory protection should not be changed on decommitted memory, and if protection
+    is changed on memory while it is committed it should be returned to the original
+    protection before decommit is called.
+*/
+
+class PageReservation : private PageBlock {
+public:
+ PageReservation()
+ : m_committed(0)
+ , m_writable(false)
+ , m_executable(false)
+ {
+ }
+
+ using PageBlock::base;
+ using PageBlock::size;
+
+#ifndef __clang__
+ using PageBlock::operator bool;
+#else
+ // FIXME: This is a workaround for <rdar://problem/8876150>, wherein Clang incorrectly emits an access
+ // control warning when a client tries to use operator bool exposed above via "using PageBlock::operator bool".
+ operator bool() const { return PageBlock::operator bool(); }
+#endif
+
+ void commit(void* start, size_t size)
+ {
+ ASSERT(*this);
+ ASSERT(isPageAligned(start));
+ ASSERT(isPageAligned(size));
+ ASSERT(contains(start, size));
+
+ m_committed += size;
+ OSAllocator::commit(start, size, m_writable, m_executable);
+ }
+
+ void decommit(void* start, size_t size)
+ {
+ ASSERT(*this);
+ ASSERT(isPageAligned(start));
+ ASSERT(isPageAligned(size));
+ ASSERT(contains(start, size));
+
+ m_committed -= size;
+ OSAllocator::decommit(start, size);
+ }
+
+ size_t committed()
+ {
+ return m_committed;
+ }
+
+ static PageReservation reserve(size_t size, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(size));
+ return PageReservation(OSAllocator::reserveUncommitted(size, usage, writable, executable), size, writable, executable, false);
+ }
+
+ static PageReservation reserveWithGuardPages(size_t size, OSAllocator::Usage usage = OSAllocator::UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(size));
+ return PageReservation(OSAllocator::reserveUncommitted(size + pageSize() * 2, usage, writable, executable, true), size, writable, executable, true);
+ }
+
+ void deallocate()
+ {
+ ASSERT(!m_committed);
+
+        // Clear base & size before calling release; if this is *inside* allocation
+        // then we won't be able to clear them after deallocating the memory.
+ PageReservation tmp;
+ std::swap(tmp, *this);
+
+ ASSERT(tmp);
+ ASSERT(!*this);
+
+ OSAllocator::releaseDecommitted(tmp.base(), tmp.size());
+ }
+
+private:
+ PageReservation(void* base, size_t size, bool writable, bool executable, bool hasGuardPages)
+ : PageBlock(base, size, hasGuardPages)
+ , m_committed(0)
+ , m_writable(writable)
+ , m_executable(executable)
+ {
+ }
+
+ size_t m_committed;
+ bool m_writable;
+ bool m_executable;
+};
+
+}
+
+using WTF::PageReservation;
+
+#endif // PageReservation_h
diff --git a/src/3rdparty/masm/wtf/Platform.h b/src/3rdparty/masm/wtf/Platform.h
new file mode 100644
index 0000000000..5c85c15634
--- /dev/null
+++ b/src/3rdparty/masm/wtf/Platform.h
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (C) 2006, 2007, 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Torch Mobile, Inc.
+ * Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_Platform_h
+#define WTF_Platform_h
+
+/* Include compiler specific macros */
+#include <wtf/Compiler.h>
+
+/* ==== PLATFORM handles OS, operating environment, graphics API, and
+ CPU. This macro will be phased out in favor of platform adaptation
+ macros, policy decision macros, and top-level port definitions. ==== */
+#define PLATFORM(WTF_FEATURE) (defined WTF_PLATFORM_##WTF_FEATURE && WTF_PLATFORM_##WTF_FEATURE)
+
+
+/* ==== Platform adaptation macros: these describe properties of the target environment. ==== */
+
+/* CPU() - the target CPU architecture */
+#define CPU(WTF_FEATURE) (defined WTF_CPU_##WTF_FEATURE && WTF_CPU_##WTF_FEATURE)
+/* HAVE() - specific system features (headers, functions or similar) that are present or not */
+#define HAVE(WTF_FEATURE) (defined HAVE_##WTF_FEATURE && HAVE_##WTF_FEATURE)
+/* OS() - underlying operating system; only to be used for mandated low-level services like
+ virtual memory, not to choose a GUI toolkit */
+#define OS(WTF_FEATURE) (defined WTF_OS_##WTF_FEATURE && WTF_OS_##WTF_FEATURE)
+
+
+/* ==== Policy decision macros: these define policy choices for a particular port. ==== */
+
+/* USE() - use a particular third-party library or optional OS service */
+#define USE(WTF_FEATURE) (defined WTF_USE_##WTF_FEATURE && WTF_USE_##WTF_FEATURE)
+/* ENABLE() - turn on a specific feature of WebKit */
+#define ENABLE(WTF_FEATURE) (defined ENABLE_##WTF_FEATURE && ENABLE_##WTF_FEATURE)
+
+
+/* ==== CPU() - the target CPU architecture ==== */
+
+/* This also defines CPU(BIG_ENDIAN) or CPU(MIDDLE_ENDIAN) or neither, as appropriate. */
+
+/* CPU(ALPHA) - DEC Alpha */
+#if defined(__alpha__)
+#define WTF_CPU_ALPHA 1
+#endif
+
+/* CPU(IA64) - Itanium / IA-64 */
+#if defined(__ia64__)
+#define WTF_CPU_IA64 1
+/* 32-bit mode on Itanium */
+#if !defined(__LP64__)
+#define WTF_CPU_IA64_32 1
+#endif
+#endif
+
+/* CPU(MIPS) - MIPS 32-bit */
+/* Note: Only O32 ABI is tested, so we enable it for O32 ABI for now. */
+#if (defined(mips) || defined(__mips__) || defined(MIPS) || defined(_MIPS_)) \
+ && defined(_ABIO32)
+#define WTF_CPU_MIPS 1
+#if defined(__MIPSEB__)
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+#define WTF_MIPS_PIC (defined __PIC__)
+#define WTF_MIPS_ARCH __mips
+#define WTF_MIPS_ISA(v) (defined WTF_MIPS_ARCH && WTF_MIPS_ARCH == v)
+#define WTF_MIPS_ISA_AT_LEAST(v) (defined WTF_MIPS_ARCH && WTF_MIPS_ARCH >= v)
+#define WTF_MIPS_ARCH_REV __mips_isa_rev
+#define WTF_MIPS_ISA_REV(v) (defined WTF_MIPS_ARCH_REV && WTF_MIPS_ARCH_REV == v)
+#define WTF_MIPS_DOUBLE_FLOAT (defined __mips_hard_float && !defined __mips_single_float)
+#define WTF_MIPS_FP64 (defined __mips_fpr && __mips_fpr == 64)
+/* MIPS requires allocators to use aligned memory */
+#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+#endif /* MIPS */
+
+/* CPU(PPC) - PowerPC 32-bit */
+#if defined(__ppc__) \
+ || defined(__PPC__) \
+ || defined(__powerpc__) \
+ || defined(__powerpc) \
+ || defined(__POWERPC__) \
+ || defined(_M_PPC) \
+ || defined(__PPC)
+#define WTF_CPU_PPC 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(PPC64) - PowerPC 64-bit */
+#if defined(__ppc64__) \
+ || defined(__PPC64__)
+#define WTF_CPU_PPC64 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SH4) - SuperH SH-4 */
+#if defined(__SH4__)
+#define WTF_CPU_SH4 1
+#endif
+
+/* CPU(SPARC32) - SPARC 32-bit */
+#if defined(__sparc) && !defined(__arch64__) || defined(__sparcv8)
+#define WTF_CPU_SPARC32 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SPARC64) - SPARC 64-bit */
+#if defined(__sparc__) && defined(__arch64__) || defined (__sparcv9)
+#define WTF_CPU_SPARC64 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(SPARC) - any SPARC, true for CPU(SPARC32) and CPU(SPARC64) */
+#if CPU(SPARC32) || CPU(SPARC64)
+#define WTF_CPU_SPARC 1
+#endif
+
+/* CPU(S390X) - S390 64-bit */
+#if defined(__s390x__)
+#define WTF_CPU_S390X 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(S390) - S390 32-bit */
+#if defined(__s390__)
+#define WTF_CPU_S390 1
+#define WTF_CPU_BIG_ENDIAN 1
+#endif
+
+/* CPU(X86) - i386 / x86 32-bit */
+#if defined(__i386__) \
+ || defined(i386) \
+ || defined(_M_IX86) \
+ || defined(_X86_) \
+ || defined(__THW_INTEL)
+#define WTF_CPU_X86 1
+#endif
+
+/* CPU(X86_64) - AMD64 / Intel64 / x86_64 64-bit */
+#if defined(__x86_64__) \
+ || defined(_M_X64)
+#define WTF_CPU_X86_64 1
+#endif
+
+/* CPU(ARM) - ARM, any version*/
+#define WTF_ARM_ARCH_AT_LEAST(N) (CPU(ARM) && WTF_ARM_ARCH_VERSION >= N)
+
+#if defined(arm) \
+ || defined(__arm__) \
+ || defined(ARM) \
+ || defined(_ARM_)
+#define WTF_CPU_ARM 1
+
+#if defined(__ARM_PCS_VFP)
+#define WTF_CPU_ARM_HARDFP 1
+#endif
+
+#if defined(__ARMEB__) || (COMPILER(RVCT) && defined(__BIG_ENDIAN))
+#define WTF_CPU_BIG_ENDIAN 1
+
+#elif !defined(__ARM_EABI__) \
+ && !defined(__EABI__) \
+ && !defined(__VFP_FP__) \
+ && !defined(_WIN32_WCE)
+#define WTF_CPU_MIDDLE_ENDIAN 1
+
+#endif
+
+/* Set WTF_ARM_ARCH_VERSION */
+#if defined(__ARM_ARCH_4__) \
+ || defined(__ARM_ARCH_4T__) \
+ || defined(__MARM_ARMV4__)
+#define WTF_ARM_ARCH_VERSION 4
+
+#elif defined(__ARM_ARCH_5__) \
+ || defined(__ARM_ARCH_5T__) \
+ || defined(__MARM_ARMV5__)
+#define WTF_ARM_ARCH_VERSION 5
+
+#elif defined(__ARM_ARCH_5E__) \
+ || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+#define WTF_ARM_ARCH_VERSION 5
+/*ARMv5TE requires allocators to use aligned memory*/
+#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+
+#elif defined(__ARM_ARCH_6__) \
+ || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6T2__) \
+ || defined(__ARMV6__)
+#define WTF_ARM_ARCH_VERSION 6
+
+#elif defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) \
+ || defined(__ARM_ARCH_7S__)
+#define WTF_ARM_ARCH_VERSION 7
+
+/* MSVC sets _M_ARM */
+#elif defined(_M_ARM)
+#define WTF_ARM_ARCH_VERSION _M_ARM
+
+/* RVCT sets _TARGET_ARCH_ARM */
+#elif defined(__TARGET_ARCH_ARM)
+#define WTF_ARM_ARCH_VERSION __TARGET_ARCH_ARM
+
+#if defined(__TARGET_ARCH_5E) \
+ || defined(__TARGET_ARCH_5TE) \
+ || defined(__TARGET_ARCH_5TEJ)
+/*ARMv5TE requires allocators to use aligned memory*/
+#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
+#endif
+
+#else
+#define WTF_ARM_ARCH_VERSION 0
+
+#endif
+
+/* Set WTF_THUMB_ARCH_VERSION */
+#if defined(__ARM_ARCH_4T__)
+#define WTF_THUMB_ARCH_VERSION 1
+
+#elif defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+#define WTF_THUMB_ARCH_VERSION 2
+
+#elif defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6M__)
+#define WTF_THUMB_ARCH_VERSION 3
+
+#elif defined(__ARM_ARCH_6T2__) \
+ || defined(__ARM_ARCH_7__) \
+ || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7R__) \
+ || defined(__ARM_ARCH_7S__)
+#define WTF_THUMB_ARCH_VERSION 4
+
+/* RVCT sets __TARGET_ARCH_THUMB */
+#elif defined(__TARGET_ARCH_THUMB)
+#define WTF_THUMB_ARCH_VERSION __TARGET_ARCH_THUMB
+
+#else
+#define WTF_THUMB_ARCH_VERSION 0
+#endif
+
+
+/* CPU(ARMV5_OR_LOWER) - ARM instruction set v5 or earlier */
+/* On ARMv5 and below the natural alignment is required.
+ And there are some other differences for v5 or earlier. */
+#if !defined(ARMV5_OR_LOWER) && !WTF_ARM_ARCH_AT_LEAST(6)
+#define WTF_CPU_ARMV5_OR_LOWER 1
+#endif
+
+
+/* CPU(ARM_TRADITIONAL) - Thumb2 is not available, only traditional ARM (v4 or greater) */
+/* CPU(ARM_THUMB2) - Thumb2 instruction set is available */
+/* Only one of these will be defined. */
+#if !defined(WTF_CPU_ARM_TRADITIONAL) && !defined(WTF_CPU_ARM_THUMB2)
+# if defined(thumb2) || defined(__thumb2__) \
+ || ((defined(__thumb) || defined(__thumb__)) && WTF_THUMB_ARCH_VERSION == 4)
+# define WTF_CPU_ARM_TRADITIONAL 0
+# define WTF_CPU_ARM_THUMB2 1
+# elif WTF_ARM_ARCH_AT_LEAST(4)
+# define WTF_CPU_ARM_TRADITIONAL 1
+# define WTF_CPU_ARM_THUMB2 0
+# else
+# error "Not supported ARM architecture"
+# endif
+#elif CPU(ARM_TRADITIONAL) && CPU(ARM_THUMB2) /* Sanity Check */
+# error "Cannot use both of WTF_CPU_ARM_TRADITIONAL and WTF_CPU_ARM_THUMB2 platforms"
+#endif /* !defined(WTF_CPU_ARM_TRADITIONAL) && !defined(WTF_CPU_ARM_THUMB2) */
+
+#if defined(__ARM_NEON__) && !defined(WTF_CPU_ARM_NEON)
+#define WTF_CPU_ARM_NEON 1
+#endif
+
+#if CPU(ARM_NEON) && (!COMPILER(GCC) || GCC_VERSION_AT_LEAST(4, 7, 0))
+// All NEON intrinsics usage can be disabled by this macro.
+#define HAVE_ARM_NEON_INTRINSICS 1
+#endif
+
+#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
+#define WTF_CPU_ARM_VFP 1
+#endif
+
+#if defined(__ARM_ARCH_7S__)
+#define WTF_CPU_APPLE_ARMV7S 1
+#endif
+
+#endif /* ARM */
+
+#if CPU(ARM) || CPU(MIPS) || CPU(SH4) || CPU(SPARC)
+#define WTF_CPU_NEEDS_ALIGNED_ACCESS 1
+#endif
+
+/* ==== OS() - underlying operating system; only to be used for mandated low-level services like
+ virtual memory, not to choose a GUI toolkit ==== */
+
+/* OS(AIX) - AIX */
+#ifdef _AIX
+#define WTF_OS_AIX 1
+#endif
+
+/* OS(DARWIN) - Any Darwin-based OS, including Mac OS X and iPhone OS */
+#ifdef __APPLE__
+#define WTF_OS_DARWIN 1
+
+#include <Availability.h>
+#include <AvailabilityMacros.h>
+#include <TargetConditionals.h>
+#endif
+
+/* OS(IOS) - iOS */
+/* OS(MAC_OS_X) - Mac OS X (not including iOS) */
+#if OS(DARWIN) && ((defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) \
+ || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) \
+ || (defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR))
+#define WTF_OS_IOS 1
+#elif OS(DARWIN) && defined(TARGET_OS_MAC) && TARGET_OS_MAC
+#define WTF_OS_MAC_OS_X 1
+
+/* FIXME: These can be removed after sufficient time has passed since the removal of BUILDING_ON / TARGETING macros. */
+
+#define ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED 0 / 0
+#define ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED 0 / 0
+
+#define BUILDING_ON_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
+#define BUILDING_ON_SNOW_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
+#define BUILDING_ON_LION ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MIN_REQUIRED
+
+#define TARGETING_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
+#define TARGETING_SNOW_LEOPARD ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
+#define TARGETING_LION ERROR_PLEASE_COMPARE_WITH_MAC_OS_X_VERSION_MAX_ALLOWED
+#endif
+
+/* OS(FREEBSD) - FreeBSD */
+#if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
+#define WTF_OS_FREEBSD 1
+#endif
+
+/* OS(HURD) - GNU/Hurd */
+#ifdef __GNU__
+#define WTF_OS_HURD 1
+#endif
+
+/* OS(LINUX) - Linux */
+#ifdef __linux__
+#define WTF_OS_LINUX 1
+#endif
+
+/* OS(NETBSD) - NetBSD */
+#if defined(__NetBSD__)
+#define WTF_OS_NETBSD 1
+#endif
+
+/* OS(OPENBSD) - OpenBSD */
+#ifdef __OpenBSD__
+#define WTF_OS_OPENBSD 1
+#endif
+
+/* OS(QNX) - QNX */
+#if defined(__QNXNTO__)
+#define WTF_OS_QNX 1
+#endif
+
+/* OS(SOLARIS) - Solaris */
+#if defined(sun) || defined(__sun)
+#define WTF_OS_SOLARIS 1
+#endif
+
+/* OS(WINCE) - Windows CE; note that for this platform OS(WINDOWS) is also defined */
+#if defined(_WIN32_WCE)
+#define WTF_OS_WINCE 1
+#endif
+
+/* OS(WINDOWS) - Any version of Windows */
+#if defined(WIN32) || defined(_WIN32)
+#define WTF_OS_WINDOWS 1
+#endif
+
+#define WTF_OS_WIN ERROR "USE WINDOWS WITH OS NOT WIN"
+#define WTF_OS_MAC ERROR "USE MAC_OS_X WITH OS NOT MAC"
+
+/* OS(UNIX) - Any Unix-like system */
+#if OS(AIX) \
+ || OS(DARWIN) \
+ || OS(FREEBSD) \
+ || OS(HURD) \
+ || OS(LINUX) \
+ || OS(NETBSD) \
+ || OS(OPENBSD) \
+ || OS(QNX) \
+ || OS(SOLARIS) \
+ || defined(unix) \
+ || defined(__unix) \
+ || defined(__unix__)
+#define WTF_OS_UNIX 1
+#endif
+
+/* Operating environments */
+
+/* FIXME: these are all mixes of OS, operating environment and policy choices. */
+/* PLATFORM(QT) */
+/* PLATFORM(WX) */
+/* PLATFORM(EFL) */
+/* PLATFORM(GTK) */
+/* PLATFORM(BLACKBERRY) */
+/* PLATFORM(MAC) */
+/* PLATFORM(WIN) */
+#if defined(BUILDING_QT__)
+#define WTF_PLATFORM_QT 1
+#elif defined(BUILDING_WX__)
+#define WTF_PLATFORM_WX 1
+#elif defined(BUILDING_EFL__)
+#define WTF_PLATFORM_EFL 1
+#elif defined(BUILDING_GTK__)
+#define WTF_PLATFORM_GTK 1
+#elif defined(BUILDING_BLACKBERRY__)
+#define WTF_PLATFORM_BLACKBERRY 1
+#elif OS(DARWIN)
+#define WTF_PLATFORM_MAC 1
+#elif OS(WINDOWS)
+#define WTF_PLATFORM_WIN 1
+#endif
+
+/* PLATFORM(IOS) */
+/* FIXME: this is sometimes used as an OS switch and sometimes for higher-level things */
+#if (defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE)
+#define WTF_PLATFORM_IOS 1
+#endif
+
+/* PLATFORM(IOS_SIMULATOR) */
+#if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
+#define WTF_PLATFORM_IOS 1
+#define WTF_PLATFORM_IOS_SIMULATOR 1
+#endif
+
+/* Graphics engines */
+
+/* USE(CG) and PLATFORM(CI) */
+#if PLATFORM(MAC) || PLATFORM(IOS)
+#define WTF_USE_CG 1
+#endif
+#if PLATFORM(MAC) || PLATFORM(IOS) || (PLATFORM(WIN) && USE(CG))
+#define WTF_USE_CA 1
+#endif
+
+#if PLATFORM(BLACKBERRY)
+#define WTF_USE_SKIA 1
+#define WTF_USE_LOW_QUALITY_IMAGE_INTERPOLATION 1
+#define WTF_USE_LOW_QUALITY_IMAGE_NO_JPEG_DITHERING 1
+#define WTF_USE_LOW_QUALITY_IMAGE_NO_JPEG_FANCY_UPSAMPLING 1
+#endif
+
+#if PLATFORM(GTK)
+#define WTF_USE_CAIRO 1
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#endif
+
+/* On Windows, use QueryPerformanceCounter by default */
+#if OS(WINDOWS)
+#define WTF_USE_QUERY_PERFORMANCE_COUNTER 1
+#endif
+
+#if OS(WINCE) && !PLATFORM(QT)
+#define NOSHLWAPI /* shlwapi.h not available on WinCe */
+
+/* MSDN documentation says these functions are provided with uspce.lib. But we cannot find this file. */
+#define __usp10__ /* disable "usp10.h" */
+
+#define _INC_ASSERT /* disable "assert.h" */
+#define assert(x)
+
+#endif /* OS(WINCE) && !PLATFORM(QT) */
+
+#if OS(WINCE) && !PLATFORM(QT)
+#define WTF_USE_WCHAR_UNICODE 1
+#elif PLATFORM(GTK)
+/* The GTK+ Unicode backend is configurable */
+#else
+#define WTF_USE_ICU_UNICODE 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS)
+#if CPU(X86_64)
+#define WTF_USE_PLUGIN_HOST_PROCESS 1
+#endif
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#define WTF_USE_SCROLLBAR_PAINTER 1
+#define HAVE_XPC 1
+#endif
+#define WTF_USE_CF 1
+#define HAVE_READLINE 1
+#define HAVE_RUNLOOP_TIMER 1
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+#define HAVE_LAYER_HOSTING_IN_WINDOW_SERVER 1
+#endif
+#define WTF_USE_APPKIT 1
+#define WTF_USE_SECURITY_FRAMEWORK 1
+#endif /* PLATFORM(MAC) && !PLATFORM(IOS) */
+
+#if PLATFORM(IOS)
+#define DONT_FINALIZE_ON_MAIN_THREAD 1
+#endif
+
+#if PLATFORM(QT) && OS(DARWIN)
+#define WTF_USE_CF 1
+#endif
+
+#if OS(DARWIN) && !PLATFORM(GTK) && !PLATFORM(QT)
+#define ENABLE_PURGEABLE_MEMORY 1
+#endif
+
+#if PLATFORM(IOS)
+#define HAVE_READLINE 1
+#define WTF_USE_APPKIT 0
+#define WTF_USE_CF 1
+#define WTF_USE_CFNETWORK 1
+#define WTF_USE_NETWORK_CFDATA_ARRAY_CALLBACK 1
+#define WTF_USE_SECURITY_FRAMEWORK 0
+#define WTF_USE_WEB_THREAD 1
+#endif /* PLATFORM(IOS) */
+
+#if PLATFORM(WIN) && !OS(WINCE)
+#define WTF_USE_CF 1
+#endif
+
+#if PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)
+#define WTF_USE_CFNETWORK 1
+#endif
+
+#if USE(CFNETWORK) || PLATFORM(MAC) || PLATFORM(IOS)
+#define WTF_USE_CFURLCACHE 1
+#endif
+
+#if PLATFORM(WX)
+#if !CPU(PPC)
+#if !defined(ENABLE_ASSEMBLER)
+#define ENABLE_ASSEMBLER 1
+#endif
+#define ENABLE_JIT 1
+#endif
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#define ENABLE_LLINT 0
+#if OS(DARWIN)
+#define WTF_USE_CF 1
+#endif
+#endif
+
+#if !defined(HAVE_ACCESSIBILITY)
+#if PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(GTK) || PLATFORM(EFL)
+#define HAVE_ACCESSIBILITY 1
+#endif
+#endif /* !defined(HAVE_ACCESSIBILITY) */
+
+#if OS(UNIX)
+#define HAVE_ERRNO_H 1
+#define HAVE_MMAP 1
+#define HAVE_SIGNAL_H 1
+#define HAVE_STRINGS_H 1
+#define HAVE_SYS_PARAM_H 1
+#define HAVE_SYS_TIME_H 1
+#define WTF_USE_OS_RANDOMNESS 1
+#define WTF_USE_PTHREADS 1
+#endif /* OS(UNIX) */
+
+#if OS(UNIX) && !OS(QNX)
+#define HAVE_LANGINFO_H 1
+#endif
+
+#if (OS(FREEBSD) || OS(OPENBSD)) && !defined(__GLIBC__)
+#define HAVE_PTHREAD_NP_H 1
+#endif
+
+#if !defined(HAVE_VASPRINTF)
+#if !COMPILER(MSVC) && !COMPILER(RVCT) && !COMPILER(MINGW) && !(COMPILER(GCC) && OS(QNX))
+#define HAVE_VASPRINTF 1
+#endif
+#endif
+
+#if !defined(HAVE_STRNSTR)
+#if OS(DARWIN) || (OS(FREEBSD) && !defined(__GLIBC__))
+#define HAVE_STRNSTR 1
+#endif
+#endif
+
+#if !OS(WINDOWS) && !OS(SOLARIS)
+#define HAVE_TM_GMTOFF 1
+#define HAVE_TM_ZONE 1
+#define HAVE_TIMEGM 1
+#endif
+
+#if OS(DARWIN)
+
+#define HAVE_DISPATCH_H 1
+#define HAVE_MADV_FREE 1
+#define HAVE_MADV_FREE_REUSE 1
+#define HAVE_MERGESORT 1
+#define HAVE_PTHREAD_SETNAME_NP 1
+#define HAVE_SYS_TIMEB_H 1
+#define WTF_USE_ACCELERATE 1
+
+#if !PLATFORM(IOS)
+#define HAVE_HOSTED_CORE_ANIMATION 1
+#endif /* !PLATFORM(IOS) */
+
+#endif /* OS(DARWIN) */
+
+#if OS(WINDOWS) && !OS(WINCE)
+#define HAVE_SYS_TIMEB_H 1
+#define HAVE_ALIGNED_MALLOC 1
+#define HAVE_ISDEBUGGERPRESENT 1
+#endif
+
+#if OS(WINDOWS)
+#define HAVE_VIRTUALALLOC 1
+#define WTF_USE_OS_RANDOMNESS 1
+#endif
+
+#if OS(QNX)
+#define HAVE_MADV_FREE_REUSE 1
+#define HAVE_MADV_FREE 1
+#endif
+
+/* ENABLE macro defaults */
+
+/* FIXME: move out all ENABLE() defines from here to FeatureDefines.h */
+
+/* Include feature macros */
+#include <wtf/FeatureDefines.h>
+
+#if PLATFORM(QT)
+/* We must not customize the global operator new and delete for the Qt port. */
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#if !OS(UNIX)
+#define USE_SYSTEM_MALLOC 1
+#endif
+#endif
+
+#if PLATFORM(EFL)
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#endif
+
+#if !defined(ENABLE_GLOBAL_FASTMALLOC_NEW)
+#define ENABLE_GLOBAL_FASTMALLOC_NEW 1
+#endif
+
+#define ENABLE_DEBUG_WITH_BREAKPOINT 0
+#define ENABLE_SAMPLING_COUNTERS 0
+#define ENABLE_SAMPLING_FLAGS 0
+#define ENABLE_SAMPLING_REGIONS 0
+#define ENABLE_OPCODE_SAMPLING 0
+#define ENABLE_CODEBLOCK_SAMPLING 0
+#if ENABLE(CODEBLOCK_SAMPLING) && !ENABLE(OPCODE_SAMPLING)
+#error "CODEBLOCK_SAMPLING requires OPCODE_SAMPLING"
+#endif
+#if ENABLE(OPCODE_SAMPLING) || ENABLE(SAMPLING_FLAGS) || ENABLE(SAMPLING_REGIONS)
+#define ENABLE_SAMPLING_THREAD 1
+#endif
+
+#if !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32_64)
+#if (CPU(X86_64) && (OS(UNIX) || OS(WINDOWS))) \
+ || (CPU(IA64) && !CPU(IA64_32)) \
+ || CPU(ALPHA) \
+ || CPU(SPARC64) \
+ || CPU(S390X) \
+ || CPU(PPC64)
+#define WTF_USE_JSVALUE64 1
+#else
+#define WTF_USE_JSVALUE32_64 1
+#endif
+#endif /* !defined(WTF_USE_JSVALUE64) && !defined(WTF_USE_JSVALUE32_64) */
+
+/* Disable the JIT on versions of GCC prior to 4.1 */
+#if !defined(ENABLE_JIT) && COMPILER(GCC) && !GCC_VERSION_AT_LEAST(4, 1, 0)
+#define ENABLE_JIT 0
+#endif
+
+#if !defined(ENABLE_JIT) && CPU(SH4) && PLATFORM(QT)
+#define ENABLE_JIT 1
+#endif
+
+/* The JIT is enabled by default on all x86, x86-64, ARM & MIPS platforms. */
+#if !defined(ENABLE_JIT) \
+ && (CPU(X86) || CPU(X86_64) || CPU(ARM) || CPU(MIPS)) \
+ && (OS(DARWIN) || !COMPILER(GCC) || GCC_VERSION_AT_LEAST(4, 1, 0)) \
+ && !OS(WINCE) \
+ && !(OS(QNX) && !PLATFORM(QT)) /* We use JIT in QNX Qt */
+#define ENABLE_JIT 1
+#endif
+
+/* If possible, try to enable a disassembler. This is optional. We proceed in two
+ steps: first we try to find some disassembler that we can use, and then we
+ decide if the high-level disassembler API can be enabled. */
+#if !defined(WTF_USE_UDIS86) && ENABLE(JIT) && (PLATFORM(MAC) || (PLATFORM(QT) && OS(LINUX))) \
+ && (CPU(X86) || CPU(X86_64))
+#define WTF_USE_UDIS86 1
+#endif
+
+#if !defined(ENABLE_DISASSEMBLER) && USE(UDIS86)
+#define ENABLE_DISASSEMBLER 1
+#endif
+
+/* On the GTK+ port we take an extra precaution for LLINT support:
+ * We disable it on x86 builds if the build target doesn't support SSE2
+ * instructions (LLINT requires SSE2 on this platform). */
+#if !defined(ENABLE_LLINT) && PLATFORM(GTK) && CPU(X86) && COMPILER(GCC) \
+ && !defined(__SSE2__)
+#define ENABLE_LLINT 0
+#endif
+
+/* On some of the platforms where we have a JIT, we want to also have the
+ low-level interpreter. */
+#if !defined(ENABLE_LLINT) \
+ && ENABLE(JIT) \
+ && (OS(DARWIN) || OS(LINUX)) \
+ && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(GTK) || PLATFORM(QT)) \
+ && (CPU(X86) || CPU(X86_64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(MIPS))
+#define ENABLE_LLINT 1
+#endif
+
+#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT) && !COMPILER(MSVC)
+/* Enable the DFG JIT on X86 and X86_64. Only tested on Mac and GNU/Linux. */
+#if (CPU(X86) || CPU(X86_64)) && (OS(DARWIN) || OS(LINUX))
+#define ENABLE_DFG_JIT 1
+#endif
+/* Enable the DFG JIT on ARMv7. Only tested on iOS and Qt Linux. */
+#if CPU(ARM_THUMB2) && (PLATFORM(IOS) || PLATFORM(BLACKBERRY) || PLATFORM(QT))
+#define ENABLE_DFG_JIT 1
+#endif
+/* Enable the DFG JIT on ARM. */
+#if CPU(ARM_TRADITIONAL)
+#define ENABLE_DFG_JIT 1
+#endif
+/* Enable the DFG JIT on MIPS. */
+#if CPU(MIPS)
+#define ENABLE_DFG_JIT 1
+#endif
+#endif
+
+/* If the jit is not available, enable the LLInt C Loop: */
+#if !ENABLE(JIT)
+#undef ENABLE_LLINT /* Undef so that we can redefine it. */
+#undef ENABLE_LLINT_C_LOOP /* Undef so that we can redefine it. */
+#undef ENABLE_DFG_JIT /* Undef so that we can redefine it. */
+#define ENABLE_LLINT 1
+#define ENABLE_LLINT_C_LOOP 1
+#define ENABLE_DFG_JIT 0
+#endif
+
+/* Do a sanity check to make sure that we at least have one execution engine in
+ use: */
+#if !(ENABLE(JIT) || ENABLE(LLINT))
+#error You have to have at least one execution model enabled to build JSC
+#endif
+
+/* Profiling of types and values used by JIT code. DFG_JIT depends on it, but you
+ can enable it manually with DFG turned off if you want to use it as a standalone
+ profiler. In that case, you probably want to also enable VERBOSE_VALUE_PROFILE
+ below. */
+#if !defined(ENABLE_VALUE_PROFILER) && ENABLE(DFG_JIT)
+#define ENABLE_VALUE_PROFILER 1
+#endif
+
+#if !defined(ENABLE_VERBOSE_VALUE_PROFILE) && ENABLE(VALUE_PROFILER)
+#define ENABLE_VERBOSE_VALUE_PROFILE 0
+#endif
+
+#if !defined(ENABLE_SIMPLE_HEAP_PROFILING)
+#define ENABLE_SIMPLE_HEAP_PROFILING 0
+#endif
+
+/* Counts uses of write barriers using sampling counters. Be sure to also
+ set ENABLE_SAMPLING_COUNTERS to 1. */
+#if !defined(ENABLE_WRITE_BARRIER_PROFILING)
+#define ENABLE_WRITE_BARRIER_PROFILING 0
+#endif
+
+/* Enable verification that that register allocations are not made within generated control flow.
+ Turned on for debug builds. */
+#if !defined(ENABLE_DFG_REGISTER_ALLOCATION_VALIDATION) && ENABLE(DFG_JIT)
+#if !defined(NDEBUG)
+#define ENABLE_DFG_REGISTER_ALLOCATION_VALIDATION 1
+#else
+#define ENABLE_DFG_REGISTER_ALLOCATION_VALIDATION 0
+#endif
+#endif
+
+/* Configure the JIT */
+#if CPU(X86) && COMPILER(MSVC)
+#define JSC_HOST_CALL __fastcall
+#elif CPU(X86) && COMPILER(GCC)
+#define JSC_HOST_CALL __attribute__ ((fastcall))
+#else
+#define JSC_HOST_CALL
+#endif
+
+/* Configure the interpreter */
+#if COMPILER(GCC) || (COMPILER(RVCT) && defined(__GNUC__))
+#define HAVE_COMPUTED_GOTO 1
+#endif
+
+/* Determine if we need to enable Computed Goto Opcodes or not: */
+#if HAVE(COMPUTED_GOTO) && ENABLE(LLINT)
+#define ENABLE_COMPUTED_GOTO_OPCODES 1
+#endif
+
+/* Regular Expression Tracing - Set to 1 to trace RegExp's in jsc. Results dumped at exit */
+#define ENABLE_REGEXP_TRACING 0
+
+/* Yet Another Regex Runtime - turned on by default for JIT enabled ports. */
+#if !defined(ENABLE_YARR_JIT) && (ENABLE(JIT) || ENABLE(LLINT_C_LOOP)) && !(OS(QNX) && PLATFORM(QT))
+#define ENABLE_YARR_JIT 1
+
+/* Setting this flag compares JIT results with interpreter results. */
+#define ENABLE_YARR_JIT_DEBUG 0
+#endif
+
+/* If either the JIT or the RegExp JIT is enabled, then the Assembler must be
+ enabled as well: */
+#if ENABLE(JIT) || ENABLE(YARR_JIT)
+#if defined(ENABLE_ASSEMBLER) && !ENABLE_ASSEMBLER
+#error "Cannot enable the JIT or RegExp JIT without enabling the Assembler"
+#else
+#undef ENABLE_ASSEMBLER
+#define ENABLE_ASSEMBLER 1
+#endif
+#endif
+
+/* Pick which allocator to use; we only need an executable allocator if the assembler is compiled in.
+ On x86-64 we use a single fixed mmap, on other platforms we mmap on demand. */
+#if ENABLE(ASSEMBLER)
+#if CPU(X86_64) && !OS(WINDOWS) || PLATFORM(IOS)
+#define ENABLE_EXECUTABLE_ALLOCATOR_FIXED 1
+#else
+#define ENABLE_EXECUTABLE_ALLOCATOR_DEMAND 1
+#endif
+#endif
+
+/* Use the QXmlStreamReader implementation for XMLDocumentParser */
+/* Use the QXmlQuery implementation for XSLTProcessor */
+#if PLATFORM(QT)
+#if !USE(LIBXML2)
+#define WTF_USE_QXMLSTREAM 1
+#define WTF_USE_QXMLQUERY 1
+#endif
+#endif
+
+/* Accelerated compositing */
+#if PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(QT) || (PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO))
+#define WTF_USE_ACCELERATED_COMPOSITING 1
+#endif
+
+#if ENABLE(WEBGL) && !defined(WTF_USE_3D_GRAPHICS)
+#define WTF_USE_3D_GRAPHICS 1
+#endif
+
+/* Qt always uses Texture Mapper */
+#if PLATFORM(QT)
+#define WTF_USE_TEXTURE_MAPPER 1
+#endif
+
+#if USE(TEXTURE_MAPPER) && USE(3D_GRAPHICS) && !defined(WTF_USE_TEXTURE_MAPPER_GL)
+#define WTF_USE_TEXTURE_MAPPER_GL 1
+#endif
+
+/* Compositing on the UI-process in WebKit2 */
+#if USE(3D_GRAPHICS) && PLATFORM(QT)
+#define WTF_USE_COORDINATED_GRAPHICS 1
+#endif
+
+#if PLATFORM(MAC) || PLATFORM(IOS)
+#define WTF_USE_PROTECTION_SPACE_AUTH_CALLBACK 1
+#endif
+
+/* Set up a define for a common error that is intended to cause a build error -- thus the space after Error. */
+#define WTF_PLATFORM_CFNETWORK Error USE_macro_should_be_used_with_CFNETWORK
+
+/* FIXME: Eventually we should enable this for all platforms and get rid of the define. */
+#if PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(QT) || PLATFORM(GTK) || PLATFORM(EFL)
+#define WTF_USE_PLATFORM_STRATEGIES 1
+#endif
+
+#if PLATFORM(WIN)
+#define WTF_USE_CROSS_PLATFORM_CONTEXT_MENUS 1
+#endif
+
+#if PLATFORM(MAC) && HAVE(ACCESSIBILITY)
+#define WTF_USE_ACCESSIBILITY_CONTEXT_MENUS 1
+#endif
+
+#if CPU(ARM_THUMB2)
+#define ENABLE_BRANCH_COMPACTION 1
+#endif
+
+#if !defined(ENABLE_THREADING_LIBDISPATCH) && HAVE(DISPATCH_H)
+#define ENABLE_THREADING_LIBDISPATCH 1
+#elif !defined(ENABLE_THREADING_OPENMP) && defined(_OPENMP)
+#define ENABLE_THREADING_OPENMP 1
+#elif !defined(THREADING_GENERIC)
+#define ENABLE_THREADING_GENERIC 1
+#endif
+
+#if USE(GLIB)
+#include <wtf/gobject/GTypedefs.h>
+#endif
+
+/* FIXME: This define won't be needed once #27551 is fully landed. However,
+ since most ports try to support sub-project independence, adding new headers
+ to WTF causes many ports to break, and so this way we can address the build
+ breakages one port at a time. */
+#if !defined(WTF_USE_EXPORT_MACROS) && (PLATFORM(MAC) || PLATFORM(QT) || PLATFORM(WX))
+#define WTF_USE_EXPORT_MACROS 1
+#endif
+
+#if !defined(WTF_USE_EXPORT_MACROS_FOR_TESTING) && (PLATFORM(GTK) || PLATFORM(WIN))
+#define WTF_USE_EXPORT_MACROS_FOR_TESTING 1
+#endif
+
+#if (PLATFORM(QT) && !OS(DARWIN) && !OS(WINDOWS)) || PLATFORM(GTK) || PLATFORM(EFL)
+#define WTF_USE_UNIX_DOMAIN_SOCKETS 1
+#endif
+
+#if !defined(ENABLE_COMPARE_AND_SWAP) && (OS(WINDOWS) || (COMPILER(GCC) && (CPU(X86) || CPU(X86_64) || CPU(ARM_THUMB2))))
+#define ENABLE_COMPARE_AND_SWAP 1
+#endif
+
+#define ENABLE_OBJECT_MARK_LOGGING 0
+
+#if !defined(ENABLE_PARALLEL_GC) && !ENABLE(OBJECT_MARK_LOGGING) && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(BLACKBERRY) || PLATFORM(GTK)) && ENABLE(COMPARE_AND_SWAP)
+#define ENABLE_PARALLEL_GC 1
+#elif PLATFORM(QT)
+// Parallel GC is temporarily disabled on Qt because of regular crashes, see https://bugs.webkit.org/show_bug.cgi?id=90957 for details
+#define ENABLE_PARALLEL_GC 0
+#endif
+
+#if !defined(ENABLE_GC_VALIDATION) && !defined(NDEBUG)
+#define ENABLE_GC_VALIDATION 1
+#endif
+
+#if !defined(ENABLE_BINDING_INTEGRITY)
+#define ENABLE_BINDING_INTEGRITY 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
+#define WTF_USE_AVFOUNDATION 1
+#endif
+
+#if (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 60000) || (PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080)
+#define WTF_USE_COREMEDIA 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
+#define HAVE_AVFOUNDATION_TEXT_TRACK_SUPPORT 1
+#endif
+
+#if PLATFORM(MAC) && !PLATFORM(IOS) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1090
+#define HAVE_MEDIA_ACCESSIBILITY_FRAMEWORK 1
+#endif
+
+#if PLATFORM(MAC) || PLATFORM(GTK) || (PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)) || PLATFORM(BLACKBERRY)
+#define WTF_USE_REQUEST_ANIMATION_FRAME_TIMER 1
+#endif
+
+#if PLATFORM(MAC) || PLATFORM(BLACKBERRY)
+#define WTF_USE_REQUEST_ANIMATION_FRAME_DISPLAY_MONITOR 1
+#endif
+
+#if PLATFORM(MAC) && (PLATFORM(IOS) || __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070)
+#define HAVE_INVERTED_WHEEL_EVENTS 1
+#endif
+
+#if PLATFORM(MAC)
+#define WTF_USE_COREAUDIO 1
+#endif
+
+#if !defined(WTF_USE_ZLIB) && !PLATFORM(QT)
+#define WTF_USE_ZLIB 1
+#endif
+
+#if PLATFORM(QT)
+#include <qglobal.h>
+#if defined(QT_OPENGL_ES_2) && !defined(WTF_USE_OPENGL_ES_2)
+#define WTF_USE_OPENGL_ES_2 1
+#endif
+#endif
+
+#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
+#define WTF_USE_CONTENT_FILTERING 1
+#endif
+
+#endif /* WTF_Platform_h */
diff --git a/src/3rdparty/masm/wtf/PossiblyNull.h b/src/3rdparty/masm/wtf/PossiblyNull.h
new file mode 100644
index 0000000000..46a7d713be
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PossiblyNull.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PossiblyNull_h
+#define PossiblyNull_h
+
+#include <wtf/Assertions.h>
+
+namespace WTF {
+
+template <typename T> struct PossiblyNull {
+ PossiblyNull(T data)
+ : m_data(data)
+ {
+ }
+ PossiblyNull(const PossiblyNull<T>& source)
+ : m_data(source.m_data)
+ {
+ source.m_data = 0;
+ }
+ ~PossiblyNull() { ASSERT(!m_data); }
+ bool getValue(T& out) WARN_UNUSED_RETURN;
+private:
+ mutable T m_data;
+};
+
+template <typename T> bool PossiblyNull<T>::getValue(T& out)
+{
+ out = m_data;
+ bool result = !!m_data;
+ m_data = 0;
+ return result;
+}
+
+}
+
+#endif
diff --git a/src/3rdparty/masm/wtf/PrintStream.cpp b/src/3rdparty/masm/wtf/PrintStream.cpp
new file mode 100644
index 0000000000..3bf362e281
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PrintStream.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PrintStream.h"
+
+#include <stdio.h>
+#include <wtf/text/CString.h>
+#include <wtf/text/WTFString.h>
+
+namespace WTF {
+
+PrintStream::PrintStream() { }
+PrintStream::~PrintStream() { } // Force the vtable to be in this module
+
+void PrintStream::printf(const char* format, ...)
+{
+ va_list argList;
+ va_start(argList, format);
+ vprintf(format, argList);
+ va_end(argList);
+}
+
+void PrintStream::flush()
+{
+}
+
+void printInternal(PrintStream& out, const char* string)
+{
+ out.printf("%s", string);
+}
+
+void printInternal(PrintStream& out, bool value)
+{
+ if (value)
+ out.print("true");
+ else
+ out.print("false");
+}
+
+void printInternal(PrintStream& out, int value)
+{
+ out.printf("%d", value);
+}
+
+void printInternal(PrintStream& out, unsigned value)
+{
+ out.printf("%u", value);
+}
+
+void printInternal(PrintStream& out, long value)
+{
+ out.printf("%ld", value);
+}
+
+void printInternal(PrintStream& out, unsigned long value)
+{
+ out.printf("%lu", value);
+}
+
+void printInternal(PrintStream& out, long long value)
+{
+ out.printf("%lld", value);
+}
+
+void printInternal(PrintStream& out, unsigned long long value)
+{
+ out.printf("%llu", value);
+}
+
+void printInternal(PrintStream& out, float value)
+{
+ out.print(static_cast<double>(value));
+}
+
+void printInternal(PrintStream& out, double value)
+{
+ out.printf("%lf", value);
+}
+
+void printInternal(PrintStream& out, RawPointer value)
+{
+ out.printf("%p", value.value());
+}
+
+void dumpCharacter(PrintStream& out, char value)
+{
+ out.printf("%c", value);
+}
+
+} // namespace WTF
+
diff --git a/src/3rdparty/masm/wtf/PrintStream.h b/src/3rdparty/masm/wtf/PrintStream.h
new file mode 100644
index 0000000000..6fcf9c1567
--- /dev/null
+++ b/src/3rdparty/masm/wtf/PrintStream.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PrintStream_h
+#define PrintStream_h
+
+#include <stdarg.h>
+#include <wtf/FastAllocBase.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Platform.h>
+#include <wtf/RawPointer.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+class CString;
+class String;
+
+class PrintStream {
+ WTF_MAKE_FAST_ALLOCATED; WTF_MAKE_NONCOPYABLE(PrintStream);
+public:
+ PrintStream();
+ virtual ~PrintStream();
+
+ void printf(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+ virtual void vprintf(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(2, 0) = 0;
+
+ // Typically a no-op for many subclasses of PrintStream, this is a hint that
+ // the implementation should flush its buffers if it had not done so already.
+ virtual void flush();
+
+ template<typename T>
+ void print(const T& value)
+ {
+ printInternal(*this, value);
+ }
+
+ template<typename T1, typename T2>
+ void print(const T1& value1, const T2& value2)
+ {
+ print(value1);
+ print(value2);
+ }
+
+ template<typename T1, typename T2, typename T3>
+ void print(const T1& value1, const T2& value2, const T3& value3)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ print(value11);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ print(value11);
+ print(value12);
+ }
+
+ template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13>
+ void print(const T1& value1, const T2& value2, const T3& value3, const T4& value4, const T5& value5, const T6& value6, const T7& value7, const T8& value8, const T9& value9, const T10& value10, const T11& value11, const T12& value12, const T13& value13)
+ {
+ print(value1);
+ print(value2);
+ print(value3);
+ print(value4);
+ print(value5);
+ print(value6);
+ print(value7);
+ print(value8);
+ print(value9);
+ print(value10);
+ print(value11);
+ print(value12);
+ print(value13);
+ }
+};
+
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, const char*);
+inline void printInternal(PrintStream& out, char* value) { printInternal(out, static_cast<const char*>(value)); }
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, bool);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, int);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, unsigned);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, unsigned long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, long long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, unsigned long long);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, float);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, double);
+WTF_EXPORT_PRIVATE void printInternal(PrintStream&, RawPointer);
+
+template<typename T>
+void printInternal(PrintStream& out, const T& value)
+{
+ value.dump(out);
+}
+
+#define MAKE_PRINT_ADAPTOR(Name, Type, function) \
+ class Name { \
+ public: \
+ Name(const Type& value) \
+ : m_value(value) \
+ { \
+ } \
+ void dump(PrintStream& out) const \
+ { \
+ function(out, m_value); \
+ } \
+ private: \
+ Type m_value; \
+ }
+
+#define MAKE_PRINT_METHOD_ADAPTOR(Name, Type, method) \
+ class Name { \
+ public: \
+ Name(const Type& value) \
+ : m_value(value) \
+ { \
+ } \
+ void dump(PrintStream& out) const \
+ { \
+ m_value.method(out); \
+ } \
+ private: \
+ const Type& m_value; \
+ }
+
+#define MAKE_PRINT_METHOD(Type, dumpMethod, method) \
+ MAKE_PRINT_METHOD_ADAPTOR(DumperFor_##method, Type, dumpMethod); \
+ DumperFor_##method method() const { return DumperFor_##method(*this); }
+
+// Use an adaptor-based dumper for characters to avoid situations where
+// you've "compressed" an integer to a character and it ends up printing
+// as ASCII when you wanted it to print as a number.
+void dumpCharacter(PrintStream&, char);
+MAKE_PRINT_ADAPTOR(CharacterDump, char, dumpCharacter);
+
+template<typename T>
+class PointerDump {
+public:
+ PointerDump(const T* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ void dump(PrintStream& out) const
+ {
+ if (m_ptr)
+ printInternal(out, *m_ptr);
+ else
+ out.print("(null)");
+ }
+private:
+ const T* m_ptr;
+};
+
+template<typename T>
+PointerDump<T> pointerDump(const T* ptr) { return PointerDump<T>(ptr); }
+
+} // namespace WTF
+
+using WTF::CharacterDump;
+using WTF::PointerDump;
+using WTF::PrintStream;
+using WTF::pointerDump;
+
+#endif // PrintStream_h
+
diff --git a/src/3rdparty/masm/wtf/RawPointer.h b/src/3rdparty/masm/wtf/RawPointer.h
new file mode 100644
index 0000000000..6dc7292fb4
--- /dev/null
+++ b/src/3rdparty/masm/wtf/RawPointer.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RawPointer_h
+#define RawPointer_h
+
+namespace WTF {
+
+class RawPointer {
+public:
+ RawPointer()
+ : m_value(0)
+ {
+ }
+
+ explicit RawPointer(void* value)
+ : m_value(value)
+ {
+ }
+
+ explicit RawPointer(const void* value)
+ : m_value(value)
+ {
+ }
+
+ const void* value() const { return m_value; }
+
+private:
+ const void* m_value;
+};
+
+} // namespace WTF
+
+using WTF::RawPointer;
+
+#endif // RawPointer_h
diff --git a/src/3rdparty/masm/wtf/StdLibExtras.h b/src/3rdparty/masm/wtf/StdLibExtras.h
new file mode 100644
index 0000000000..605f98ec82
--- /dev/null
+++ b/src/3rdparty/masm/wtf/StdLibExtras.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_StdLibExtras_h
+#define WTF_StdLibExtras_h
+
+#include <wtf/Assertions.h>
+#include <wtf/CheckedArithmetic.h>
+
+// Use these to declare and define a static local variable (static T;) so that
+// it is leaked so that its destructors are not called at exit. Using this
+// macro also allows workarounds a compiler bug present in Apple's version of GCC 4.0.1.
+#ifndef DEFINE_STATIC_LOCAL
+#if COMPILER(GCC) && defined(__APPLE_CC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 0 && __GNUC_PATCHLEVEL__ == 1
+#define DEFINE_STATIC_LOCAL(type, name, arguments) \
+ static type* name##Ptr = new type arguments; \
+ type& name = *name##Ptr
+#else
+#define DEFINE_STATIC_LOCAL(type, name, arguments) \
+ static type& name = *new type arguments
+#endif
+#endif
+
+// Use this macro to declare and define a debug-only global variable that may have a
+// non-trivial constructor and destructor. When building with clang, this will suppress
+// warnings about global constructors and exit-time destructors.
+#ifndef NDEBUG
+#if COMPILER(CLANG)
+#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
+ _Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
+ static type name arguments; \
+ _Pragma("clang diagnostic pop")
+#else
+#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) \
+ static type name arguments;
+#endif // COMPILER(CLANG)
+#else
+#define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments)
+#endif // NDEBUG
+
+// OBJECT_OFFSETOF: Like the C++ offsetof macro, but you can use it with classes.
+// The magic number 0x4000 is insignificant. We use it to avoid using NULL, since
+// NULL can cause compiler problems, especially in cases of multiple inheritance.
+#define OBJECT_OFFSETOF(class, field) (reinterpret_cast<ptrdiff_t>(&(reinterpret_cast<class*>(0x4000)->field)) - 0x4000)
+
+// STRINGIZE: Can convert any value to quoted string, even expandable macros
+#define STRINGIZE(exp) #exp
+#define STRINGIZE_VALUE_OF(exp) STRINGIZE(exp)
+
+/*
+ * The reinterpret_cast<Type1*>([pointer to Type2]) expressions - where
+ * sizeof(Type1) > sizeof(Type2) - cause the following warning on ARM with GCC:
+ * increases required alignment of target type.
+ *
+ * An implicit or an extra static_cast<void*> bypasses the warning.
+ * For more info see the following bugzilla entries:
+ * - https://bugs.webkit.org/show_bug.cgi?id=38045
+ * - http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43976
+ */
+#if (CPU(ARM) || CPU(MIPS)) && COMPILER(GCC)
+template<typename Type>
+bool isPointerTypeAlignmentOkay(Type* ptr)
+{
+ return !(reinterpret_cast<intptr_t>(ptr) % __alignof__(Type));
+}
+
+template<typename TypePtr>
+TypePtr reinterpret_cast_ptr(void* ptr)
+{
+ ASSERT(isPointerTypeAlignmentOkay(reinterpret_cast<TypePtr>(ptr)));
+ return reinterpret_cast<TypePtr>(ptr);
+}
+
+template<typename TypePtr>
+TypePtr reinterpret_cast_ptr(const void* ptr)
+{
+ ASSERT(isPointerTypeAlignmentOkay(reinterpret_cast<TypePtr>(ptr)));
+ return reinterpret_cast<TypePtr>(ptr);
+}
+#else
+template<typename Type>
+bool isPointerTypeAlignmentOkay(Type*)
+{
+ return true;
+}
+#define reinterpret_cast_ptr reinterpret_cast
+#endif
+
+namespace WTF {
+
+static const size_t KB = 1024;
+static const size_t MB = 1024 * 1024;
+
+inline bool isPointerAligned(void* p)
+{
+ return !((intptr_t)(p) & (sizeof(char*) - 1));
+}
+
+inline bool is8ByteAligned(void* p)
+{
+ return !((uintptr_t)(p) & (sizeof(double) - 1));
+}
+
+/*
+ * C++'s idea of a reinterpret_cast lacks sufficient cojones.
+ */
+template<typename TO, typename FROM>
+inline TO bitwise_cast(FROM from)
+{
+ COMPILE_ASSERT(sizeof(TO) == sizeof(FROM), WTF_bitwise_cast_sizeof_casted_types_is_equal);
+ union {
+ FROM from;
+ TO to;
+ } u;
+ u.from = from;
+ return u.to;
+}
+
+template<typename To, typename From>
+inline To safeCast(From value)
+{
+ ASSERT(isInBounds<To>(value));
+ return static_cast<To>(value);
+}
+
+// Returns a count of the number of bits set in 'bits'.
+inline size_t bitCount(unsigned bits)
+{
+ bits = bits - ((bits >> 1) & 0x55555555);
+ bits = (bits & 0x33333333) + ((bits >> 2) & 0x33333333);
+ return (((bits + (bits >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
+}
+
+// Macro that returns a compile time constant with the length of an array, but gives an error if passed a non-array.
+template<typename T, size_t Size> char (&ArrayLengthHelperFunction(T (&)[Size]))[Size];
+// GCC needs some help to deduce a 0 length array.
+#if COMPILER(GCC)
+template<typename T> char (&ArrayLengthHelperFunction(T (&)[0]))[0];
+#endif
+#define WTF_ARRAY_LENGTH(array) sizeof(::WTF::ArrayLengthHelperFunction(array))
+
+// Efficient implementation that takes advantage of powers of two.
+inline size_t roundUpToMultipleOf(size_t divisor, size_t x)
+{
+ ASSERT(divisor && !(divisor & (divisor - 1)));
+ size_t remainderMask = divisor - 1;
+ return (x + remainderMask) & ~remainderMask;
+}
+template<size_t divisor> inline size_t roundUpToMultipleOf(size_t x)
+{
+ COMPILE_ASSERT(divisor && !(divisor & (divisor - 1)), divisor_is_a_power_of_two);
+ return roundUpToMultipleOf(divisor, x);
+}
+
+enum BinarySearchMode {
+ KeyMustBePresentInArray,
+ KeyMightNotBePresentInArray,
+ ReturnAdjacentElementIfKeyIsNotPresent
+};
+
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey, BinarySearchMode mode>
+inline ArrayElementType* binarySearchImpl(ArrayType& array, size_t size, KeyType key, const ExtractKey& extractKey = ExtractKey())
+{
+ size_t offset = 0;
+ while (size > 1) {
+ size_t pos = (size - 1) >> 1;
+ KeyType val = extractKey(&array[offset + pos]);
+
+ if (val == key)
+ return &array[offset + pos];
+ // The item we are looking for is smaller than the item being check; reduce the value of 'size',
+ // chopping off the right hand half of the array.
+ if (key < val)
+ size = pos;
+ // Discard all values in the left hand half of the array, up to and including the item at pos.
+ else {
+ size -= (pos + 1);
+ offset += (pos + 1);
+ }
+
+ ASSERT(mode != KeyMustBePresentInArray || size);
+ }
+
+ if (mode == KeyMightNotBePresentInArray && !size)
+ return 0;
+
+ ArrayElementType* result = &array[offset];
+
+ if (mode == KeyMightNotBePresentInArray && key != extractKey(result))
+ return 0;
+
+ if (mode == KeyMustBePresentInArray) {
+ ASSERT(size == 1);
+ ASSERT(key == extractKey(result));
+ }
+
+ return result;
+}
+
+// If the element is not found, crash if asserts are enabled, and behave like approximateBinarySearch in release builds.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* binarySearch(ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMustBePresentInArray>(array, size, key, extractKey);
+}
+
+// Return zero if the element is not found.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* tryBinarySearch(ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMightNotBePresentInArray>(array, size, key, extractKey);
+}
+
+// Return the element that is either to the left, or the right, of where the element would have been found.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* approximateBinarySearch(ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, ReturnAdjacentElementIfKeyIsNotPresent>(array, size, key, extractKey);
+}
+
+// Variants of the above that use const.
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* binarySearch(const ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMustBePresentInArray>(const_cast<ArrayType&>(array), size, key, extractKey);
+}
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* tryBinarySearch(const ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, KeyMightNotBePresentInArray>(const_cast<ArrayType&>(array), size, key, extractKey);
+}
+template<typename ArrayElementType, typename KeyType, typename ArrayType, typename ExtractKey>
+inline ArrayElementType* approximateBinarySearch(const ArrayType& array, size_t size, KeyType key, ExtractKey extractKey = ExtractKey())
+{
+ return binarySearchImpl<ArrayElementType, KeyType, ArrayType, ExtractKey, ReturnAdjacentElementIfKeyIsNotPresent>(const_cast<ArrayType&>(array), size, key, extractKey);
+}
+
+} // namespace WTF
+
+// This version of placement new omits a 0 check.
+enum NotNullTag { NotNull };
+inline void* operator new(size_t, NotNullTag, void* location)
+{
+ ASSERT(location);
+ return location;
+}
+
+using WTF::KB;
+using WTF::MB;
+using WTF::isPointerAligned;
+using WTF::is8ByteAligned;
+using WTF::binarySearch;
+using WTF::tryBinarySearch;
+using WTF::approximateBinarySearch;
+using WTF::bitwise_cast;
+using WTF::safeCast;
+
+#endif // WTF_StdLibExtras_h
diff --git a/src/3rdparty/masm/wtf/VMTags.h b/src/3rdparty/masm/wtf/VMTags.h
new file mode 100644
index 0000000000..117bc3721e
--- /dev/null
+++ b/src/3rdparty/masm/wtf/VMTags.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef VMTags_h
+#define VMTags_h
+
+// On Mac OS X, the VM subsystem allows tagging memory requested from mmap and vm_map
+// in order to aid tools that inspect system memory use.
+#if OS(DARWIN)
+
+#include <mach/vm_statistics.h>
+
+#if defined(VM_MEMORY_TCMALLOC)
+#define VM_TAG_FOR_TCMALLOC_MEMORY VM_MAKE_TAG(VM_MEMORY_TCMALLOC)
+#else
+#define VM_TAG_FOR_TCMALLOC_MEMORY VM_MAKE_TAG(53)
+#endif // defined(VM_MEMORY_TCMALLOC)
+
+#if defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+#else
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY VM_MAKE_TAG(64)
+#endif // defined(VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR)
+
+#if defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
+#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
+#else
+#define VM_TAG_FOR_REGISTERFILE_MEMORY VM_MAKE_TAG(65)
+#endif // defined(VM_MEMORY_JAVASCRIPT_JIT_REGISTER_FILE)
+
+#if defined(VM_MEMORY_JAVASCRIPT_CORE)
+#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_CORE)
+#else
+#define VM_TAG_FOR_COLLECTOR_MEMORY VM_MAKE_TAG(63)
+#endif // defined(VM_MEMORY_JAVASCRIPT_CORE)
+
+#if defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
+#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
+#else
+#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY VM_MAKE_TAG(69)
+#endif // defined(VM_MEMORY_WEBCORE_PURGEABLE_BUFFERS)
+
+#else // OS(DARWIN)
+
+#define VM_TAG_FOR_TCMALLOC_MEMORY -1
+#define VM_TAG_FOR_COLLECTOR_MEMORY -1
+#define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY -1
+#define VM_TAG_FOR_REGISTERFILE_MEMORY -1
+#define VM_TAG_FOR_WEBCORE_PURGEABLE_MEMORY -1
+
+#endif // OS(DARWIN)
+
+#endif // VMTags_h
diff --git a/src/3rdparty/masm/yarr/Yarr.h b/src/3rdparty/masm/yarr/Yarr.h
new file mode 100644
index 0000000000..d393e9fa90
--- /dev/null
+++ b/src/3rdparty/masm/yarr/Yarr.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Yarr_h
+#define Yarr_h
+
+#include "YarrInterpreter.h"
+#include "YarrPattern.h"
+
+namespace JSC { namespace Yarr {
+
+#define YarrStackSpaceForBackTrackInfoPatternCharacter 1 // Only for !fixed quantifiers.
+#define YarrStackSpaceForBackTrackInfoCharacterClass 1 // Only for !fixed quantifiers.
+#define YarrStackSpaceForBackTrackInfoBackReference 2
+#define YarrStackSpaceForBackTrackInfoAlternative 1 // One per alternative.
+#define YarrStackSpaceForBackTrackInfoParentheticalAssertion 1
+#define YarrStackSpaceForBackTrackInfoParenthesesOnce 1 // Only for !fixed quantifiers.
+#define YarrStackSpaceForBackTrackInfoParenthesesTerminal 1
+#define YarrStackSpaceForBackTrackInfoParentheses 2
+
+static const unsigned quantifyInfinite = UINT_MAX;
+static const unsigned offsetNoMatch = (unsigned)-1;
+
+// The below limit restricts the number of "recursive" match calls in order to
+// avoid spending exponential time on complex regular expressions.
+static const unsigned matchLimit = 1000000;
+
+enum JSRegExpResult {
+ JSRegExpMatch = 1,
+ JSRegExpNoMatch = 0,
+ JSRegExpErrorNoMatch = -1,
+ JSRegExpErrorHitLimit = -2,
+ JSRegExpErrorNoMemory = -3,
+ JSRegExpErrorInternal = -4
+};
+
+enum YarrCharSize {
+ Char8,
+ Char16
+};
+
+} } // namespace JSC::Yarr
+
+#endif // Yarr_h
+
diff --git a/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp
new file mode 100644
index 0000000000..7bb3d08eb5
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// DO NOT EDIT! - this file autogenerated by YarrCanonicalizeUCS2.js
+
+#include "config.h"
+#include "YarrCanonicalizeUCS2.h"
+
+namespace JSC { namespace Yarr {
+
+#include <stdint.h>
+
+uint16_t ucs2CharacterSet0[] = { 0x01c4u, 0x01c5u, 0x01c6u, 0 };
+uint16_t ucs2CharacterSet1[] = { 0x01c7u, 0x01c8u, 0x01c9u, 0 };
+uint16_t ucs2CharacterSet2[] = { 0x01cau, 0x01cbu, 0x01ccu, 0 };
+uint16_t ucs2CharacterSet3[] = { 0x01f1u, 0x01f2u, 0x01f3u, 0 };
+uint16_t ucs2CharacterSet4[] = { 0x0392u, 0x03b2u, 0x03d0u, 0 };
+uint16_t ucs2CharacterSet5[] = { 0x0395u, 0x03b5u, 0x03f5u, 0 };
+uint16_t ucs2CharacterSet6[] = { 0x0398u, 0x03b8u, 0x03d1u, 0 };
+uint16_t ucs2CharacterSet7[] = { 0x0345u, 0x0399u, 0x03b9u, 0x1fbeu, 0 };
+uint16_t ucs2CharacterSet8[] = { 0x039au, 0x03bau, 0x03f0u, 0 };
+uint16_t ucs2CharacterSet9[] = { 0x00b5u, 0x039cu, 0x03bcu, 0 };
+uint16_t ucs2CharacterSet10[] = { 0x03a0u, 0x03c0u, 0x03d6u, 0 };
+uint16_t ucs2CharacterSet11[] = { 0x03a1u, 0x03c1u, 0x03f1u, 0 };
+uint16_t ucs2CharacterSet12[] = { 0x03a3u, 0x03c2u, 0x03c3u, 0 };
+uint16_t ucs2CharacterSet13[] = { 0x03a6u, 0x03c6u, 0x03d5u, 0 };
+uint16_t ucs2CharacterSet14[] = { 0x1e60u, 0x1e61u, 0x1e9bu, 0 };
+
+static const size_t UCS2_CANONICALIZATION_SETS = 15;
+uint16_t* characterSetInfo[UCS2_CANONICALIZATION_SETS] = {
+ ucs2CharacterSet0,
+ ucs2CharacterSet1,
+ ucs2CharacterSet2,
+ ucs2CharacterSet3,
+ ucs2CharacterSet4,
+ ucs2CharacterSet5,
+ ucs2CharacterSet6,
+ ucs2CharacterSet7,
+ ucs2CharacterSet8,
+ ucs2CharacterSet9,
+ ucs2CharacterSet10,
+ ucs2CharacterSet11,
+ ucs2CharacterSet12,
+ ucs2CharacterSet13,
+ ucs2CharacterSet14,
+};
+
+const size_t UCS2_CANONICALIZATION_RANGES = 364;
+UCS2CanonicalizationRange rangeInfo[UCS2_CANONICALIZATION_RANGES] = {
+ { 0x0000u, 0x0040u, 0x0000u, CanonicalizeUnique },
+ { 0x0041u, 0x005au, 0x0020u, CanonicalizeRangeLo },
+ { 0x005bu, 0x0060u, 0x0000u, CanonicalizeUnique },
+ { 0x0061u, 0x007au, 0x0020u, CanonicalizeRangeHi },
+ { 0x007bu, 0x00b4u, 0x0000u, CanonicalizeUnique },
+ { 0x00b5u, 0x00b5u, 0x0009u, CanonicalizeSet },
+ { 0x00b6u, 0x00bfu, 0x0000u, CanonicalizeUnique },
+ { 0x00c0u, 0x00d6u, 0x0020u, CanonicalizeRangeLo },
+ { 0x00d7u, 0x00d7u, 0x0000u, CanonicalizeUnique },
+ { 0x00d8u, 0x00deu, 0x0020u, CanonicalizeRangeLo },
+ { 0x00dfu, 0x00dfu, 0x0000u, CanonicalizeUnique },
+ { 0x00e0u, 0x00f6u, 0x0020u, CanonicalizeRangeHi },
+ { 0x00f7u, 0x00f7u, 0x0000u, CanonicalizeUnique },
+ { 0x00f8u, 0x00feu, 0x0020u, CanonicalizeRangeHi },
+ { 0x00ffu, 0x00ffu, 0x0079u, CanonicalizeRangeLo },
+ { 0x0100u, 0x012fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0130u, 0x0131u, 0x0000u, CanonicalizeUnique },
+ { 0x0132u, 0x0137u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0138u, 0x0138u, 0x0000u, CanonicalizeUnique },
+ { 0x0139u, 0x0148u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0149u, 0x0149u, 0x0000u, CanonicalizeUnique },
+ { 0x014au, 0x0177u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0178u, 0x0178u, 0x0079u, CanonicalizeRangeHi },
+ { 0x0179u, 0x017eu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x017fu, 0x017fu, 0x0000u, CanonicalizeUnique },
+ { 0x0180u, 0x0180u, 0x00c3u, CanonicalizeRangeLo },
+ { 0x0181u, 0x0181u, 0x00d2u, CanonicalizeRangeLo },
+ { 0x0182u, 0x0185u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0186u, 0x0186u, 0x00ceu, CanonicalizeRangeLo },
+ { 0x0187u, 0x0188u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0189u, 0x018au, 0x00cdu, CanonicalizeRangeLo },
+ { 0x018bu, 0x018cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x018du, 0x018du, 0x0000u, CanonicalizeUnique },
+ { 0x018eu, 0x018eu, 0x004fu, CanonicalizeRangeLo },
+ { 0x018fu, 0x018fu, 0x00cau, CanonicalizeRangeLo },
+ { 0x0190u, 0x0190u, 0x00cbu, CanonicalizeRangeLo },
+ { 0x0191u, 0x0192u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0193u, 0x0193u, 0x00cdu, CanonicalizeRangeLo },
+ { 0x0194u, 0x0194u, 0x00cfu, CanonicalizeRangeLo },
+ { 0x0195u, 0x0195u, 0x0061u, CanonicalizeRangeLo },
+ { 0x0196u, 0x0196u, 0x00d3u, CanonicalizeRangeLo },
+ { 0x0197u, 0x0197u, 0x00d1u, CanonicalizeRangeLo },
+ { 0x0198u, 0x0199u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x019au, 0x019au, 0x00a3u, CanonicalizeRangeLo },
+ { 0x019bu, 0x019bu, 0x0000u, CanonicalizeUnique },
+ { 0x019cu, 0x019cu, 0x00d3u, CanonicalizeRangeLo },
+ { 0x019du, 0x019du, 0x00d5u, CanonicalizeRangeLo },
+ { 0x019eu, 0x019eu, 0x0082u, CanonicalizeRangeLo },
+ { 0x019fu, 0x019fu, 0x00d6u, CanonicalizeRangeLo },
+ { 0x01a0u, 0x01a5u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01a6u, 0x01a6u, 0x00dau, CanonicalizeRangeLo },
+ { 0x01a7u, 0x01a8u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01a9u, 0x01a9u, 0x00dau, CanonicalizeRangeLo },
+ { 0x01aau, 0x01abu, 0x0000u, CanonicalizeUnique },
+ { 0x01acu, 0x01adu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01aeu, 0x01aeu, 0x00dau, CanonicalizeRangeLo },
+ { 0x01afu, 0x01b0u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01b1u, 0x01b2u, 0x00d9u, CanonicalizeRangeLo },
+ { 0x01b3u, 0x01b6u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01b7u, 0x01b7u, 0x00dbu, CanonicalizeRangeLo },
+ { 0x01b8u, 0x01b9u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01bau, 0x01bbu, 0x0000u, CanonicalizeUnique },
+ { 0x01bcu, 0x01bdu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01beu, 0x01beu, 0x0000u, CanonicalizeUnique },
+ { 0x01bfu, 0x01bfu, 0x0038u, CanonicalizeRangeLo },
+ { 0x01c0u, 0x01c3u, 0x0000u, CanonicalizeUnique },
+ { 0x01c4u, 0x01c6u, 0x0000u, CanonicalizeSet },
+ { 0x01c7u, 0x01c9u, 0x0001u, CanonicalizeSet },
+ { 0x01cau, 0x01ccu, 0x0002u, CanonicalizeSet },
+ { 0x01cdu, 0x01dcu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x01ddu, 0x01ddu, 0x004fu, CanonicalizeRangeHi },
+ { 0x01deu, 0x01efu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01f0u, 0x01f0u, 0x0000u, CanonicalizeUnique },
+ { 0x01f1u, 0x01f3u, 0x0003u, CanonicalizeSet },
+ { 0x01f4u, 0x01f5u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x01f6u, 0x01f6u, 0x0061u, CanonicalizeRangeHi },
+ { 0x01f7u, 0x01f7u, 0x0038u, CanonicalizeRangeHi },
+ { 0x01f8u, 0x021fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0220u, 0x0220u, 0x0082u, CanonicalizeRangeHi },
+ { 0x0221u, 0x0221u, 0x0000u, CanonicalizeUnique },
+ { 0x0222u, 0x0233u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0234u, 0x0239u, 0x0000u, CanonicalizeUnique },
+ { 0x023au, 0x023au, 0x2a2bu, CanonicalizeRangeLo },
+ { 0x023bu, 0x023cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x023du, 0x023du, 0x00a3u, CanonicalizeRangeHi },
+ { 0x023eu, 0x023eu, 0x2a28u, CanonicalizeRangeLo },
+ { 0x023fu, 0x0240u, 0x2a3fu, CanonicalizeRangeLo },
+ { 0x0241u, 0x0242u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x0243u, 0x0243u, 0x00c3u, CanonicalizeRangeHi },
+ { 0x0244u, 0x0244u, 0x0045u, CanonicalizeRangeLo },
+ { 0x0245u, 0x0245u, 0x0047u, CanonicalizeRangeLo },
+ { 0x0246u, 0x024fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0250u, 0x0250u, 0x2a1fu, CanonicalizeRangeLo },
+ { 0x0251u, 0x0251u, 0x2a1cu, CanonicalizeRangeLo },
+ { 0x0252u, 0x0252u, 0x2a1eu, CanonicalizeRangeLo },
+ { 0x0253u, 0x0253u, 0x00d2u, CanonicalizeRangeHi },
+ { 0x0254u, 0x0254u, 0x00ceu, CanonicalizeRangeHi },
+ { 0x0255u, 0x0255u, 0x0000u, CanonicalizeUnique },
+ { 0x0256u, 0x0257u, 0x00cdu, CanonicalizeRangeHi },
+ { 0x0258u, 0x0258u, 0x0000u, CanonicalizeUnique },
+ { 0x0259u, 0x0259u, 0x00cau, CanonicalizeRangeHi },
+ { 0x025au, 0x025au, 0x0000u, CanonicalizeUnique },
+ { 0x025bu, 0x025bu, 0x00cbu, CanonicalizeRangeHi },
+ { 0x025cu, 0x025fu, 0x0000u, CanonicalizeUnique },
+ { 0x0260u, 0x0260u, 0x00cdu, CanonicalizeRangeHi },
+ { 0x0261u, 0x0262u, 0x0000u, CanonicalizeUnique },
+ { 0x0263u, 0x0263u, 0x00cfu, CanonicalizeRangeHi },
+ { 0x0264u, 0x0264u, 0x0000u, CanonicalizeUnique },
+ { 0x0265u, 0x0265u, 0xa528u, CanonicalizeRangeLo },
+ { 0x0266u, 0x0267u, 0x0000u, CanonicalizeUnique },
+ { 0x0268u, 0x0268u, 0x00d1u, CanonicalizeRangeHi },
+ { 0x0269u, 0x0269u, 0x00d3u, CanonicalizeRangeHi },
+ { 0x026au, 0x026au, 0x0000u, CanonicalizeUnique },
+ { 0x026bu, 0x026bu, 0x29f7u, CanonicalizeRangeLo },
+ { 0x026cu, 0x026eu, 0x0000u, CanonicalizeUnique },
+ { 0x026fu, 0x026fu, 0x00d3u, CanonicalizeRangeHi },
+ { 0x0270u, 0x0270u, 0x0000u, CanonicalizeUnique },
+ { 0x0271u, 0x0271u, 0x29fdu, CanonicalizeRangeLo },
+ { 0x0272u, 0x0272u, 0x00d5u, CanonicalizeRangeHi },
+ { 0x0273u, 0x0274u, 0x0000u, CanonicalizeUnique },
+ { 0x0275u, 0x0275u, 0x00d6u, CanonicalizeRangeHi },
+ { 0x0276u, 0x027cu, 0x0000u, CanonicalizeUnique },
+ { 0x027du, 0x027du, 0x29e7u, CanonicalizeRangeLo },
+ { 0x027eu, 0x027fu, 0x0000u, CanonicalizeUnique },
+ { 0x0280u, 0x0280u, 0x00dau, CanonicalizeRangeHi },
+ { 0x0281u, 0x0282u, 0x0000u, CanonicalizeUnique },
+ { 0x0283u, 0x0283u, 0x00dau, CanonicalizeRangeHi },
+ { 0x0284u, 0x0287u, 0x0000u, CanonicalizeUnique },
+ { 0x0288u, 0x0288u, 0x00dau, CanonicalizeRangeHi },
+ { 0x0289u, 0x0289u, 0x0045u, CanonicalizeRangeHi },
+ { 0x028au, 0x028bu, 0x00d9u, CanonicalizeRangeHi },
+ { 0x028cu, 0x028cu, 0x0047u, CanonicalizeRangeHi },
+ { 0x028du, 0x0291u, 0x0000u, CanonicalizeUnique },
+ { 0x0292u, 0x0292u, 0x00dbu, CanonicalizeRangeHi },
+ { 0x0293u, 0x0344u, 0x0000u, CanonicalizeUnique },
+ { 0x0345u, 0x0345u, 0x0007u, CanonicalizeSet },
+ { 0x0346u, 0x036fu, 0x0000u, CanonicalizeUnique },
+ { 0x0370u, 0x0373u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0374u, 0x0375u, 0x0000u, CanonicalizeUnique },
+ { 0x0376u, 0x0377u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0378u, 0x037au, 0x0000u, CanonicalizeUnique },
+ { 0x037bu, 0x037du, 0x0082u, CanonicalizeRangeLo },
+ { 0x037eu, 0x0385u, 0x0000u, CanonicalizeUnique },
+ { 0x0386u, 0x0386u, 0x0026u, CanonicalizeRangeLo },
+ { 0x0387u, 0x0387u, 0x0000u, CanonicalizeUnique },
+ { 0x0388u, 0x038au, 0x0025u, CanonicalizeRangeLo },
+ { 0x038bu, 0x038bu, 0x0000u, CanonicalizeUnique },
+ { 0x038cu, 0x038cu, 0x0040u, CanonicalizeRangeLo },
+ { 0x038du, 0x038du, 0x0000u, CanonicalizeUnique },
+ { 0x038eu, 0x038fu, 0x003fu, CanonicalizeRangeLo },
+ { 0x0390u, 0x0390u, 0x0000u, CanonicalizeUnique },
+ { 0x0391u, 0x0391u, 0x0020u, CanonicalizeRangeLo },
+ { 0x0392u, 0x0392u, 0x0004u, CanonicalizeSet },
+ { 0x0393u, 0x0394u, 0x0020u, CanonicalizeRangeLo },
+ { 0x0395u, 0x0395u, 0x0005u, CanonicalizeSet },
+ { 0x0396u, 0x0397u, 0x0020u, CanonicalizeRangeLo },
+ { 0x0398u, 0x0398u, 0x0006u, CanonicalizeSet },
+ { 0x0399u, 0x0399u, 0x0007u, CanonicalizeSet },
+ { 0x039au, 0x039au, 0x0008u, CanonicalizeSet },
+ { 0x039bu, 0x039bu, 0x0020u, CanonicalizeRangeLo },
+ { 0x039cu, 0x039cu, 0x0009u, CanonicalizeSet },
+ { 0x039du, 0x039fu, 0x0020u, CanonicalizeRangeLo },
+ { 0x03a0u, 0x03a0u, 0x000au, CanonicalizeSet },
+ { 0x03a1u, 0x03a1u, 0x000bu, CanonicalizeSet },
+ { 0x03a2u, 0x03a2u, 0x0000u, CanonicalizeUnique },
+ { 0x03a3u, 0x03a3u, 0x000cu, CanonicalizeSet },
+ { 0x03a4u, 0x03a5u, 0x0020u, CanonicalizeRangeLo },
+ { 0x03a6u, 0x03a6u, 0x000du, CanonicalizeSet },
+ { 0x03a7u, 0x03abu, 0x0020u, CanonicalizeRangeLo },
+ { 0x03acu, 0x03acu, 0x0026u, CanonicalizeRangeHi },
+ { 0x03adu, 0x03afu, 0x0025u, CanonicalizeRangeHi },
+ { 0x03b0u, 0x03b0u, 0x0000u, CanonicalizeUnique },
+ { 0x03b1u, 0x03b1u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03b2u, 0x03b2u, 0x0004u, CanonicalizeSet },
+ { 0x03b3u, 0x03b4u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03b5u, 0x03b5u, 0x0005u, CanonicalizeSet },
+ { 0x03b6u, 0x03b7u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03b8u, 0x03b8u, 0x0006u, CanonicalizeSet },
+ { 0x03b9u, 0x03b9u, 0x0007u, CanonicalizeSet },
+ { 0x03bau, 0x03bau, 0x0008u, CanonicalizeSet },
+ { 0x03bbu, 0x03bbu, 0x0020u, CanonicalizeRangeHi },
+ { 0x03bcu, 0x03bcu, 0x0009u, CanonicalizeSet },
+ { 0x03bdu, 0x03bfu, 0x0020u, CanonicalizeRangeHi },
+ { 0x03c0u, 0x03c0u, 0x000au, CanonicalizeSet },
+ { 0x03c1u, 0x03c1u, 0x000bu, CanonicalizeSet },
+ { 0x03c2u, 0x03c3u, 0x000cu, CanonicalizeSet },
+ { 0x03c4u, 0x03c5u, 0x0020u, CanonicalizeRangeHi },
+ { 0x03c6u, 0x03c6u, 0x000du, CanonicalizeSet },
+ { 0x03c7u, 0x03cbu, 0x0020u, CanonicalizeRangeHi },
+ { 0x03ccu, 0x03ccu, 0x0040u, CanonicalizeRangeHi },
+ { 0x03cdu, 0x03ceu, 0x003fu, CanonicalizeRangeHi },
+ { 0x03cfu, 0x03cfu, 0x0008u, CanonicalizeRangeLo },
+ { 0x03d0u, 0x03d0u, 0x0004u, CanonicalizeSet },
+ { 0x03d1u, 0x03d1u, 0x0006u, CanonicalizeSet },
+ { 0x03d2u, 0x03d4u, 0x0000u, CanonicalizeUnique },
+ { 0x03d5u, 0x03d5u, 0x000du, CanonicalizeSet },
+ { 0x03d6u, 0x03d6u, 0x000au, CanonicalizeSet },
+ { 0x03d7u, 0x03d7u, 0x0008u, CanonicalizeRangeHi },
+ { 0x03d8u, 0x03efu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x03f0u, 0x03f0u, 0x0008u, CanonicalizeSet },
+ { 0x03f1u, 0x03f1u, 0x000bu, CanonicalizeSet },
+ { 0x03f2u, 0x03f2u, 0x0007u, CanonicalizeRangeLo },
+ { 0x03f3u, 0x03f4u, 0x0000u, CanonicalizeUnique },
+ { 0x03f5u, 0x03f5u, 0x0005u, CanonicalizeSet },
+ { 0x03f6u, 0x03f6u, 0x0000u, CanonicalizeUnique },
+ { 0x03f7u, 0x03f8u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x03f9u, 0x03f9u, 0x0007u, CanonicalizeRangeHi },
+ { 0x03fau, 0x03fbu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x03fcu, 0x03fcu, 0x0000u, CanonicalizeUnique },
+ { 0x03fdu, 0x03ffu, 0x0082u, CanonicalizeRangeHi },
+ { 0x0400u, 0x040fu, 0x0050u, CanonicalizeRangeLo },
+ { 0x0410u, 0x042fu, 0x0020u, CanonicalizeRangeLo },
+ { 0x0430u, 0x044fu, 0x0020u, CanonicalizeRangeHi },
+ { 0x0450u, 0x045fu, 0x0050u, CanonicalizeRangeHi },
+ { 0x0460u, 0x0481u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0482u, 0x0489u, 0x0000u, CanonicalizeUnique },
+ { 0x048au, 0x04bfu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x04c0u, 0x04c0u, 0x000fu, CanonicalizeRangeLo },
+ { 0x04c1u, 0x04ceu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x04cfu, 0x04cfu, 0x000fu, CanonicalizeRangeHi },
+ { 0x04d0u, 0x0527u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x0528u, 0x0530u, 0x0000u, CanonicalizeUnique },
+ { 0x0531u, 0x0556u, 0x0030u, CanonicalizeRangeLo },
+ { 0x0557u, 0x0560u, 0x0000u, CanonicalizeUnique },
+ { 0x0561u, 0x0586u, 0x0030u, CanonicalizeRangeHi },
+ { 0x0587u, 0x109fu, 0x0000u, CanonicalizeUnique },
+ { 0x10a0u, 0x10c5u, 0x1c60u, CanonicalizeRangeLo },
+ { 0x10c6u, 0x1d78u, 0x0000u, CanonicalizeUnique },
+ { 0x1d79u, 0x1d79u, 0x8a04u, CanonicalizeRangeLo },
+ { 0x1d7au, 0x1d7cu, 0x0000u, CanonicalizeUnique },
+ { 0x1d7du, 0x1d7du, 0x0ee6u, CanonicalizeRangeLo },
+ { 0x1d7eu, 0x1dffu, 0x0000u, CanonicalizeUnique },
+ { 0x1e00u, 0x1e5fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x1e60u, 0x1e61u, 0x000eu, CanonicalizeSet },
+ { 0x1e62u, 0x1e95u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x1e96u, 0x1e9au, 0x0000u, CanonicalizeUnique },
+ { 0x1e9bu, 0x1e9bu, 0x000eu, CanonicalizeSet },
+ { 0x1e9cu, 0x1e9fu, 0x0000u, CanonicalizeUnique },
+ { 0x1ea0u, 0x1effu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x1f00u, 0x1f07u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f08u, 0x1f0fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f10u, 0x1f15u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f16u, 0x1f17u, 0x0000u, CanonicalizeUnique },
+ { 0x1f18u, 0x1f1du, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f1eu, 0x1f1fu, 0x0000u, CanonicalizeUnique },
+ { 0x1f20u, 0x1f27u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f28u, 0x1f2fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f30u, 0x1f37u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f38u, 0x1f3fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f40u, 0x1f45u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f46u, 0x1f47u, 0x0000u, CanonicalizeUnique },
+ { 0x1f48u, 0x1f4du, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f4eu, 0x1f50u, 0x0000u, CanonicalizeUnique },
+ { 0x1f51u, 0x1f51u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f52u, 0x1f52u, 0x0000u, CanonicalizeUnique },
+ { 0x1f53u, 0x1f53u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f54u, 0x1f54u, 0x0000u, CanonicalizeUnique },
+ { 0x1f55u, 0x1f55u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f56u, 0x1f56u, 0x0000u, CanonicalizeUnique },
+ { 0x1f57u, 0x1f57u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f58u, 0x1f58u, 0x0000u, CanonicalizeUnique },
+ { 0x1f59u, 0x1f59u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f5au, 0x1f5au, 0x0000u, CanonicalizeUnique },
+ { 0x1f5bu, 0x1f5bu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f5cu, 0x1f5cu, 0x0000u, CanonicalizeUnique },
+ { 0x1f5du, 0x1f5du, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f5eu, 0x1f5eu, 0x0000u, CanonicalizeUnique },
+ { 0x1f5fu, 0x1f5fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f60u, 0x1f67u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1f68u, 0x1f6fu, 0x0008u, CanonicalizeRangeHi },
+ { 0x1f70u, 0x1f71u, 0x004au, CanonicalizeRangeLo },
+ { 0x1f72u, 0x1f75u, 0x0056u, CanonicalizeRangeLo },
+ { 0x1f76u, 0x1f77u, 0x0064u, CanonicalizeRangeLo },
+ { 0x1f78u, 0x1f79u, 0x0080u, CanonicalizeRangeLo },
+ { 0x1f7au, 0x1f7bu, 0x0070u, CanonicalizeRangeLo },
+ { 0x1f7cu, 0x1f7du, 0x007eu, CanonicalizeRangeLo },
+ { 0x1f7eu, 0x1fafu, 0x0000u, CanonicalizeUnique },
+ { 0x1fb0u, 0x1fb1u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1fb2u, 0x1fb7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fb8u, 0x1fb9u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1fbau, 0x1fbbu, 0x004au, CanonicalizeRangeHi },
+ { 0x1fbcu, 0x1fbdu, 0x0000u, CanonicalizeUnique },
+ { 0x1fbeu, 0x1fbeu, 0x0007u, CanonicalizeSet },
+ { 0x1fbfu, 0x1fc7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fc8u, 0x1fcbu, 0x0056u, CanonicalizeRangeHi },
+ { 0x1fccu, 0x1fcfu, 0x0000u, CanonicalizeUnique },
+ { 0x1fd0u, 0x1fd1u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1fd2u, 0x1fd7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fd8u, 0x1fd9u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1fdau, 0x1fdbu, 0x0064u, CanonicalizeRangeHi },
+ { 0x1fdcu, 0x1fdfu, 0x0000u, CanonicalizeUnique },
+ { 0x1fe0u, 0x1fe1u, 0x0008u, CanonicalizeRangeLo },
+ { 0x1fe2u, 0x1fe4u, 0x0000u, CanonicalizeUnique },
+ { 0x1fe5u, 0x1fe5u, 0x0007u, CanonicalizeRangeLo },
+ { 0x1fe6u, 0x1fe7u, 0x0000u, CanonicalizeUnique },
+ { 0x1fe8u, 0x1fe9u, 0x0008u, CanonicalizeRangeHi },
+ { 0x1feau, 0x1febu, 0x0070u, CanonicalizeRangeHi },
+ { 0x1fecu, 0x1fecu, 0x0007u, CanonicalizeRangeHi },
+ { 0x1fedu, 0x1ff7u, 0x0000u, CanonicalizeUnique },
+ { 0x1ff8u, 0x1ff9u, 0x0080u, CanonicalizeRangeHi },
+ { 0x1ffau, 0x1ffbu, 0x007eu, CanonicalizeRangeHi },
+ { 0x1ffcu, 0x2131u, 0x0000u, CanonicalizeUnique },
+ { 0x2132u, 0x2132u, 0x001cu, CanonicalizeRangeLo },
+ { 0x2133u, 0x214du, 0x0000u, CanonicalizeUnique },
+ { 0x214eu, 0x214eu, 0x001cu, CanonicalizeRangeHi },
+ { 0x214fu, 0x215fu, 0x0000u, CanonicalizeUnique },
+ { 0x2160u, 0x216fu, 0x0010u, CanonicalizeRangeLo },
+ { 0x2170u, 0x217fu, 0x0010u, CanonicalizeRangeHi },
+ { 0x2180u, 0x2182u, 0x0000u, CanonicalizeUnique },
+ { 0x2183u, 0x2184u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2185u, 0x24b5u, 0x0000u, CanonicalizeUnique },
+ { 0x24b6u, 0x24cfu, 0x001au, CanonicalizeRangeLo },
+ { 0x24d0u, 0x24e9u, 0x001au, CanonicalizeRangeHi },
+ { 0x24eau, 0x2bffu, 0x0000u, CanonicalizeUnique },
+ { 0x2c00u, 0x2c2eu, 0x0030u, CanonicalizeRangeLo },
+ { 0x2c2fu, 0x2c2fu, 0x0000u, CanonicalizeUnique },
+ { 0x2c30u, 0x2c5eu, 0x0030u, CanonicalizeRangeHi },
+ { 0x2c5fu, 0x2c5fu, 0x0000u, CanonicalizeUnique },
+ { 0x2c60u, 0x2c61u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x2c62u, 0x2c62u, 0x29f7u, CanonicalizeRangeHi },
+ { 0x2c63u, 0x2c63u, 0x0ee6u, CanonicalizeRangeHi },
+ { 0x2c64u, 0x2c64u, 0x29e7u, CanonicalizeRangeHi },
+ { 0x2c65u, 0x2c65u, 0x2a2bu, CanonicalizeRangeHi },
+ { 0x2c66u, 0x2c66u, 0x2a28u, CanonicalizeRangeHi },
+ { 0x2c67u, 0x2c6cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2c6du, 0x2c6du, 0x2a1cu, CanonicalizeRangeHi },
+ { 0x2c6eu, 0x2c6eu, 0x29fdu, CanonicalizeRangeHi },
+ { 0x2c6fu, 0x2c6fu, 0x2a1fu, CanonicalizeRangeHi },
+ { 0x2c70u, 0x2c70u, 0x2a1eu, CanonicalizeRangeHi },
+ { 0x2c71u, 0x2c71u, 0x0000u, CanonicalizeUnique },
+ { 0x2c72u, 0x2c73u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x2c74u, 0x2c74u, 0x0000u, CanonicalizeUnique },
+ { 0x2c75u, 0x2c76u, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2c77u, 0x2c7du, 0x0000u, CanonicalizeUnique },
+ { 0x2c7eu, 0x2c7fu, 0x2a3fu, CanonicalizeRangeHi },
+ { 0x2c80u, 0x2ce3u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0x2ce4u, 0x2ceau, 0x0000u, CanonicalizeUnique },
+ { 0x2cebu, 0x2ceeu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0x2cefu, 0x2cffu, 0x0000u, CanonicalizeUnique },
+ { 0x2d00u, 0x2d25u, 0x1c60u, CanonicalizeRangeHi },
+ { 0x2d26u, 0xa63fu, 0x0000u, CanonicalizeUnique },
+ { 0xa640u, 0xa66du, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa66eu, 0xa67fu, 0x0000u, CanonicalizeUnique },
+ { 0xa680u, 0xa697u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa698u, 0xa721u, 0x0000u, CanonicalizeUnique },
+ { 0xa722u, 0xa72fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa730u, 0xa731u, 0x0000u, CanonicalizeUnique },
+ { 0xa732u, 0xa76fu, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa770u, 0xa778u, 0x0000u, CanonicalizeUnique },
+ { 0xa779u, 0xa77cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0xa77du, 0xa77du, 0x8a04u, CanonicalizeRangeHi },
+ { 0xa77eu, 0xa787u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa788u, 0xa78au, 0x0000u, CanonicalizeUnique },
+ { 0xa78bu, 0xa78cu, 0x0000u, CanonicalizeAlternatingUnaligned },
+ { 0xa78du, 0xa78du, 0xa528u, CanonicalizeRangeHi },
+ { 0xa78eu, 0xa78fu, 0x0000u, CanonicalizeUnique },
+ { 0xa790u, 0xa791u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa792u, 0xa79fu, 0x0000u, CanonicalizeUnique },
+ { 0xa7a0u, 0xa7a9u, 0x0000u, CanonicalizeAlternatingAligned },
+ { 0xa7aau, 0xff20u, 0x0000u, CanonicalizeUnique },
+ { 0xff21u, 0xff3au, 0x0020u, CanonicalizeRangeLo },
+ { 0xff3bu, 0xff40u, 0x0000u, CanonicalizeUnique },
+ { 0xff41u, 0xff5au, 0x0020u, CanonicalizeRangeHi },
+ { 0xff5bu, 0xffffu, 0x0000u, CanonicalizeUnique },
+};
+
+const size_t LATIN_CANONICALIZATION_RANGES = 20;
+LatinCanonicalizationRange latinRangeInfo[LATIN_CANONICALIZATION_RANGES] = {
+ { 0x0000u, 0x0040u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x0041u, 0x005au, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x005bu, 0x0060u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x0061u, 0x007au, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x007bu, 0x00bfu, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00c0u, 0x00d6u, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00d7u, 0x00d7u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00d8u, 0x00deu, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00dfu, 0x00dfu, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00e0u, 0x00f6u, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00f7u, 0x00f7u, 0x0000u, CanonicalizeLatinSelf },
+ { 0x00f8u, 0x00feu, 0x0000u, CanonicalizeLatinMask0x20 },
+ { 0x00ffu, 0x00ffu, 0x0000u, CanonicalizeLatinSelf },
+ { 0x0100u, 0x0177u, 0x0000u, CanonicalizeLatinInvalid },
+ { 0x0178u, 0x0178u, 0x00ffu, CanonicalizeLatinOther },
+ { 0x0179u, 0x039bu, 0x0000u, CanonicalizeLatinInvalid },
+ { 0x039cu, 0x039cu, 0x00b5u, CanonicalizeLatinOther },
+ { 0x039du, 0x03bbu, 0x0000u, CanonicalizeLatinInvalid },
+ { 0x03bcu, 0x03bcu, 0x00b5u, CanonicalizeLatinOther },
+ { 0x03bdu, 0xffffu, 0x0000u, CanonicalizeLatinInvalid },
+};
+
+} } // JSC::Yarr
+
diff --git a/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h
new file mode 100644
index 0000000000..9dce78200c
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrCanonicalizeUCS2_H
+#define YarrCanonicalizeUCS2_H
+
+#include <stdint.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace JSC { namespace Yarr {
+
+// This set of data (autogenerated using YarrCanonicalizeUCS2.js into YarrCanonicalizeUCS2.cpp)
+// provides information for each UCS2 code point as to the set of code points that it should
+// match under the ES5.1 case insensitive RegExp matching rules, specified in 15.10.2.8.
+enum UCS2CanonicalizationType {
+ CanonicalizeUnique, // No canonically equal values, e.g. 0x0.
+ CanonicalizeSet, // Value indicates a set in characterSetInfo.
+ CanonicalizeRangeLo, // Value is positive delta to pair, E.g. 0x41 has value 0x20, -> 0x61.
+ CanonicalizeRangeHi, // Value is positive delta to pair, E.g. 0x61 has value 0x20, -> 0x41.
+ CanonicalizeAlternatingAligned, // Aligned consequtive pair, e.g. 0x1f4,0x1f5.
+ CanonicalizeAlternatingUnaligned, // Unaligned consequtive pair, e.g. 0x241,0x242.
+};
+struct UCS2CanonicalizationRange { uint16_t begin, end, value, type; };
+extern const size_t UCS2_CANONICALIZATION_RANGES;
+extern uint16_t* characterSetInfo[];
+extern UCS2CanonicalizationRange rangeInfo[];
+
+// This table is similar to the full rangeInfo table, however this maps from UCS2 codepoints to
+// the set of Latin1 codepoints that could match.
+enum LatinCanonicalizationType {
+ CanonicalizeLatinSelf, // This character is in the Latin1 range, but has no canonical equivalent in the range.
+ CanonicalizeLatinMask0x20, // One of a pair of characters, under the mask 0x20.
+ CanonicalizeLatinOther, // This character is not in the Latin1 range, but canonicalizes to another that is.
+ CanonicalizeLatinInvalid, // Cannot match against Latin1 input.
+};
+struct LatinCanonicalizationRange { uint16_t begin, end, value, type; };
+extern const size_t LATIN_CANONICALIZATION_RANGES;
+extern LatinCanonicalizationRange latinRangeInfo[];
+
+// This searches in log2 time over ~364 entries, so should typically result in 8 compares.
+inline UCS2CanonicalizationRange* rangeInfoFor(UChar ch)
+{
+ UCS2CanonicalizationRange* info = rangeInfo;
+ size_t entries = UCS2_CANONICALIZATION_RANGES;
+
+ while (true) {
+ size_t candidate = entries >> 1;
+ UCS2CanonicalizationRange* candidateInfo = info + candidate;
+ if (ch < candidateInfo->begin)
+ entries = candidate;
+ else if (ch <= candidateInfo->end)
+ return candidateInfo;
+ else {
+ info = candidateInfo + 1;
+ entries -= (candidate + 1);
+ }
+ }
+}
+
+// Should only be called for characters that have one canonically matching value.
+inline UChar getCanonicalPair(UCS2CanonicalizationRange* info, UChar ch)
+{
+ ASSERT(ch >= info->begin && ch <= info->end);
+ switch (info->type) {
+ case CanonicalizeRangeLo:
+ return ch + info->value;
+ case CanonicalizeRangeHi:
+ return ch - info->value;
+ case CanonicalizeAlternatingAligned:
+ return ch ^ 1;
+ case CanonicalizeAlternatingUnaligned:
+ return ((ch - 1) ^ 1) + 1;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+}
+
+// Returns true if no other UCS2 codepoint can match this value.
+inline bool isCanonicallyUnique(UChar ch)
+{
+ return rangeInfoFor(ch)->type == CanonicalizeUnique;
+}
+
+// Returns true if values are equal, under the canonicalization rules.
+inline bool areCanonicallyEquivalent(UChar a, UChar b)
+{
+ UCS2CanonicalizationRange* info = rangeInfoFor(a);
+ switch (info->type) {
+ case CanonicalizeUnique:
+ return a == b;
+ case CanonicalizeSet: {
+ for (uint16_t* set = characterSetInfo[info->value]; (a = *set); ++set) {
+ if (a == b)
+ return true;
+ }
+ return false;
+ }
+ case CanonicalizeRangeLo:
+ return (a == b) || (a + info->value == b);
+ case CanonicalizeRangeHi:
+ return (a == b) || (a - info->value == b);
+ case CanonicalizeAlternatingAligned:
+ return (a | 1) == (b | 1);
+ case CanonicalizeAlternatingUnaligned:
+ return ((a - 1) | 1) == ((b - 1) | 1);
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+}
+
+} } // JSC::Yarr
+
+#endif
diff --git a/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js
new file mode 100644
index 0000000000..00361dd46e
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// See ES 5.1, 15.10.2.8
+function canonicalize(ch)
+{
+ var u = String.fromCharCode(ch).toUpperCase();
+ if (u.length > 1)
+ return ch;
+ var cu = u.charCodeAt(0);
+ if (ch >= 128 && cu < 128)
+ return ch;
+ return cu;
+}
+
+var MAX_UCS2 = 0xFFFF;
+var MAX_LATIN = 0xFF;
+
+var groupedCanonically = [];
+// Pass 1: populate groupedCanonically - this is mapping from canonicalized
+// values back to the set of character code that canonicalize to them.
+for (var i = 0; i <= MAX_UCS2; ++i) {
+ var ch = canonicalize(i);
+ if (!groupedCanonically[ch])
+ groupedCanonically[ch] = [];
+ groupedCanonically[ch].push(i);
+}
+
+var typeInfo = [];
+var latinTypeInfo = [];
+var characterSetInfo = [];
+// Pass 2: populate typeInfo & characterSetInfo. For every character calculate
+// a typeInfo value, described by the types above, and a value payload.
+for (cu in groupedCanonically) {
+ // The set of characters that canonicalize to cu
+ var characters = groupedCanonically[cu];
+
+ // If there is only one, it is unique.
+ if (characters.length == 1) {
+ typeInfo[characters[0]] = "CanonicalizeUnique:0";
+ latinTypeInfo[characters[0]] = characters[0] <= MAX_LATIN ? "CanonicalizeLatinSelf:0" : "CanonicalizeLatinInvalid:0";
+ continue;
+ }
+
+ // Sort the array.
+ characters.sort(function(x,y){return x-y;});
+
+ // If there are more than two characters, create an entry in characterSetInfo.
+ if (characters.length > 2) {
+ for (i in characters)
+ typeInfo[characters[i]] = "CanonicalizeSet:" + characterSetInfo.length;
+ characterSetInfo.push(characters);
+
+ if (characters[1] <= MAX_LATIN)
+ throw new Error("sets with more than one latin character not supported!");
+ if (characters[0] <= MAX_LATIN) {
+ for (i in characters)
+ latinTypeInfo[characters[i]] = "CanonicalizeLatinOther:" + characters[0];
+ latinTypeInfo[characters[0]] = "CanonicalizeLatinSelf:0";
+ } else {
+ for (i in characters)
+ latinTypeInfo[characters[i]] = "CanonicalizeLatinInvalid:0";
+ }
+
+ continue;
+ }
+
+ // We have a pair, mark alternating ranges, otherwise track whether this is the low or high partner.
+ var lo = characters[0];
+ var hi = characters[1];
+ var delta = hi - lo;
+ if (delta == 1) {
+ var type = lo & 1 ? "CanonicalizeAlternatingUnaligned:0" : "CanonicalizeAlternatingAligned:0";
+ typeInfo[lo] = type;
+ typeInfo[hi] = type;
+ } else {
+ typeInfo[lo] = "CanonicalizeRangeLo:" + delta;
+ typeInfo[hi] = "CanonicalizeRangeHi:" + delta;
+ }
+
+ if (lo > MAX_LATIN) {
+ latinTypeInfo[lo] = "CanonicalizeLatinInvalid:0";
+ latinTypeInfo[hi] = "CanonicalizeLatinInvalid:0";
+ } else if (hi > MAX_LATIN) {
+ latinTypeInfo[lo] = "CanonicalizeLatinSelf:0";
+ latinTypeInfo[hi] = "CanonicalizeLatinOther:" + lo;
+ } else {
+ if (delta != 0x20 || lo & 0x20)
+ throw new Error("pairs of latin characters that don't mask with 0x20 not supported!");
+ latinTypeInfo[lo] = "CanonicalizeLatinMask0x20:0";
+ latinTypeInfo[hi] = "CanonicalizeLatinMask0x20:0";
+ }
+}
+
+var rangeInfo = [];
+// Pass 3: coallesce types into ranges.
+for (var end = 0; end <= MAX_UCS2; ++end) {
+ var begin = end;
+ var type = typeInfo[end];
+ while (end < MAX_UCS2 && typeInfo[end + 1] == type)
+ ++end;
+ rangeInfo.push({begin:begin, end:end, type:type});
+}
+
+var latinRangeInfo = [];
+// Pass 4: coallesce latin-1 types into ranges.
+for (var end = 0; end <= MAX_UCS2; ++end) {
+ var begin = end;
+ var type = latinTypeInfo[end];
+ while (end < MAX_UCS2 && latinTypeInfo[end + 1] == type)
+ ++end;
+ latinRangeInfo.push({begin:begin, end:end, type:type});
+}
+
+
+// Helper function to convert a number to a fixed width hex representation of a C uint16_t.
+function hex(x)
+{
+ var s = Number(x).toString(16);
+ while (s.length < 4)
+ s = 0 + s;
+ return "0x" + s + "u";
+}
+
+var copyright = (
+ "/*" + "\n" +
+ " * Copyright (C) 2012 Apple Inc. All rights reserved." + "\n" +
+ " *" + "\n" +
+ " * Redistribution and use in source and binary forms, with or without" + "\n" +
+ " * modification, are permitted provided that the following conditions" + "\n" +
+ " * are met:" + "\n" +
+ " * 1. Redistributions of source code must retain the above copyright" + "\n" +
+ " * notice, this list of conditions and the following disclaimer." + "\n" +
+ " * 2. Redistributions in binary form must reproduce the above copyright" + "\n" +
+ " * notice, this list of conditions and the following disclaimer in the" + "\n" +
+ " * documentation and/or other materials provided with the distribution." + "\n" +
+ " *" + "\n" +
+ " * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY" + "\n" +
+ " * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE" + "\n" +
+ " * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR" + "\n" +
+ " * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR" + "\n" +
+ " * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL," + "\n" +
+ " * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO," + "\n" +
+ " * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR" + "\n" +
+ " * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY" + "\n" +
+ " * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT" + "\n" +
+ " * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE" + "\n" +
+ " * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. " + "\n" +
+ " */");
+
+print(copyright);
+print();
+print("// DO NOT EDIT! - this file autogenerated by YarrCanonicalizeUCS2.js");
+print();
+print('#include "config.h"');
+print('#include "YarrCanonicalizeUCS2.h"');
+print();
+print("namespace JSC { namespace Yarr {");
+print();
+print("#include <stdint.h>");
+print();
+
+for (i in characterSetInfo) {
+ var characters = ""
+ var set = characterSetInfo[i];
+ for (var j in set)
+ characters += hex(set[j]) + ", ";
+ print("uint16_t ucs2CharacterSet" + i + "[] = { " + characters + "0 };");
+}
+print();
+print("static const size_t UCS2_CANONICALIZATION_SETS = " + characterSetInfo.length + ";");
+print("uint16_t* characterSetInfo[UCS2_CANONICALIZATION_SETS] = {");
+for (i in characterSetInfo)
+print(" ucs2CharacterSet" + i + ",");
+print("};");
+print();
+print("const size_t UCS2_CANONICALIZATION_RANGES = " + rangeInfo.length + ";");
+print("UCS2CanonicalizationRange rangeInfo[UCS2_CANONICALIZATION_RANGES] = {");
+for (i in rangeInfo) {
+ var info = rangeInfo[i];
+ var typeAndValue = info.type.split(':');
+ print(" { " + hex(info.begin) + ", " + hex(info.end) + ", " + hex(typeAndValue[1]) + ", " + typeAndValue[0] + " },");
+}
+print("};");
+print();
+print("const size_t LATIN_CANONICALIZATION_RANGES = " + latinRangeInfo.length + ";");
+print("LatinCanonicalizationRange latinRangeInfo[LATIN_CANONICALIZATION_RANGES] = {");
+for (i in latinRangeInfo) {
+ var info = latinRangeInfo[i];
+ var typeAndValue = info.type.split(':');
+ print(" { " + hex(info.begin) + ", " + hex(info.end) + ", " + hex(typeAndValue[1]) + ", " + typeAndValue[0] + " },");
+}
+print("};");
+print();
+print("} } // JSC::Yarr");
+print();
+
diff --git a/src/3rdparty/masm/yarr/YarrInterpreter.cpp b/src/3rdparty/masm/yarr/YarrInterpreter.cpp
new file mode 100644
index 0000000000..f0312ea251
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrInterpreter.cpp
@@ -0,0 +1,1959 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrInterpreter.h"
+
+#include "Yarr.h"
+#include "YarrCanonicalizeUCS2.h"
+#include <wtf/BumpPointerAllocator.h>
+#include <wtf/DataLog.h>
+#include <wtf/text/CString.h>
+#include <wtf/text/WTFString.h>
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace WTF;
+
+namespace JSC { namespace Yarr {
+
+template<typename CharType>
+class Interpreter {
+public:
+ struct ParenthesesDisjunctionContext;
+
+ struct BackTrackInfoPatternCharacter {
+ uintptr_t matchAmount;
+ };
+ struct BackTrackInfoCharacterClass {
+ uintptr_t matchAmount;
+ };
+ struct BackTrackInfoBackReference {
+ uintptr_t begin; // Not really needed for greedy quantifiers.
+ uintptr_t matchAmount; // Not really needed for fixed quantifiers.
+ };
+ struct BackTrackInfoAlternative {
+ uintptr_t offset;
+ };
+ struct BackTrackInfoParentheticalAssertion {
+ uintptr_t begin;
+ };
+ struct BackTrackInfoParenthesesOnce {
+ uintptr_t begin;
+ };
+ struct BackTrackInfoParenthesesTerminal {
+ uintptr_t begin;
+ };
+ struct BackTrackInfoParentheses {
+ uintptr_t matchAmount;
+ ParenthesesDisjunctionContext* lastContext;
+ };
+
+ static inline void appendParenthesesDisjunctionContext(BackTrackInfoParentheses* backTrack, ParenthesesDisjunctionContext* context)
+ {
+ context->next = backTrack->lastContext;
+ backTrack->lastContext = context;
+ ++backTrack->matchAmount;
+ }
+
+ static inline void popParenthesesDisjunctionContext(BackTrackInfoParentheses* backTrack)
+ {
+ RELEASE_ASSERT(backTrack->matchAmount);
+ RELEASE_ASSERT(backTrack->lastContext);
+ backTrack->lastContext = backTrack->lastContext->next;
+ --backTrack->matchAmount;
+ }
+
+ struct DisjunctionContext
+ {
+ DisjunctionContext()
+ : term(0)
+ {
+ }
+
+ void* operator new(size_t, void* where)
+ {
+ return where;
+ }
+
+ int term;
+ unsigned matchBegin;
+ unsigned matchEnd;
+ uintptr_t frame[1];
+ };
+
+ DisjunctionContext* allocDisjunctionContext(ByteDisjunction* disjunction)
+ {
+ size_t size = sizeof(DisjunctionContext) - sizeof(uintptr_t) + disjunction->m_frameSize * sizeof(uintptr_t);
+ allocatorPool = allocatorPool->ensureCapacity(size);
+ RELEASE_ASSERT(allocatorPool);
+ return new (allocatorPool->alloc(size)) DisjunctionContext();
+ }
+
+ void freeDisjunctionContext(DisjunctionContext* context)
+ {
+ allocatorPool = allocatorPool->dealloc(context);
+ }
+
+ struct ParenthesesDisjunctionContext
+ {
+ ParenthesesDisjunctionContext(unsigned* output, ByteTerm& term)
+ : next(0)
+ {
+ unsigned firstSubpatternId = term.atom.subpatternId;
+ unsigned numNestedSubpatterns = term.atom.parenthesesDisjunction->m_numSubpatterns;
+
+ for (unsigned i = 0; i < (numNestedSubpatterns << 1); ++i) {
+ subpatternBackup[i] = output[(firstSubpatternId << 1) + i];
+ output[(firstSubpatternId << 1) + i] = offsetNoMatch;
+ }
+
+ new (getDisjunctionContext(term)) DisjunctionContext();
+ }
+
+ void* operator new(size_t, void* where)
+ {
+ return where;
+ }
+
+ void restoreOutput(unsigned* output, unsigned firstSubpatternId, unsigned numNestedSubpatterns)
+ {
+ for (unsigned i = 0; i < (numNestedSubpatterns << 1); ++i)
+ output[(firstSubpatternId << 1) + i] = subpatternBackup[i];
+ }
+
+ DisjunctionContext* getDisjunctionContext(ByteTerm& term)
+ {
+ return reinterpret_cast<DisjunctionContext*>(&(subpatternBackup[term.atom.parenthesesDisjunction->m_numSubpatterns << 1]));
+ }
+
+ ParenthesesDisjunctionContext* next;
+ unsigned subpatternBackup[1];
+ };
+
+ ParenthesesDisjunctionContext* allocParenthesesDisjunctionContext(ByteDisjunction* disjunction, unsigned* output, ByteTerm& term)
+ {
+ size_t size = sizeof(ParenthesesDisjunctionContext) - sizeof(unsigned) + (term.atom.parenthesesDisjunction->m_numSubpatterns << 1) * sizeof(unsigned) + sizeof(DisjunctionContext) - sizeof(uintptr_t) + disjunction->m_frameSize * sizeof(uintptr_t);
+ allocatorPool = allocatorPool->ensureCapacity(size);
+ RELEASE_ASSERT(allocatorPool);
+ return new (allocatorPool->alloc(size)) ParenthesesDisjunctionContext(output, term);
+ }
+
+ void freeParenthesesDisjunctionContext(ParenthesesDisjunctionContext* context)
+ {
+ allocatorPool = allocatorPool->dealloc(context);
+ }
+
+ class InputStream {
+ public:
+ InputStream(const CharType* input, unsigned start, unsigned length)
+ : input(input)
+ , pos(start)
+ , length(length)
+ {
+ }
+
+ void next()
+ {
+ ++pos;
+ }
+
+ void rewind(unsigned amount)
+ {
+ ASSERT(pos >= amount);
+ pos -= amount;
+ }
+
+ int read()
+ {
+ ASSERT(pos < length);
+ if (pos < length)
+ return input[pos];
+ return -1;
+ }
+
+ int readPair()
+ {
+ ASSERT(pos + 1 < length);
+ return input[pos] | input[pos + 1] << 16;
+ }
+
+ int readChecked(unsigned negativePositionOffest)
+ {
+ RELEASE_ASSERT(pos >= negativePositionOffest);
+ unsigned p = pos - negativePositionOffest;
+ ASSERT(p < length);
+ return input[p];
+ }
+
+ int reread(unsigned from)
+ {
+ ASSERT(from < length);
+ return input[from];
+ }
+
+ int prev()
+ {
+ ASSERT(!(pos > length));
+ if (pos && length)
+ return input[pos - 1];
+ return -1;
+ }
+
+ unsigned getPos()
+ {
+ return pos;
+ }
+
+ void setPos(unsigned p)
+ {
+ pos = p;
+ }
+
+ bool atStart()
+ {
+ return pos == 0;
+ }
+
+ bool atEnd()
+ {
+ return pos == length;
+ }
+
+ unsigned end()
+ {
+ return length;
+ }
+
+ bool checkInput(unsigned count)
+ {
+ if (((pos + count) <= length) && ((pos + count) >= pos)) {
+ pos += count;
+ return true;
+ }
+ return false;
+ }
+
+ void uncheckInput(unsigned count)
+ {
+ RELEASE_ASSERT(pos >= count);
+ pos -= count;
+ }
+
+ bool atStart(unsigned negativePositionOffest)
+ {
+ return pos == negativePositionOffest;
+ }
+
+ bool atEnd(unsigned negativePositionOffest)
+ {
+ RELEASE_ASSERT(pos >= negativePositionOffest);
+ return (pos - negativePositionOffest) == length;
+ }
+
+ bool isAvailableInput(unsigned offset)
+ {
+ return (((pos + offset) <= length) && ((pos + offset) >= pos));
+ }
+
+ private:
+ const CharType* input;
+ unsigned pos;
+ unsigned length;
+ };
+
+ bool testCharacterClass(CharacterClass* characterClass, int ch)
+ {
+ if (ch & 0xFF80) {
+ for (unsigned i = 0; i < characterClass->m_matchesUnicode.size(); ++i)
+ if (ch == characterClass->m_matchesUnicode[i])
+ return true;
+ for (unsigned i = 0; i < characterClass->m_rangesUnicode.size(); ++i)
+ if ((ch >= characterClass->m_rangesUnicode[i].begin) && (ch <= characterClass->m_rangesUnicode[i].end))
+ return true;
+ } else {
+ for (unsigned i = 0; i < characterClass->m_matches.size(); ++i)
+ if (ch == characterClass->m_matches[i])
+ return true;
+ for (unsigned i = 0; i < characterClass->m_ranges.size(); ++i)
+ if ((ch >= characterClass->m_ranges[i].begin) && (ch <= characterClass->m_ranges[i].end))
+ return true;
+ }
+
+ return false;
+ }
+
+ bool checkCharacter(int testChar, unsigned negativeInputOffset)
+ {
+ return testChar == input.readChecked(negativeInputOffset);
+ }
+
+ bool checkCasedCharacter(int loChar, int hiChar, unsigned negativeInputOffset)
+ {
+ int ch = input.readChecked(negativeInputOffset);
+ return (loChar == ch) || (hiChar == ch);
+ }
+
+ bool checkCharacterClass(CharacterClass* characterClass, bool invert, unsigned negativeInputOffset)
+ {
+ bool match = testCharacterClass(characterClass, input.readChecked(negativeInputOffset));
+ return invert ? !match : match;
+ }
+
+ bool tryConsumeBackReference(int matchBegin, int matchEnd, unsigned negativeInputOffset)
+ {
+ unsigned matchSize = (unsigned)(matchEnd - matchBegin);
+
+ if (!input.checkInput(matchSize))
+ return false;
+
+ if (pattern->m_ignoreCase) {
+ for (unsigned i = 0; i < matchSize; ++i) {
+ int oldCh = input.reread(matchBegin + i);
+ int ch = input.readChecked(negativeInputOffset + matchSize - i);
+
+ if (oldCh == ch)
+ continue;
+
+ // The definition for canonicalize (see ES 5.1, 15.10.2.8) means that
+ // unicode values are never allowed to match against ascii ones.
+ if (isASCII(oldCh) || isASCII(ch)) {
+ if (toASCIIUpper(oldCh) == toASCIIUpper(ch))
+ continue;
+ } else if (areCanonicallyEquivalent(oldCh, ch))
+ continue;
+
+ input.uncheckInput(matchSize);
+ return false;
+ }
+ } else {
+ for (unsigned i = 0; i < matchSize; ++i) {
+ if (!checkCharacter(input.reread(matchBegin + i), negativeInputOffset + matchSize - i)) {
+ input.uncheckInput(matchSize);
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ bool matchAssertionBOL(ByteTerm& term)
+ {
+ return (input.atStart(term.inputPosition)) || (pattern->m_multiline && testCharacterClass(pattern->newlineCharacterClass, input.readChecked(term.inputPosition + 1)));
+ }
+
+ bool matchAssertionEOL(ByteTerm& term)
+ {
+ if (term.inputPosition)
+ return (input.atEnd(term.inputPosition)) || (pattern->m_multiline && testCharacterClass(pattern->newlineCharacterClass, input.readChecked(term.inputPosition)));
+
+ return (input.atEnd()) || (pattern->m_multiline && testCharacterClass(pattern->newlineCharacterClass, input.read()));
+ }
+
+ bool matchAssertionWordBoundary(ByteTerm& term)
+ {
+ bool prevIsWordchar = !input.atStart(term.inputPosition) && testCharacterClass(pattern->wordcharCharacterClass, input.readChecked(term.inputPosition + 1));
+ bool readIsWordchar;
+ if (term.inputPosition)
+ readIsWordchar = !input.atEnd(term.inputPosition) && testCharacterClass(pattern->wordcharCharacterClass, input.readChecked(term.inputPosition));
+ else
+ readIsWordchar = !input.atEnd() && testCharacterClass(pattern->wordcharCharacterClass, input.read());
+
+ bool wordBoundary = prevIsWordchar != readIsWordchar;
+ return term.invert() ? !wordBoundary : wordBoundary;
+ }
+
+ bool backtrackPatternCharacter(ByteTerm& term, DisjunctionContext* context)
+ {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.uncheckInput(1);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ ++backTrack->matchAmount;
+ if (checkCharacter(term.atom.patternCharacter, term.inputPosition + 1))
+ return true;
+ }
+ input.uncheckInput(backTrack->matchAmount);
+ break;
+ }
+
+ return false;
+ }
+
+ bool backtrackPatternCasedCharacter(ByteTerm& term, DisjunctionContext* context)
+ {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.uncheckInput(1);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ ++backTrack->matchAmount;
+ if (checkCasedCharacter(term.atom.casedCharacter.lo, term.atom.casedCharacter.hi, term.inputPosition + 1))
+ return true;
+ }
+ input.uncheckInput(backTrack->matchAmount);
+ break;
+ }
+
+ return false;
+ }
+
+ bool matchCharacterClass(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeCharacterClass);
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ for (unsigned matchAmount = 0; matchAmount < term.atom.quantityCount; ++matchAmount) {
+ if (!checkCharacterClass(term.atom.characterClass, term.invert(), term.inputPosition - matchAmount))
+ return false;
+ }
+ return true;
+ }
+
+ case QuantifierGreedy: {
+ unsigned matchAmount = 0;
+ while ((matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ if (!checkCharacterClass(term.atom.characterClass, term.invert(), term.inputPosition + 1)) {
+ input.uncheckInput(1);
+ break;
+ }
+ ++matchAmount;
+ }
+ backTrack->matchAmount = matchAmount;
+
+ return true;
+ }
+
+ case QuantifierNonGreedy:
+ backTrack->matchAmount = 0;
+ return true;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ bool backtrackCharacterClass(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeCharacterClass);
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.uncheckInput(1);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && input.checkInput(1)) {
+ ++backTrack->matchAmount;
+ if (checkCharacterClass(term.atom.characterClass, term.invert(), term.inputPosition + 1))
+ return true;
+ }
+ input.uncheckInput(backTrack->matchAmount);
+ break;
+ }
+
+ return false;
+ }
+
+ bool matchBackReference(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeBackReference);
+ BackTrackInfoBackReference* backTrack = reinterpret_cast<BackTrackInfoBackReference*>(context->frame + term.frameLocation);
+
+ unsigned matchBegin = output[(term.atom.subpatternId << 1)];
+ unsigned matchEnd = output[(term.atom.subpatternId << 1) + 1];
+
+ // If the end position of the referenced match hasn't set yet then the backreference in the same parentheses where it references to that.
+ // In this case the result of match is empty string like when it references to a parentheses with zero-width match.
+ // Eg.: /(a\1)/
+ if (matchEnd == offsetNoMatch)
+ return true;
+
+ if (matchBegin == offsetNoMatch)
+ return true;
+
+ ASSERT(matchBegin <= matchEnd);
+
+ if (matchBegin == matchEnd)
+ return true;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ backTrack->begin = input.getPos();
+ for (unsigned matchAmount = 0; matchAmount < term.atom.quantityCount; ++matchAmount) {
+ if (!tryConsumeBackReference(matchBegin, matchEnd, term.inputPosition)) {
+ input.setPos(backTrack->begin);
+ return false;
+ }
+ }
+ return true;
+ }
+
+ case QuantifierGreedy: {
+ unsigned matchAmount = 0;
+ while ((matchAmount < term.atom.quantityCount) && tryConsumeBackReference(matchBegin, matchEnd, term.inputPosition))
+ ++matchAmount;
+ backTrack->matchAmount = matchAmount;
+ return true;
+ }
+
+ case QuantifierNonGreedy:
+ backTrack->begin = input.getPos();
+ backTrack->matchAmount = 0;
+ return true;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ bool backtrackBackReference(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeBackReference);
+ BackTrackInfoBackReference* backTrack = reinterpret_cast<BackTrackInfoBackReference*>(context->frame + term.frameLocation);
+
+ unsigned matchBegin = output[(term.atom.subpatternId << 1)];
+ unsigned matchEnd = output[(term.atom.subpatternId << 1) + 1];
+
+ if (matchBegin == offsetNoMatch)
+ return false;
+
+ ASSERT(matchBegin <= matchEnd);
+
+ if (matchBegin == matchEnd)
+ return false;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount:
+ // for quantityCount == 1, could rewind.
+ input.setPos(backTrack->begin);
+ break;
+
+ case QuantifierGreedy:
+ if (backTrack->matchAmount) {
+ --backTrack->matchAmount;
+ input.rewind(matchEnd - matchBegin);
+ return true;
+ }
+ break;
+
+ case QuantifierNonGreedy:
+ if ((backTrack->matchAmount < term.atom.quantityCount) && tryConsumeBackReference(matchBegin, matchEnd, term.inputPosition)) {
+ ++backTrack->matchAmount;
+ return true;
+ }
+ input.setPos(backTrack->begin);
+ break;
+ }
+
+ return false;
+ }
+
+ void recordParenthesesMatch(ByteTerm& term, ParenthesesDisjunctionContext* context)
+ {
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1)] = context->getDisjunctionContext(term)->matchBegin + term.inputPosition;
+ output[(subpatternId << 1) + 1] = context->getDisjunctionContext(term)->matchEnd + term.inputPosition;
+ }
+ }
+ void resetMatches(ByteTerm& term, ParenthesesDisjunctionContext* context)
+ {
+ unsigned firstSubpatternId = term.atom.subpatternId;
+ unsigned count = term.atom.parenthesesDisjunction->m_numSubpatterns;
+ context->restoreOutput(output, firstSubpatternId, count);
+ }
+ // Pops recorded parentheses iterations one at a time, asking each popped
+ // iteration's disjunction to re-match from an alternative position. Returns
+ // JSRegExpMatch on the first successful re-match, JSRegExpNoMatch when all
+ // iterations are exhausted, or an error result immediately.
+ JSRegExpResult parenthesesDoBacktrack(ByteTerm& term, BackTrackInfoParentheses* backTrack)
+ {
+ while (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+
+ JSRegExpResult result = matchDisjunction(term.atom.parenthesesDisjunction, context->getDisjunctionContext(term), true);
+ if (result == JSRegExpMatch)
+ return JSRegExpMatch;
+
+ // This iteration has no more alternatives - undo its captures and drop it.
+ resetMatches(term, context);
+ popParenthesesDisjunctionContext(backTrack);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+ return JSRegExpNoMatch;
+ }
+
+ // Forward-matches the begin term of a quantityCount==1 parentheses subpattern
+ // ("once" form, inlined rather than using a nested disjunction context).
+ bool matchParenthesesOnceBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierGreedy: {
+ // set this speculatively; if we get to the parens end this will be true.
+ backTrack->begin = input.getPos();
+ break;
+ }
+ case QuantifierNonGreedy: {
+ // Non-greedy: first try matching nothing - skip over the parens body.
+ backTrack->begin = notFound;
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ }
+ case QuantifierFixedCount:
+ break;
+ }
+
+ // Record the capture's start offset now; the end is written by the end term.
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1)] = input.getPos() - term.inputPosition;
+ }
+
+ return true;
+ }
+
+ // Forward-matches the end term of a quantityCount==1 parentheses subpattern.
+ // For quantified (non-fixed) parens an empty match is rejected, so a
+ // zero-width body cannot loop.
+ bool matchParenthesesOnceEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1) + 1] = input.getPos() + term.inputPosition;
+ }
+
+ if (term.atom.quantityType == QuantifierFixedCount)
+ return true;
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+ // Fail (forcing a backtrack) if the parens matched no input at all.
+ return backTrack->begin != input.getPos();
+ }
+
+ // Backtracks the begin term of a quantityCount==1 parentheses subpattern.
+ // Clears any capture recorded for the group, then decides whether a
+ // zero-iteration match is still available.
+ bool backtrackParenthesesOnceBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+
+ if (term.capture()) {
+ unsigned subpatternId = term.atom.subpatternId;
+ output[(subpatternId << 1)] = offsetNoMatch;
+ output[(subpatternId << 1) + 1] = offsetNoMatch;
+ }
+
+ switch (term.atom.quantityType) {
+ case QuantifierGreedy:
+ // if we backtrack to this point, there is another chance - try matching nothing.
+ ASSERT(backTrack->begin != notFound);
+ backTrack->begin = notFound;
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ case QuantifierNonGreedy:
+ ASSERT(backTrack->begin != notFound);
+ // Intentional fall-through: non-greedy has already tried both zero and
+ // one iterations by the time we arrive here, so it fails like fixed.
+ case QuantifierFixedCount:
+ break;
+ }
+
+ return false;
+ }
+
+ // Backtracks the end term of a quantityCount==1 parentheses subpattern.
+ // For non-greedy parens whose zero-iteration attempt failed, this is where
+ // the one-iteration attempt is started (by jumping back to the begin term).
+ bool backtrackParenthesesOnceEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternOnceEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParenthesesOnce* backTrack = reinterpret_cast<BackTrackInfoParenthesesOnce*>(context->frame + term.frameLocation);
+
+ switch (term.atom.quantityType) {
+ case QuantifierGreedy:
+ // begin == notFound means the zero-iteration match just failed;
+ // rewind into the parens body to keep backtracking it.
+ if (backTrack->begin == notFound) {
+ context->term -= term.atom.parenthesesWidth;
+ return false;
+ }
+ // Intentional fall-through (begin != notFound falls to the fixed case).
+ case QuantifierNonGreedy:
+ if (backTrack->begin == notFound) {
+ backTrack->begin = input.getPos();
+ if (term.capture()) {
+ // Technically this access to inputPosition should be accessing the begin term's
+ // inputPosition, but for repeats other than fixed these values should be
+ // the same anyway! (We don't pre-check for greedy or non-greedy matches.)
+ ASSERT((&term - term.atom.parenthesesWidth)->type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+ ASSERT((&term - term.atom.parenthesesWidth)->inputPosition == term.inputPosition);
+ unsigned subpatternId = term.atom.subpatternId;
+ output[subpatternId << 1] = input.getPos() + term.inputPosition;
+ }
+ context->term -= term.atom.parenthesesWidth;
+ return true;
+ }
+ // Intentional fall-through.
+ case QuantifierFixedCount:
+ break;
+ }
+
+ return false;
+ }
+
+ // Forward-matches the begin term of a "terminal" parentheses subpattern
+ // (a non-capturing, unbounded greedy group at the end of the pattern).
+ // Records the loop-entry position so an empty iteration can be detected.
+ bool matchParenthesesTerminalBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternTerminalBegin);
+ ASSERT(term.atom.quantityType == QuantifierGreedy);
+ ASSERT(term.atom.quantityCount == quantifyInfinite);
+ ASSERT(!term.capture());
+
+ BackTrackInfoParenthesesTerminal* backTrack = reinterpret_cast<BackTrackInfoParenthesesTerminal*>(context->frame + term.frameLocation);
+ backTrack->begin = input.getPos();
+ return true;
+ }
+
+ // Forward-matches the end term of a terminal parentheses subpattern: a
+ // non-empty iteration loops straight back to the begin term to match again.
+ bool matchParenthesesTerminalEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternTerminalEnd);
+
+ BackTrackInfoParenthesesTerminal* backTrack = reinterpret_cast<BackTrackInfoParenthesesTerminal*>(context->frame + term.frameLocation);
+ // Empty match is a failed match.
+ if (backTrack->begin == input.getPos())
+ return false;
+
+ // Successful match! Okay, what's next? - loop around and try to match moar!
+ context->term -= (term.atom.parenthesesWidth + 1);
+ return true;
+ }
+
+ // Backtracks the begin term of a terminal parentheses subpattern. Always
+ // succeeds: skip the body and accept the iterations matched so far.
+ bool backtrackParenthesesTerminalBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpatternTerminalBegin);
+ ASSERT(term.atom.quantityType == QuantifierGreedy);
+ ASSERT(term.atom.quantityCount == quantifyInfinite);
+ ASSERT(!term.capture());
+
+ // If we backtrack to this point, we have failed to match this iteration of the parens.
+ // Since this is greedy / zero minimum a failed is also accepted as a match!
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ }
+
+ // Backtracking past a terminal parens end is impossible by construction.
+ bool backtrackParenthesesTerminalEnd(ByteTerm&, DisjunctionContext*)
+ {
+ // 'Terminal' parentheses are at the end of the regex, and as such a match past end
+ // should always be returned as a successful match - we should never backtrack to here.
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+
+ // Forward-matches the begin term of a (?=...)/(?!...) assertion. Saves the
+ // current input position so the assertion's zero-width semantics can be
+ // restored at the end term.
+ bool matchParentheticalAssertionBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParentheticalAssertion* backTrack = reinterpret_cast<BackTrackInfoParentheticalAssertion*>(context->frame + term.frameLocation);
+
+ backTrack->begin = input.getPos();
+ return true;
+ }
+
+ // Forward-matches the end term of a parenthetical assertion. The input
+ // position is rewound to the saved start (assertions consume no input).
+ // Reaching the end means the body matched - failure for an inverted (?!...).
+ bool matchParentheticalAssertionEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParentheticalAssertion* backTrack = reinterpret_cast<BackTrackInfoParentheticalAssertion*>(context->frame + term.frameLocation);
+
+ input.setPos(backTrack->begin);
+
+ // We've reached the end of the parens; if they are inverted, this is failure.
+ if (term.invert()) {
+ context->term -= term.atom.parenthesesWidth;
+ return false;
+ }
+
+ return true;
+ }
+
+ // Backtracks the begin term of a parenthetical assertion. The body failed to
+ // match - success for an inverted (?!...), failure otherwise.
+ bool backtrackParentheticalAssertionBegin(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionBegin);
+ ASSERT(term.atom.quantityCount == 1);
+
+ // We've failed to match parens; if they are inverted, this is win!
+ if (term.invert()) {
+ context->term += term.atom.parenthesesWidth;
+ return true;
+ }
+
+ return false;
+ }
+
+ // Backtracks into a parenthetical assertion from the outside: restore the
+ // saved position and rewind into the assertion body to continue backtracking.
+ bool backtrackParentheticalAssertionEnd(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParentheticalAssertionEnd);
+ ASSERT(term.atom.quantityCount == 1);
+
+ BackTrackInfoParentheticalAssertion* backTrack = reinterpret_cast<BackTrackInfoParentheticalAssertion*>(context->frame + term.frameLocation);
+
+ input.setPos(backTrack->begin);
+
+ context->term -= term.atom.parenthesesWidth;
+ return false;
+ }
+
+ // Forward-matches a general quantified parentheses subpattern. Each matched
+ // iteration is pushed onto a per-term stack of ParenthesesDisjunctionContexts
+ // (backTrack->lastContext) so it can later be unwound by backtrackParentheses.
+ // Returns JSRegExpMatch, JSRegExpNoMatch, or an error result.
+ JSRegExpResult matchParentheses(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpattern);
+
+ BackTrackInfoParentheses* backTrack = reinterpret_cast<BackTrackInfoParentheses*>(context->frame + term.frameLocation);
+ ByteDisjunction* disjunctionBody = term.atom.parenthesesDisjunction;
+
+ backTrack->matchAmount = 0;
+ backTrack->lastContext = 0;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ // While we haven't yet reached our fixed limit,
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+ // Try to do a match, and if it succeeds, add it to the list.
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult result = matchDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (result == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ // The match failed; try to find an alternate point to carry on from.
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ JSRegExpResult backtrackResult = parenthesesDoBacktrack(term, backTrack);
+ if (backtrackResult != JSRegExpMatch)
+ return backtrackResult;
+ }
+ }
+
+ ASSERT(backTrack->matchAmount == term.atom.quantityCount);
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ return JSRegExpMatch;
+ }
+
+ case QuantifierGreedy: {
+ // Greedy: consume as many (non-empty) iterations as possible up front.
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (result == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+
+ break;
+ }
+ }
+
+ if (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ }
+ return JSRegExpMatch;
+ }
+
+ case QuantifierNonGreedy:
+ // Non-greedy: start by matching zero iterations; more are added only
+ // when backtracking (see backtrackParentheses).
+ return JSRegExpMatch;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return JSRegExpErrorNoMatch;
+ }
+
+ // Rules for backtracking differ depending on whether this is greedy or non-greedy.
+ //
+ // Greedy matches never should try just adding more - you should already have done
+ // the 'more' cases. Always backtrack, at least a leetle bit. However cases where
+ // you backtrack an item off the list needs checking, since we'll never have matched
+ // the one less case. Tracking forwards, still add as much as possible.
+ //
+ // Non-greedy, we've already done the one less case, so don't match on popping.
+ // We haven't done the one more case, so always try to add that.
+ //
+ // Backtracks a general quantified parentheses subpattern; see the comment
+ // block above for the greedy/non-greedy strategy. Returns JSRegExpMatch when
+ // an alternative set of iterations has been found, JSRegExpNoMatch when the
+ // term is exhausted, or an error result.
+ JSRegExpResult backtrackParentheses(ByteTerm& term, DisjunctionContext* context)
+ {
+ ASSERT(term.type == ByteTerm::TypeParenthesesSubpattern);
+
+ BackTrackInfoParentheses* backTrack = reinterpret_cast<BackTrackInfoParentheses*>(context->frame + term.frameLocation);
+ ByteDisjunction* disjunctionBody = term.atom.parenthesesDisjunction;
+
+ switch (term.atom.quantityType) {
+ case QuantifierFixedCount: {
+ ASSERT(backTrack->matchAmount == term.atom.quantityCount);
+
+ ParenthesesDisjunctionContext* context = 0;
+ JSRegExpResult result = parenthesesDoBacktrack(term, backTrack);
+
+ if (result != JSRegExpMatch)
+ return result;
+
+ // While we haven't yet reached our fixed limit,
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+ // Try to do a match, and if it succeeds, add it to the list.
+ context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ result = matchDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+
+ if (result == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ // The match failed; try to find an alternate point to carry on from.
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ JSRegExpResult backtrackResult = parenthesesDoBacktrack(term, backTrack);
+ if (backtrackResult != JSRegExpMatch)
+ return backtrackResult;
+ }
+ }
+
+ ASSERT(backTrack->matchAmount == term.atom.quantityCount);
+ context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ return JSRegExpMatch;
+ }
+
+ case QuantifierGreedy: {
+ if (!backTrack->matchAmount)
+ return JSRegExpNoMatch;
+
+ // Re-backtrack the most recent iteration; if it finds an alternative,
+ // greedily refill with as many further iterations as possible.
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term), true);
+ if (result == JSRegExpMatch) {
+ while (backTrack->matchAmount < term.atom.quantityCount) {
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult parenthesesResult = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (parenthesesResult == JSRegExpMatch)
+ appendParenthesesDisjunctionContext(backTrack, context);
+ else {
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (parenthesesResult != JSRegExpNoMatch)
+ return parenthesesResult;
+
+ break;
+ }
+ }
+ } else {
+ // The last iteration is exhausted - pop it off the stack.
+ resetMatches(term, context);
+ popParenthesesDisjunctionContext(backTrack);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+ if (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ }
+ return JSRegExpMatch;
+ }
+
+ case QuantifierNonGreedy: {
+ // If we've not reached the limit, try to add one more match.
+ if (backTrack->matchAmount < term.atom.quantityCount) {
+ ParenthesesDisjunctionContext* context = allocParenthesesDisjunctionContext(disjunctionBody, output, term);
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term));
+ if (result == JSRegExpMatch) {
+ appendParenthesesDisjunctionContext(backTrack, context);
+ recordParenthesesMatch(term, context);
+ return JSRegExpMatch;
+ }
+
+ resetMatches(term, context);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+ // Nope - okay backtrack looking for an alternative.
+ while (backTrack->matchAmount) {
+ ParenthesesDisjunctionContext* context = backTrack->lastContext;
+ JSRegExpResult result = matchNonZeroDisjunction(disjunctionBody, context->getDisjunctionContext(term), true);
+ if (result == JSRegExpMatch) {
+ // successful backtrack! we're back in the game!
+ if (backTrack->matchAmount) {
+ context = backTrack->lastContext;
+ recordParenthesesMatch(term, context);
+ }
+ return JSRegExpMatch;
+ }
+
+ // pop a match off the stack
+ resetMatches(term, context);
+ popParenthesesDisjunctionContext(backTrack);
+ freeParenthesesDisjunctionContext(context);
+
+ if (result != JSRegExpNoMatch)
+ return result;
+ }
+
+ return JSRegExpNoMatch;
+ }
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return JSRegExpErrorNoMatch;
+ }
+
+ // Handles the /.*x.*/ fast-path term: expands the match backwards and
+ // forwards to the surrounding newline boundaries (or string ends), then
+ // checks the pattern's anchor requirements.
+ // NOTE(review): UNUSED_PARAM(term) looks stale - term is read below
+ // (term.anchors); confirm against upstream before removing.
+ bool matchDotStarEnclosure(ByteTerm& term, DisjunctionContext* context)
+ {
+ UNUSED_PARAM(term);
+ unsigned matchBegin = context->matchBegin;
+
+ // Scan backwards to just after the previous newline (or position 0).
+ if (matchBegin) {
+ for (matchBegin--; true; matchBegin--) {
+ if (testCharacterClass(pattern->newlineCharacterClass, input.reread(matchBegin))) {
+ ++matchBegin;
+ break;
+ }
+
+ if (!matchBegin)
+ break;
+ }
+ }
+
+ unsigned matchEnd = input.getPos();
+
+ // Scan forwards to the next newline (or end of input).
+ for (; (matchEnd != input.end())
+ && (!testCharacterClass(pattern->newlineCharacterClass, input.reread(matchEnd))); matchEnd++) { }
+
+ // Without /m, a ^ anchor requires matchBegin == 0 and a $ anchor requires
+ // matchEnd == end of input.
+ if (((matchBegin && term.anchors.m_bol)
+ || ((matchEnd != input.end()) && term.anchors.m_eol))
+ && !pattern->m_multiline)
+ return false;
+
+ context->matchBegin = matchBegin;
+ context->matchEnd = matchEnd;
+ return true;
+ }
+
+ // Dispatch helpers for the interpreter loop: advance to the next term, or
+ // step back to the previous term and enter the backtracking switch.
+#define MATCH_NEXT() { ++context->term; goto matchAgain; }
+#define BACKTRACK() { --context->term; goto backtrack; }
+#define currentTerm() (disjunction->terms[context->term])
+ // Core interpreter loop: executes the bytecode terms of one disjunction.
+ // With btrack == true, resumes by backtracking the current term instead of
+ // starting a fresh match. Returns JSRegExpMatch/JSRegExpNoMatch, or
+ // JSRegExpErrorHitLimit once remainingMatchCount is exhausted.
+ JSRegExpResult matchDisjunction(ByteDisjunction* disjunction, DisjunctionContext* context, bool btrack = false)
+ {
+ // Global budget guarding against pathological (exponential) backtracking.
+ if (!--remainingMatchCount)
+ return JSRegExpErrorHitLimit;
+
+ if (btrack)
+ BACKTRACK();
+
+ context->matchBegin = input.getPos();
+ context->term = 0;
+
+ // Forward-matching dispatch: each case either consumes the term and moves
+ // on (MATCH_NEXT) or fails into the backtracking switch (BACKTRACK).
+ matchAgain:
+ ASSERT(context->term < static_cast<int>(disjunction->terms.size()));
+
+ switch (currentTerm().type) {
+ case ByteTerm::TypeSubpatternBegin:
+ MATCH_NEXT();
+ case ByteTerm::TypeSubpatternEnd:
+ context->matchEnd = input.getPos();
+ return JSRegExpMatch;
+
+ case ByteTerm::TypeBodyAlternativeBegin:
+ MATCH_NEXT();
+ case ByteTerm::TypeBodyAlternativeDisjunction:
+ case ByteTerm::TypeBodyAlternativeEnd:
+ context->matchEnd = input.getPos();
+ return JSRegExpMatch;
+
+ case ByteTerm::TypeAlternativeBegin:
+ MATCH_NEXT();
+ case ByteTerm::TypeAlternativeDisjunction:
+ case ByteTerm::TypeAlternativeEnd: {
+ // An alternative completed: record how far we jump to its end so the
+ // backtracking case (TypeAlternativeEnd) can undo the jump.
+ int offset = currentTerm().alternative.end;
+ BackTrackInfoAlternative* backTrack = reinterpret_cast<BackTrackInfoAlternative*>(context->frame + currentTerm().frameLocation);
+ backTrack->offset = offset;
+ context->term += offset;
+ MATCH_NEXT();
+ }
+
+ case ByteTerm::TypeAssertionBOL:
+ if (matchAssertionBOL(currentTerm()))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeAssertionEOL:
+ if (matchAssertionEOL(currentTerm()))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeAssertionWordBoundary:
+ if (matchAssertionWordBoundary(currentTerm()))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypePatternCharacterOnce:
+ case ByteTerm::TypePatternCharacterFixed: {
+ for (unsigned matchAmount = 0; matchAmount < currentTerm().atom.quantityCount; ++matchAmount) {
+ if (!checkCharacter(currentTerm().atom.patternCharacter, currentTerm().inputPosition - matchAmount))
+ BACKTRACK();
+ }
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCharacterGreedy: {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ unsigned matchAmount = 0;
+ while ((matchAmount < currentTerm().atom.quantityCount) && input.checkInput(1)) {
+ if (!checkCharacter(currentTerm().atom.patternCharacter, currentTerm().inputPosition + 1)) {
+ input.uncheckInput(1);
+ break;
+ }
+ ++matchAmount;
+ }
+ backTrack->matchAmount = matchAmount;
+
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCharacterNonGreedy: {
+ // Non-greedy starts at zero repetitions; more are consumed on backtrack.
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ backTrack->matchAmount = 0;
+ MATCH_NEXT();
+ }
+
+ case ByteTerm::TypePatternCasedCharacterOnce:
+ case ByteTerm::TypePatternCasedCharacterFixed: {
+ for (unsigned matchAmount = 0; matchAmount < currentTerm().atom.quantityCount; ++matchAmount) {
+ if (!checkCasedCharacter(currentTerm().atom.casedCharacter.lo, currentTerm().atom.casedCharacter.hi, currentTerm().inputPosition - matchAmount))
+ BACKTRACK();
+ }
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCasedCharacterGreedy: {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ unsigned matchAmount = 0;
+ while ((matchAmount < currentTerm().atom.quantityCount) && input.checkInput(1)) {
+ if (!checkCasedCharacter(currentTerm().atom.casedCharacter.lo, currentTerm().atom.casedCharacter.hi, currentTerm().inputPosition + 1)) {
+ input.uncheckInput(1);
+ break;
+ }
+ ++matchAmount;
+ }
+ backTrack->matchAmount = matchAmount;
+
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypePatternCasedCharacterNonGreedy: {
+ BackTrackInfoPatternCharacter* backTrack = reinterpret_cast<BackTrackInfoPatternCharacter*>(context->frame + currentTerm().frameLocation);
+ backTrack->matchAmount = 0;
+ MATCH_NEXT();
+ }
+
+ case ByteTerm::TypeCharacterClass:
+ if (matchCharacterClass(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeBackReference:
+ if (matchBackReference(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpattern: {
+ JSRegExpResult result = matchParentheses(currentTerm(), context);
+
+ if (result == JSRegExpMatch) {
+ MATCH_NEXT();
+ } else if (result != JSRegExpNoMatch)
+ return result;
+
+ BACKTRACK();
+ }
+ case ByteTerm::TypeParenthesesSubpatternOnceBegin:
+ if (matchParenthesesOnceBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternOnceEnd:
+ if (matchParenthesesOnceEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalBegin:
+ if (matchParenthesesTerminalBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalEnd:
+ if (matchParenthesesTerminalEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionBegin:
+ if (matchParentheticalAssertionBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionEnd:
+ if (matchParentheticalAssertionEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypeCheckInput:
+ if (input.checkInput(currentTerm().checkInputCount))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypeUncheckInput:
+ input.uncheckInput(currentTerm().checkInputCount);
+ MATCH_NEXT();
+
+ case ByteTerm::TypeDotStarEnclosure:
+ if (matchDotStarEnclosure(currentTerm(), context))
+ return JSRegExpMatch;
+ BACKTRACK();
+ }
+
+ // We should never fall-through to here.
+ RELEASE_ASSERT_NOT_REACHED();
+
+ // Backtracking dispatch: each case either finds an alternative and resumes
+ // forward matching (MATCH_NEXT) or continues unwinding (BACKTRACK).
+ backtrack:
+ ASSERT(context->term < static_cast<int>(disjunction->terms.size()));
+
+ switch (currentTerm().type) {
+ case ByteTerm::TypeSubpatternBegin:
+ return JSRegExpNoMatch;
+ case ByteTerm::TypeSubpatternEnd:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case ByteTerm::TypeBodyAlternativeBegin:
+ case ByteTerm::TypeBodyAlternativeDisjunction: {
+ // Try the next body alternative; if none remain, "bumpalong" - advance
+ // the match start position by one and retry from the first alternative.
+ int offset = currentTerm().alternative.next;
+ context->term += offset;
+ if (offset > 0)
+ MATCH_NEXT();
+
+ if (input.atEnd())
+ return JSRegExpNoMatch;
+
+ input.next();
+
+ context->matchBegin = input.getPos();
+
+ // Once-through alternatives are only valid at the initial position.
+ if (currentTerm().alternative.onceThrough)
+ context->term += currentTerm().alternative.next;
+
+ MATCH_NEXT();
+ }
+ case ByteTerm::TypeBodyAlternativeEnd:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case ByteTerm::TypeAlternativeBegin:
+ case ByteTerm::TypeAlternativeDisjunction: {
+ // Try the next alternative in this (nested) disjunction, if any.
+ int offset = currentTerm().alternative.next;
+ context->term += offset;
+ if (offset > 0)
+ MATCH_NEXT();
+ BACKTRACK();
+ }
+ case ByteTerm::TypeAlternativeEnd: {
+ // We should never backtrack back into an alternative of the main body of the regex.
+ BackTrackInfoAlternative* backTrack = reinterpret_cast<BackTrackInfoAlternative*>(context->frame + currentTerm().frameLocation);
+ unsigned offset = backTrack->offset;
+ context->term -= offset;
+ BACKTRACK();
+ }
+
+ case ByteTerm::TypeAssertionBOL:
+ case ByteTerm::TypeAssertionEOL:
+ case ByteTerm::TypeAssertionWordBoundary:
+ // Assertions consume no input, so there is nothing to undo.
+ BACKTRACK();
+
+ case ByteTerm::TypePatternCharacterOnce:
+ case ByteTerm::TypePatternCharacterFixed:
+ case ByteTerm::TypePatternCharacterGreedy:
+ case ByteTerm::TypePatternCharacterNonGreedy:
+ if (backtrackPatternCharacter(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypePatternCasedCharacterOnce:
+ case ByteTerm::TypePatternCasedCharacterFixed:
+ case ByteTerm::TypePatternCasedCharacterGreedy:
+ case ByteTerm::TypePatternCasedCharacterNonGreedy:
+ if (backtrackPatternCasedCharacter(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeCharacterClass:
+ if (backtrackCharacterClass(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeBackReference:
+ if (backtrackBackReference(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpattern: {
+ JSRegExpResult result = backtrackParentheses(currentTerm(), context);
+
+ if (result == JSRegExpMatch) {
+ MATCH_NEXT();
+ } else if (result != JSRegExpNoMatch)
+ return result;
+
+ BACKTRACK();
+ }
+ case ByteTerm::TypeParenthesesSubpatternOnceBegin:
+ if (backtrackParenthesesOnceBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternOnceEnd:
+ if (backtrackParenthesesOnceEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalBegin:
+ if (backtrackParenthesesTerminalBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParenthesesSubpatternTerminalEnd:
+ if (backtrackParenthesesTerminalEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionBegin:
+ if (backtrackParentheticalAssertionBegin(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+ case ByteTerm::TypeParentheticalAssertionEnd:
+ if (backtrackParentheticalAssertionEnd(currentTerm(), context))
+ MATCH_NEXT();
+ BACKTRACK();
+
+ case ByteTerm::TypeCheckInput:
+ // Undo the forward check by giving the input back.
+ input.uncheckInput(currentTerm().checkInputCount);
+ BACKTRACK();
+
+ case ByteTerm::TypeUncheckInput:
+ // Undo the forward uncheck by re-consuming the input.
+ input.checkInput(currentTerm().checkInputCount);
+ BACKTRACK();
+
+ case ByteTerm::TypeDotStarEnclosure:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return JSRegExpErrorNoMatch;
+ }
+
+ // Like matchDisjunction, but rejects zero-width matches: while the match is
+ // empty, keeps backtracking until a non-empty match is found or the
+ // disjunction fails. Used by quantified parentheses to prevent infinite
+ // loops on empty iterations.
+ JSRegExpResult matchNonZeroDisjunction(ByteDisjunction* disjunction, DisjunctionContext* context, bool btrack = false)
+ {
+ JSRegExpResult result = matchDisjunction(disjunction, context, btrack);
+
+ if (result == JSRegExpMatch) {
+ while (context->matchBegin == context->matchEnd) {
+ result = matchDisjunction(disjunction, context, true);
+ if (result != JSRegExpMatch)
+ return result;
+ }
+ return JSRegExpMatch;
+ }
+
+ return result;
+ }
+
+ // Entry point: runs the compiled pattern against the input. Returns the
+ // match's begin offset (also stored with its end in output[0]/output[1]),
+ // or offsetNoMatch. Note: hitting the backtrack limit also yields
+ // offsetNoMatch via the result/output consistency below.
+ unsigned interpret()
+ {
+ if (!input.isAvailableInput(0))
+ return offsetNoMatch;
+
+ // Clear the begin slot of every capture group (offsetNoMatch == unmatched).
+ for (unsigned i = 0; i < pattern->m_body->m_numSubpatterns + 1; ++i)
+ output[i << 1] = offsetNoMatch;
+
+ allocatorPool = pattern->m_allocator->startAllocator();
+ RELEASE_ASSERT(allocatorPool);
+
+ DisjunctionContext* context = allocDisjunctionContext(pattern->m_body.get());
+
+ JSRegExpResult result = matchDisjunction(pattern->m_body.get(), context, false);
+ if (result == JSRegExpMatch) {
+ output[0] = context->matchBegin;
+ output[1] = context->matchEnd;
+ }
+
+ freeDisjunctionContext(context);
+
+ pattern->m_allocator->stopAllocator();
+
+ ASSERT((result == JSRegExpMatch) == (output[0] != offsetNoMatch));
+ return output[0];
+ }
+
+ // pattern: compiled bytecode; output: caller-provided capture array
+ // (2 slots per subpattern); input/length/start: the subject string window.
+ // remainingMatchCount is initialised to the global matchLimit budget.
+ Interpreter(BytecodePattern* pattern, unsigned* output, const CharType* input, unsigned length, unsigned start)
+ : pattern(pattern)
+ , output(output)
+ , input(input, start, length)
+ , allocatorPool(0)
+ , remainingMatchCount(matchLimit)
+ {
+ }
+
+private:
+ BytecodePattern* pattern;
+ unsigned* output;
+ InputStream input;
+ BumpPointerPool* allocatorPool;
+ unsigned remainingMatchCount;
+};
+
+class ByteCompiler {
+ // Bookkeeping pushed when an open-paren is emitted: the index of its begin
+ // term, and the alternative index to restore when the paren closes.
+ struct ParenthesesStackEntry {
+ unsigned beginTerm;
+ unsigned savedAlternativeIndex;
+ ParenthesesStackEntry(unsigned beginTerm, unsigned savedAlternativeIndex/*, unsigned subpatternId, bool capture = false*/)
+ : beginTerm(beginTerm)
+ , savedAlternativeIndex(savedAlternativeIndex)
+ {
+ }
+ };
+
+public:
+ // Compiles a parsed YarrPattern into interpreter bytecode (ByteTerms).
+ ByteCompiler(YarrPattern& pattern)
+ : m_pattern(pattern)
+ {
+ m_currentAlternativeIndex = 0;
+ }
+
+ // Emits the full bytecode for the pattern body and returns the owned
+ // BytecodePattern; 'allocator' supplies the interpreter's runtime frames.
+ PassOwnPtr<BytecodePattern> compile(BumpPointerAllocator* allocator)
+ {
+ regexBegin(m_pattern.m_numSubpatterns, m_pattern.m_body->m_callFrameSize, m_pattern.m_body->m_alternatives[0]->onceThrough());
+ emitDisjunction(m_pattern.m_body);
+ regexEnd();
+
+ return adoptPtr(new BytecodePattern(m_bodyDisjunction.release(), m_allParenthesesInfo, m_pattern, allocator));
+ }
+
+ // Emits a term verifying 'count' characters of input remain.
+ void checkInput(unsigned count)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::CheckInput(count));
+ }
+
+ // Emits a term giving back 'count' previously-checked characters.
+ void uncheckInput(unsigned count)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::UncheckInput(count));
+ }
+
+ // Emits a ^ (beginning-of-line) assertion term.
+ void assertionBOL(unsigned inputPosition)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::BOL(inputPosition));
+ }
+
+ // Emits a $ (end-of-line) assertion term.
+ void assertionEOL(unsigned inputPosition)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::EOL(inputPosition));
+ }
+
+ // Emits a \b / \B word-boundary assertion term ('invert' selects \B).
+ void assertionWordBoundary(bool invert, unsigned inputPosition)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::WordBoundary(invert, inputPosition));
+ }
+
+ // Emits a single-character term. Under /i, characters whose upper- and
+ // lower-case forms differ are emitted as a cased-character pair term so the
+ // interpreter can accept either form.
+ void atomPatternCharacter(UChar ch, unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ if (m_pattern.m_ignoreCase) {
+ UChar lo = Unicode::toLower(ch);
+ UChar hi = Unicode::toUpper(ch);
+
+ if (lo != hi) {
+ m_bodyDisjunction->terms.append(ByteTerm(lo, hi, inputPosition, frameLocation, quantityCount, quantityType));
+ return;
+ }
+ }
+
+ m_bodyDisjunction->terms.append(ByteTerm(ch, inputPosition, frameLocation, quantityCount, quantityType));
+ }
+
+ // Emits a character-class term, then patches its quantifier and frame slot
+ // onto the just-appended term.
+ void atomCharacterClass(CharacterClass* characterClass, bool invert, unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm(characterClass, invert, inputPosition));
+
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ }
+
+ // Emits a back-reference term (\1, \2, ...), then patches its quantifier
+ // and frame slot onto the just-appended term.
+ void atomBackReference(unsigned subpatternId, unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ ASSERT(subpatternId);
+
+ m_bodyDisjunction->terms.append(ByteTerm::BackReference(subpatternId, inputPosition));
+
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ }
+
+ // Opens a quantityCount==1 parentheses subpattern: emits its begin term and
+ // first alternative, and pushes the paren bookkeeping onto the stack so the
+ // matching end call can close it.
+ void atomParenthesesOnceBegin(unsigned subpatternId, bool capture, unsigned inputPosition, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternOnceBegin, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
+
+ // Opens a "terminal" parentheses subpattern (unbounded greedy group at the
+ // end of the pattern); structure mirrors atomParenthesesOnceBegin.
+ void atomParenthesesTerminalBegin(unsigned subpatternId, bool capture, unsigned inputPosition, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternTerminalBegin, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
+
+ // Opens a general (possibly quantified) parentheses subpattern. Initially
+ // emitted as a Once begin term; the closing call rewrites it to the general
+ // form when the quantifier requires it.
+ void atomParenthesesSubpatternBegin(unsigned subpatternId, bool capture, unsigned inputPosition, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+ // Errrk! - this is a little crazy, we initially generate as a TypeParenthesesSubpatternOnceBegin,
+ // then fix this up at the end! - simplifying this should make it much clearer.
+ // https://bugs.webkit.org/show_bug.cgi?id=50136
+
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternOnceBegin, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
+
+ void atomParentheticalAssertionBegin(unsigned subpatternId, bool invert, unsigned frameLocation, unsigned alternativeFrameLocation)
+ {
+ int beginTerm = m_bodyDisjunction->terms.size();
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParentheticalAssertionBegin, subpatternId, false, invert, 0));
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = frameLocation;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeBegin());
+ m_bodyDisjunction->terms[m_bodyDisjunction->terms.size() - 1].frameLocation = alternativeFrameLocation;
+
+ m_parenthesesStack.append(ParenthesesStackEntry(beginTerm, m_currentAlternativeIndex));
+ m_currentAlternativeIndex = beginTerm + 1;
+ }
+
+ void atomParentheticalAssertionEnd(unsigned inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParentheticalAssertionBegin);
+
+ bool invert = m_bodyDisjunction->terms[beginTerm].invert();
+ unsigned subpatternId = m_bodyDisjunction->terms[beginTerm].atom.subpatternId;
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParentheticalAssertionEnd, subpatternId, false, invert, inputPosition));
+ m_bodyDisjunction->terms[beginTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].frameLocation = frameLocation;
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[endTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[endTerm].atom.quantityType = quantityType;
+ }
+
+ void assertionDotStarEnclosure(bool bolAnchored, bool eolAnchored)
+ {
+ m_bodyDisjunction->terms.append(ByteTerm::DotStarEnclosure(bolAnchored, eolAnchored));
+ }
+
+ unsigned popParenthesesStack()
+ {
+ ASSERT(m_parenthesesStack.size());
+ int stackEnd = m_parenthesesStack.size() - 1;
+ unsigned beginTerm = m_parenthesesStack[stackEnd].beginTerm;
+ m_currentAlternativeIndex = m_parenthesesStack[stackEnd].savedAlternativeIndex;
+ m_parenthesesStack.shrink(stackEnd);
+
+ ASSERT(beginTerm < m_bodyDisjunction->terms.size());
+ ASSERT(m_currentAlternativeIndex < m_bodyDisjunction->terms.size());
+
+ return beginTerm;
+ }
+
+#ifndef NDEBUG
+ void dumpDisjunction(ByteDisjunction* disjunction)
+ {
+ dataLogF("ByteDisjunction(%p):\n\t", disjunction);
+ for (unsigned i = 0; i < disjunction->terms.size(); ++i)
+ dataLogF("{ %d } ", disjunction->terms[i].type);
+ dataLogF("\n");
+ }
+#endif
+
+ void closeAlternative(int beginTerm)
+ {
+ int origBeginTerm = beginTerm;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeAlternativeBegin);
+ int endIndex = m_bodyDisjunction->terms.size();
+
+ unsigned frameLocation = m_bodyDisjunction->terms[beginTerm].frameLocation;
+
+ if (!m_bodyDisjunction->terms[beginTerm].alternative.next)
+ m_bodyDisjunction->terms.remove(beginTerm);
+ else {
+ while (m_bodyDisjunction->terms[beginTerm].alternative.next) {
+ beginTerm += m_bodyDisjunction->terms[beginTerm].alternative.next;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeAlternativeDisjunction);
+ m_bodyDisjunction->terms[beginTerm].alternative.end = endIndex - beginTerm;
+ m_bodyDisjunction->terms[beginTerm].frameLocation = frameLocation;
+ }
+
+ m_bodyDisjunction->terms[beginTerm].alternative.next = origBeginTerm - beginTerm;
+
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeEnd());
+ m_bodyDisjunction->terms[endIndex].frameLocation = frameLocation;
+ }
+ }
+
+ void closeBodyAlternative()
+ {
+ int beginTerm = 0;
+ int origBeginTerm = 0;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeBodyAlternativeBegin);
+ int endIndex = m_bodyDisjunction->terms.size();
+
+ unsigned frameLocation = m_bodyDisjunction->terms[beginTerm].frameLocation;
+
+ while (m_bodyDisjunction->terms[beginTerm].alternative.next) {
+ beginTerm += m_bodyDisjunction->terms[beginTerm].alternative.next;
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeBodyAlternativeDisjunction);
+ m_bodyDisjunction->terms[beginTerm].alternative.end = endIndex - beginTerm;
+ m_bodyDisjunction->terms[beginTerm].frameLocation = frameLocation;
+ }
+
+ m_bodyDisjunction->terms[beginTerm].alternative.next = origBeginTerm - beginTerm;
+
+ m_bodyDisjunction->terms.append(ByteTerm::BodyAlternativeEnd());
+ m_bodyDisjunction->terms[endIndex].frameLocation = frameLocation;
+ }
+
+ void atomParenthesesSubpatternEnd(unsigned lastSubpatternId, int inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType, unsigned callFrameSize = 0)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+
+ ByteTerm& parenthesesBegin = m_bodyDisjunction->terms[beginTerm];
+
+ bool capture = parenthesesBegin.capture();
+ unsigned subpatternId = parenthesesBegin.atom.subpatternId;
+
+ unsigned numSubpatterns = lastSubpatternId - subpatternId + 1;
+ OwnPtr<ByteDisjunction> parenthesesDisjunction = adoptPtr(new ByteDisjunction(numSubpatterns, callFrameSize));
+
+ unsigned firstTermInParentheses = beginTerm + 1;
+ parenthesesDisjunction->terms.reserveInitialCapacity(endTerm - firstTermInParentheses + 2);
+
+ parenthesesDisjunction->terms.append(ByteTerm::SubpatternBegin());
+ for (unsigned termInParentheses = firstTermInParentheses; termInParentheses < endTerm; ++termInParentheses)
+ parenthesesDisjunction->terms.append(m_bodyDisjunction->terms[termInParentheses]);
+ parenthesesDisjunction->terms.append(ByteTerm::SubpatternEnd());
+
+ m_bodyDisjunction->terms.shrink(beginTerm);
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpattern, subpatternId, parenthesesDisjunction.get(), capture, inputPosition));
+ m_allParenthesesInfo.append(parenthesesDisjunction.release());
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[beginTerm].frameLocation = frameLocation;
+ }
+
+ void atomParenthesesOnceEnd(int inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParenthesesSubpatternOnceBegin);
+
+ bool capture = m_bodyDisjunction->terms[beginTerm].capture();
+ unsigned subpatternId = m_bodyDisjunction->terms[beginTerm].atom.subpatternId;
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternOnceEnd, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[beginTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].frameLocation = frameLocation;
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[endTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[endTerm].atom.quantityType = quantityType;
+ }
+
+ void atomParenthesesTerminalEnd(int inputPosition, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ {
+ unsigned beginTerm = popParenthesesStack();
+ closeAlternative(beginTerm + 1);
+ unsigned endTerm = m_bodyDisjunction->terms.size();
+
+ ASSERT(m_bodyDisjunction->terms[beginTerm].type == ByteTerm::TypeParenthesesSubpatternTerminalBegin);
+
+ bool capture = m_bodyDisjunction->terms[beginTerm].capture();
+ unsigned subpatternId = m_bodyDisjunction->terms[beginTerm].atom.subpatternId;
+
+ m_bodyDisjunction->terms.append(ByteTerm(ByteTerm::TypeParenthesesSubpatternTerminalEnd, subpatternId, capture, false, inputPosition));
+ m_bodyDisjunction->terms[beginTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].atom.parenthesesWidth = endTerm - beginTerm;
+ m_bodyDisjunction->terms[endTerm].frameLocation = frameLocation;
+
+ m_bodyDisjunction->terms[beginTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[beginTerm].atom.quantityType = quantityType;
+ m_bodyDisjunction->terms[endTerm].atom.quantityCount = quantityCount.unsafeGet();
+ m_bodyDisjunction->terms[endTerm].atom.quantityType = quantityType;
+ }
+
+ void regexBegin(unsigned numSubpatterns, unsigned callFrameSize, bool onceThrough)
+ {
+ m_bodyDisjunction = adoptPtr(new ByteDisjunction(numSubpatterns, callFrameSize));
+ m_bodyDisjunction->terms.append(ByteTerm::BodyAlternativeBegin(onceThrough));
+ m_bodyDisjunction->terms[0].frameLocation = 0;
+ m_currentAlternativeIndex = 0;
+ }
+
+ void regexEnd()
+ {
+ closeBodyAlternative();
+ }
+
+ void alternativeBodyDisjunction(bool onceThrough)
+ {
+ int newAlternativeIndex = m_bodyDisjunction->terms.size();
+ m_bodyDisjunction->terms[m_currentAlternativeIndex].alternative.next = newAlternativeIndex - m_currentAlternativeIndex;
+ m_bodyDisjunction->terms.append(ByteTerm::BodyAlternativeDisjunction(onceThrough));
+
+ m_currentAlternativeIndex = newAlternativeIndex;
+ }
+
+ void alternativeDisjunction()
+ {
+ int newAlternativeIndex = m_bodyDisjunction->terms.size();
+ m_bodyDisjunction->terms[m_currentAlternativeIndex].alternative.next = newAlternativeIndex - m_currentAlternativeIndex;
+ m_bodyDisjunction->terms.append(ByteTerm::AlternativeDisjunction());
+
+ m_currentAlternativeIndex = newAlternativeIndex;
+ }
+
+ void emitDisjunction(PatternDisjunction* disjunction, unsigned inputCountAlreadyChecked = 0, unsigned parenthesesInputCountAlreadyChecked = 0)
+ {
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt) {
+ unsigned currentCountAlreadyChecked = inputCountAlreadyChecked;
+
+ PatternAlternative* alternative = disjunction->m_alternatives[alt].get();
+
+ if (alt) {
+ if (disjunction == m_pattern.m_body)
+ alternativeBodyDisjunction(alternative->onceThrough());
+ else
+ alternativeDisjunction();
+ }
+
+ unsigned minimumSize = alternative->m_minimumSize;
+ ASSERT(minimumSize >= parenthesesInputCountAlreadyChecked);
+ unsigned countToCheck = minimumSize - parenthesesInputCountAlreadyChecked;
+
+ if (countToCheck) {
+ checkInput(countToCheck);
+ currentCountAlreadyChecked += countToCheck;
+ }
+
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i) {
+ PatternTerm& term = alternative->m_terms[i];
+
+ switch (term.type) {
+ case PatternTerm::TypeAssertionBOL:
+ assertionBOL(currentCountAlreadyChecked - term.inputPosition);
+ break;
+
+ case PatternTerm::TypeAssertionEOL:
+ assertionEOL(currentCountAlreadyChecked - term.inputPosition);
+ break;
+
+ case PatternTerm::TypeAssertionWordBoundary:
+ assertionWordBoundary(term.invert(), currentCountAlreadyChecked - term.inputPosition);
+ break;
+
+ case PatternTerm::TypePatternCharacter:
+ atomPatternCharacter(term.patternCharacter, currentCountAlreadyChecked - term.inputPosition, term.frameLocation, term.quantityCount, term.quantityType);
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+ atomCharacterClass(term.characterClass, term.invert(), currentCountAlreadyChecked- term.inputPosition, term.frameLocation, term.quantityCount, term.quantityType);
+ break;
+
+ case PatternTerm::TypeBackReference:
+ atomBackReference(term.backReferenceSubpatternId, currentCountAlreadyChecked - term.inputPosition, term.frameLocation, term.quantityCount, term.quantityType);
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ case PatternTerm::TypeParenthesesSubpattern: {
+ unsigned disjunctionAlreadyCheckedCount = 0;
+ if (term.quantityCount == 1 && !term.parentheses.isCopy) {
+ unsigned alternativeFrameLocation = term.frameLocation;
+ // For QuantifierFixedCount we pre-check the minimum size; for greedy/non-greedy we reserve a slot in the frame.
+ if (term.quantityType == QuantifierFixedCount)
+ disjunctionAlreadyCheckedCount = term.parentheses.disjunction->m_minimumSize;
+ else
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ unsigned delegateEndInputOffset = term.inputPosition - currentCountAlreadyChecked;
+ atomParenthesesOnceBegin(term.parentheses.subpatternId, term.capture(), disjunctionAlreadyCheckedCount - delegateEndInputOffset, term.frameLocation, alternativeFrameLocation);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, disjunctionAlreadyCheckedCount);
+ atomParenthesesOnceEnd(delegateEndInputOffset, term.frameLocation, term.quantityCount, term.quantityType);
+ } else if (term.parentheses.isTerminal) {
+ unsigned delegateEndInputOffset = term.inputPosition - currentCountAlreadyChecked;
+ atomParenthesesTerminalBegin(term.parentheses.subpatternId, term.capture(), disjunctionAlreadyCheckedCount - delegateEndInputOffset, term.frameLocation, term.frameLocation + YarrStackSpaceForBackTrackInfoParenthesesOnce);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, disjunctionAlreadyCheckedCount);
+ atomParenthesesTerminalEnd(delegateEndInputOffset, term.frameLocation, term.quantityCount, term.quantityType);
+ } else {
+ unsigned delegateEndInputOffset = term.inputPosition - currentCountAlreadyChecked;
+ atomParenthesesSubpatternBegin(term.parentheses.subpatternId, term.capture(), disjunctionAlreadyCheckedCount - delegateEndInputOffset, term.frameLocation, 0);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, 0);
+ atomParenthesesSubpatternEnd(term.parentheses.lastSubpatternId, delegateEndInputOffset, term.frameLocation, term.quantityCount, term.quantityType, term.parentheses.disjunction->m_callFrameSize);
+ }
+ break;
+ }
+
+ case PatternTerm::TypeParentheticalAssertion: {
+ unsigned alternativeFrameLocation = term.frameLocation + YarrStackSpaceForBackTrackInfoParentheticalAssertion;
+
+ ASSERT(currentCountAlreadyChecked >= static_cast<unsigned>(term.inputPosition));
+ unsigned positiveInputOffset = currentCountAlreadyChecked - static_cast<unsigned>(term.inputPosition);
+ unsigned uncheckAmount = 0;
+ if (positiveInputOffset > term.parentheses.disjunction->m_minimumSize) {
+ uncheckAmount = positiveInputOffset - term.parentheses.disjunction->m_minimumSize;
+ uncheckInput(uncheckAmount);
+ currentCountAlreadyChecked -= uncheckAmount;
+ }
+
+ atomParentheticalAssertionBegin(term.parentheses.subpatternId, term.invert(), term.frameLocation, alternativeFrameLocation);
+ emitDisjunction(term.parentheses.disjunction, currentCountAlreadyChecked, positiveInputOffset - uncheckAmount);
+ atomParentheticalAssertionEnd(0, term.frameLocation, term.quantityCount, term.quantityType);
+ if (uncheckAmount) {
+ checkInput(uncheckAmount);
+ currentCountAlreadyChecked += uncheckAmount;
+ }
+ break;
+ }
+
+ case PatternTerm::TypeDotStarEnclosure:
+ assertionDotStarEnclosure(term.anchors.bolAnchor, term.anchors.eolAnchor);
+ break;
+ }
+ }
+ }
+ }
+
+private:
+ YarrPattern& m_pattern;
+ OwnPtr<ByteDisjunction> m_bodyDisjunction;
+ unsigned m_currentAlternativeIndex;
+ Vector<ParenthesesStackEntry> m_parenthesesStack;
+ Vector<OwnPtr<ByteDisjunction> > m_allParenthesesInfo;
+};
+
+PassOwnPtr<BytecodePattern> byteCompile(YarrPattern& pattern, BumpPointerAllocator* allocator)
+{
+ return ByteCompiler(pattern).compile(allocator);
+}
+
+unsigned interpret(BytecodePattern* bytecode, const String& input, unsigned start, unsigned* output)
+{
+ if (input.is8Bit())
+ return Interpreter<LChar>(bytecode, output, input.characters8(), input.length(), start).interpret();
+ return Interpreter<UChar>(bytecode, output, input.characters16(), input.length(), start).interpret();
+}
+
+unsigned interpret(BytecodePattern* bytecode, const LChar* input, unsigned length, unsigned start, unsigned* output)
+{
+ return Interpreter<LChar>(bytecode, output, input, length, start).interpret();
+}
+
+unsigned interpret(BytecodePattern* bytecode, const UChar* input, unsigned length, unsigned start, unsigned* output)
+{
+ return Interpreter<UChar>(bytecode, output, input, length, start).interpret();
+}
+
+// These should be the same for both UChar & LChar.
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoPatternCharacter) == (YarrStackSpaceForBackTrackInfoPatternCharacter * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoPatternCharacter);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoCharacterClass) == (YarrStackSpaceForBackTrackInfoCharacterClass * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoCharacterClass);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoBackReference) == (YarrStackSpaceForBackTrackInfoBackReference * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoBackReference);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoAlternative) == (YarrStackSpaceForBackTrackInfoAlternative * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoAlternative);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoParentheticalAssertion) == (YarrStackSpaceForBackTrackInfoParentheticalAssertion * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoParentheticalAssertion);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoParenthesesOnce) == (YarrStackSpaceForBackTrackInfoParenthesesOnce * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoParenthesesOnce);
+COMPILE_ASSERT(sizeof(Interpreter<UChar>::BackTrackInfoParentheses) == (YarrStackSpaceForBackTrackInfoParentheses * sizeof(uintptr_t)), CheckYarrStackSpaceForBackTrackInfoParentheses);
+
+
+} }
diff --git a/src/3rdparty/masm/yarr/YarrInterpreter.h b/src/3rdparty/masm/yarr/YarrInterpreter.h
new file mode 100644
index 0000000000..3b44acbd2b
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrInterpreter.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrInterpreter_h
+#define YarrInterpreter_h
+
+#include "YarrPattern.h"
+#include <wtf/PassOwnPtr.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace WTF {
+class BumpPointerAllocator;
+}
+using WTF::BumpPointerAllocator;
+
+namespace JSC { namespace Yarr {
+
+class ByteDisjunction;
+
+struct ByteTerm {
+ enum Type {
+ TypeBodyAlternativeBegin,
+ TypeBodyAlternativeDisjunction,
+ TypeBodyAlternativeEnd,
+ TypeAlternativeBegin,
+ TypeAlternativeDisjunction,
+ TypeAlternativeEnd,
+ TypeSubpatternBegin,
+ TypeSubpatternEnd,
+ TypeAssertionBOL,
+ TypeAssertionEOL,
+ TypeAssertionWordBoundary,
+ TypePatternCharacterOnce,
+ TypePatternCharacterFixed,
+ TypePatternCharacterGreedy,
+ TypePatternCharacterNonGreedy,
+ TypePatternCasedCharacterOnce,
+ TypePatternCasedCharacterFixed,
+ TypePatternCasedCharacterGreedy,
+ TypePatternCasedCharacterNonGreedy,
+ TypeCharacterClass,
+ TypeBackReference,
+ TypeParenthesesSubpattern,
+ TypeParenthesesSubpatternOnceBegin,
+ TypeParenthesesSubpatternOnceEnd,
+ TypeParenthesesSubpatternTerminalBegin,
+ TypeParenthesesSubpatternTerminalEnd,
+ TypeParentheticalAssertionBegin,
+ TypeParentheticalAssertionEnd,
+ TypeCheckInput,
+ TypeUncheckInput,
+ TypeDotStarEnclosure,
+ } type;
+ union {
+ struct {
+ union {
+ UChar patternCharacter;
+ struct {
+ UChar lo;
+ UChar hi;
+ } casedCharacter;
+ CharacterClass* characterClass;
+ unsigned subpatternId;
+ };
+ union {
+ ByteDisjunction* parenthesesDisjunction;
+ unsigned parenthesesWidth;
+ };
+ QuantifierType quantityType;
+ unsigned quantityCount;
+ } atom;
+ struct {
+ int next;
+ int end;
+ bool onceThrough;
+ } alternative;
+ struct {
+ bool m_bol : 1;
+ bool m_eol : 1;
+ } anchors;
+ unsigned checkInputCount;
+ };
+ unsigned frameLocation;
+ bool m_capture : 1;
+ bool m_invert : 1;
+ unsigned inputPosition;
+
+ ByteTerm(UChar ch, int inputPos, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ : frameLocation(frameLocation)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ switch (quantityType) {
+ case QuantifierFixedCount:
+ type = (quantityCount == 1) ? ByteTerm::TypePatternCharacterOnce : ByteTerm::TypePatternCharacterFixed;
+ break;
+ case QuantifierGreedy:
+ type = ByteTerm::TypePatternCharacterGreedy;
+ break;
+ case QuantifierNonGreedy:
+ type = ByteTerm::TypePatternCharacterNonGreedy;
+ break;
+ }
+
+ atom.patternCharacter = ch;
+ atom.quantityType = quantityType;
+ atom.quantityCount = quantityCount.unsafeGet();
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(UChar lo, UChar hi, int inputPos, unsigned frameLocation, Checked<unsigned> quantityCount, QuantifierType quantityType)
+ : frameLocation(frameLocation)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ switch (quantityType) {
+ case QuantifierFixedCount:
+ type = (quantityCount == 1) ? ByteTerm::TypePatternCasedCharacterOnce : ByteTerm::TypePatternCasedCharacterFixed;
+ break;
+ case QuantifierGreedy:
+ type = ByteTerm::TypePatternCasedCharacterGreedy;
+ break;
+ case QuantifierNonGreedy:
+ type = ByteTerm::TypePatternCasedCharacterNonGreedy;
+ break;
+ }
+
+ atom.casedCharacter.lo = lo;
+ atom.casedCharacter.hi = hi;
+ atom.quantityType = quantityType;
+ atom.quantityCount = quantityCount.unsafeGet();
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(CharacterClass* characterClass, bool invert, int inputPos)
+ : type(ByteTerm::TypeCharacterClass)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ atom.characterClass = characterClass;
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(Type type, unsigned subpatternId, ByteDisjunction* parenthesesInfo, bool capture, int inputPos)
+ : type(type)
+ , m_capture(capture)
+ , m_invert(false)
+ {
+ atom.subpatternId = subpatternId;
+ atom.parenthesesDisjunction = parenthesesInfo;
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ inputPosition = inputPos;
+ }
+
+ ByteTerm(Type type, bool invert = false)
+ : type(type)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ }
+
+ ByteTerm(Type type, unsigned subpatternId, bool capture, bool invert, int inputPos)
+ : type(type)
+ , m_capture(capture)
+ , m_invert(invert)
+ {
+ atom.subpatternId = subpatternId;
+ atom.quantityType = QuantifierFixedCount;
+ atom.quantityCount = 1;
+ inputPosition = inputPos;
+ }
+
+ static ByteTerm BOL(int inputPos)
+ {
+ ByteTerm term(TypeAssertionBOL);
+ term.inputPosition = inputPos;
+ return term;
+ }
+
+ static ByteTerm CheckInput(Checked<unsigned> count)
+ {
+ ByteTerm term(TypeCheckInput);
+ term.checkInputCount = count.unsafeGet();
+ return term;
+ }
+
+ static ByteTerm UncheckInput(Checked<unsigned> count)
+ {
+ ByteTerm term(TypeUncheckInput);
+ term.checkInputCount = count.unsafeGet();
+ return term;
+ }
+
+ static ByteTerm EOL(int inputPos)
+ {
+ ByteTerm term(TypeAssertionEOL);
+ term.inputPosition = inputPos;
+ return term;
+ }
+
+ static ByteTerm WordBoundary(bool invert, int inputPos)
+ {
+ ByteTerm term(TypeAssertionWordBoundary, invert);
+ term.inputPosition = inputPos;
+ return term;
+ }
+
+ static ByteTerm BackReference(unsigned subpatternId, int inputPos)
+ {
+ return ByteTerm(TypeBackReference, subpatternId, false, false, inputPos);
+ }
+
+ static ByteTerm BodyAlternativeBegin(bool onceThrough)
+ {
+ ByteTerm term(TypeBodyAlternativeBegin);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = onceThrough;
+ return term;
+ }
+
+ static ByteTerm BodyAlternativeDisjunction(bool onceThrough)
+ {
+ ByteTerm term(TypeBodyAlternativeDisjunction);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = onceThrough;
+ return term;
+ }
+
+ static ByteTerm BodyAlternativeEnd()
+ {
+ ByteTerm term(TypeBodyAlternativeEnd);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm AlternativeBegin()
+ {
+ ByteTerm term(TypeAlternativeBegin);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm AlternativeDisjunction()
+ {
+ ByteTerm term(TypeAlternativeDisjunction);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm AlternativeEnd()
+ {
+ ByteTerm term(TypeAlternativeEnd);
+ term.alternative.next = 0;
+ term.alternative.end = 0;
+ term.alternative.onceThrough = false;
+ return term;
+ }
+
+ static ByteTerm SubpatternBegin()
+ {
+ return ByteTerm(TypeSubpatternBegin);
+ }
+
+ static ByteTerm SubpatternEnd()
+ {
+ return ByteTerm(TypeSubpatternEnd);
+ }
+
+ static ByteTerm DotStarEnclosure(bool bolAnchor, bool eolAnchor)
+ {
+ ByteTerm term(TypeDotStarEnclosure);
+ term.anchors.m_bol = bolAnchor;
+ term.anchors.m_eol = eolAnchor;
+ return term;
+ }
+
+ bool invert()
+ {
+ return m_invert;
+ }
+
+ bool capture()
+ {
+ return m_capture;
+ }
+};
+
+class ByteDisjunction {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ ByteDisjunction(unsigned numSubpatterns, unsigned frameSize)
+ : m_numSubpatterns(numSubpatterns)
+ , m_frameSize(frameSize)
+ {
+ }
+
+ Vector<ByteTerm> terms;
+ unsigned m_numSubpatterns;
+ unsigned m_frameSize;
+};
+
+struct BytecodePattern {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ BytecodePattern(PassOwnPtr<ByteDisjunction> body, Vector<OwnPtr<ByteDisjunction> >& parenthesesInfoToAdopt, YarrPattern& pattern, BumpPointerAllocator* allocator)
+ : m_body(body)
+ , m_ignoreCase(pattern.m_ignoreCase)
+ , m_multiline(pattern.m_multiline)
+ , m_allocator(allocator)
+ {
+ m_body->terms.shrinkToFit();
+
+ newlineCharacterClass = pattern.newlineCharacterClass();
+ wordcharCharacterClass = pattern.wordcharCharacterClass();
+
+ m_allParenthesesInfo.swap(parenthesesInfoToAdopt);
+ m_allParenthesesInfo.shrinkToFit();
+
+ m_userCharacterClasses.swap(pattern.m_userCharacterClasses);
+ m_userCharacterClasses.shrinkToFit();
+ }
+
+ OwnPtr<ByteDisjunction> m_body;
+ bool m_ignoreCase;
+ bool m_multiline;
+ // Each BytecodePattern is associated with a RegExp, each RegExp is associated
+ // with a JSGlobalData. Cache a pointer to out JSGlobalData's m_regExpAllocator.
+ BumpPointerAllocator* m_allocator;
+
+ CharacterClass* newlineCharacterClass;
+ CharacterClass* wordcharCharacterClass;
+
+private:
+ Vector<OwnPtr<ByteDisjunction> > m_allParenthesesInfo;
+ Vector<OwnPtr<CharacterClass> > m_userCharacterClasses;
+};
+
+JS_EXPORT_PRIVATE PassOwnPtr<BytecodePattern> byteCompile(YarrPattern&, BumpPointerAllocator*);
+JS_EXPORT_PRIVATE unsigned interpret(BytecodePattern*, const String& input, unsigned start, unsigned* output);
+unsigned interpret(BytecodePattern*, const LChar* input, unsigned length, unsigned start, unsigned* output);
+unsigned interpret(BytecodePattern*, const UChar* input, unsigned length, unsigned start, unsigned* output);
+
+} } // namespace JSC::Yarr
+
+#endif // YarrInterpreter_h
diff --git a/src/3rdparty/masm/yarr/YarrJIT.cpp b/src/3rdparty/masm/yarr/YarrJIT.cpp
new file mode 100644
index 0000000000..20b26c1eb9
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrJIT.cpp
@@ -0,0 +1,2702 @@
+/*
+ * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrJIT.h"
+
+#include <wtf/ASCIICType.h>
+#include "LinkBuffer.h"
+#include "Options.h"
+#include "Yarr.h"
+#include "YarrCanonicalizeUCS2.h"
+
+#if ENABLE(YARR_JIT)
+
+using namespace WTF;
+
+namespace JSC { namespace Yarr {
+
+template<YarrJITCompileMode compileMode>
+class YarrGenerator : private MacroAssembler {
+ friend void jitCompile(JSGlobalData*, YarrCodeBlock& jitObject, const String& pattern, unsigned& numSubpatterns, const char*& error, bool ignoreCase, bool multiline);
+
+#if CPU(ARM)
+ static const RegisterID input = ARMRegisters::r0;
+ static const RegisterID index = ARMRegisters::r1;
+ static const RegisterID length = ARMRegisters::r2;
+ static const RegisterID output = ARMRegisters::r4;
+
+ static const RegisterID regT0 = ARMRegisters::r5;
+ static const RegisterID regT1 = ARMRegisters::r6;
+
+ static const RegisterID returnRegister = ARMRegisters::r0;
+ static const RegisterID returnRegister2 = ARMRegisters::r1;
+#elif CPU(MIPS)
+ static const RegisterID input = MIPSRegisters::a0;
+ static const RegisterID index = MIPSRegisters::a1;
+ static const RegisterID length = MIPSRegisters::a2;
+ static const RegisterID output = MIPSRegisters::a3;
+
+ static const RegisterID regT0 = MIPSRegisters::t4;
+ static const RegisterID regT1 = MIPSRegisters::t5;
+
+ static const RegisterID returnRegister = MIPSRegisters::v0;
+ static const RegisterID returnRegister2 = MIPSRegisters::v1;
+#elif CPU(SH4)
+ static const RegisterID input = SH4Registers::r4;
+ static const RegisterID index = SH4Registers::r5;
+ static const RegisterID length = SH4Registers::r6;
+ static const RegisterID output = SH4Registers::r7;
+
+ static const RegisterID regT0 = SH4Registers::r0;
+ static const RegisterID regT1 = SH4Registers::r1;
+
+ static const RegisterID returnRegister = SH4Registers::r0;
+ static const RegisterID returnRegister2 = SH4Registers::r1;
+#elif CPU(X86)
+ static const RegisterID input = X86Registers::eax;
+ static const RegisterID index = X86Registers::edx;
+ static const RegisterID length = X86Registers::ecx;
+ static const RegisterID output = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::ebx;
+ static const RegisterID regT1 = X86Registers::esi;
+
+ static const RegisterID returnRegister = X86Registers::eax;
+ static const RegisterID returnRegister2 = X86Registers::edx;
+#elif CPU(X86_64)
+#if !OS(WINDOWS)
+ static const RegisterID input = X86Registers::edi;
+ static const RegisterID index = X86Registers::esi;
+ static const RegisterID length = X86Registers::edx;
+ static const RegisterID output = X86Registers::ecx;
+#else
+ // If the return value doesn't fit in 64bits, its destination is pointed by rcx and the parameters are shifted.
+ // http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
+ COMPILE_ASSERT(sizeof(MatchResult) > sizeof(void*), MatchResult_does_not_fit_in_64bits);
+ static const RegisterID input = X86Registers::edx;
+ static const RegisterID index = X86Registers::r8;
+ static const RegisterID length = X86Registers::r9;
+ static const RegisterID output = X86Registers::r10;
+#endif
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::ebx;
+
+ static const RegisterID returnRegister = X86Registers::eax;
+ static const RegisterID returnRegister2 = X86Registers::edx;
+#endif
+
+ void optimizeAlternative(PatternAlternative* alternative)
+ {
+ if (!alternative->m_terms.size())
+ return;
+
+ for (unsigned i = 0; i < alternative->m_terms.size() - 1; ++i) {
+ PatternTerm& term = alternative->m_terms[i];
+ PatternTerm& nextTerm = alternative->m_terms[i + 1];
+
+ if ((term.type == PatternTerm::TypeCharacterClass)
+ && (term.quantityType == QuantifierFixedCount)
+ && (nextTerm.type == PatternTerm::TypePatternCharacter)
+ && (nextTerm.quantityType == QuantifierFixedCount)) {
+ PatternTerm termCopy = term;
+ alternative->m_terms[i] = nextTerm;
+ alternative->m_terms[i + 1] = termCopy;
+ }
+ }
+ }
+
+ void matchCharacterClassRange(RegisterID character, JumpList& failures, JumpList& matchDest, const CharacterRange* ranges, unsigned count, unsigned* matchIndex, const UChar* matches, unsigned matchCount)
+ {
+ do {
+ // pick which range we're going to generate
+ int which = count >> 1;
+ char lo = ranges[which].begin;
+ char hi = ranges[which].end;
+
+ // check if there are any ranges or matches below lo. If not, just jl to failure -
+ // if there is anything else to check, check that first, if it falls through jmp to failure.
+ if ((*matchIndex < matchCount) && (matches[*matchIndex] < lo)) {
+ Jump loOrAbove = branch32(GreaterThanOrEqual, character, Imm32((unsigned short)lo));
+
+ // generate code for all ranges before this one
+ if (which)
+ matchCharacterClassRange(character, failures, matchDest, ranges, which, matchIndex, matches, matchCount);
+
+ while ((*matchIndex < matchCount) && (matches[*matchIndex] < lo)) {
+ matchDest.append(branch32(Equal, character, Imm32((unsigned short)matches[*matchIndex])));
+ ++*matchIndex;
+ }
+ failures.append(jump());
+
+ loOrAbove.link(this);
+ } else if (which) {
+ Jump loOrAbove = branch32(GreaterThanOrEqual, character, Imm32((unsigned short)lo));
+
+ matchCharacterClassRange(character, failures, matchDest, ranges, which, matchIndex, matches, matchCount);
+ failures.append(jump());
+
+ loOrAbove.link(this);
+ } else
+ failures.append(branch32(LessThan, character, Imm32((unsigned short)lo)));
+
+ while ((*matchIndex < matchCount) && (matches[*matchIndex] <= hi))
+ ++*matchIndex;
+
+ matchDest.append(branch32(LessThanOrEqual, character, Imm32((unsigned short)hi)));
+ // fall through to here, the value is above hi.
+
+ // shuffle along & loop around if there are any more matches to handle.
+ unsigned next = which + 1;
+ ranges += next;
+ count -= next;
+ } while (count);
+ }
+
+ void matchCharacterClass(RegisterID character, JumpList& matchDest, const CharacterClass* charClass)
+ {
+ if (charClass->m_table) {
+ ExtendedAddress tableEntry(character, reinterpret_cast<intptr_t>(charClass->m_table));
+ matchDest.append(branchTest8(charClass->m_tableInverted ? Zero : NonZero, tableEntry));
+ return;
+ }
+ Jump unicodeFail;
+ if (charClass->m_matchesUnicode.size() || charClass->m_rangesUnicode.size()) {
+ Jump isAscii = branch32(LessThanOrEqual, character, TrustedImm32(0x7f));
+
+ if (charClass->m_matchesUnicode.size()) {
+ for (unsigned i = 0; i < charClass->m_matchesUnicode.size(); ++i) {
+ UChar ch = charClass->m_matchesUnicode[i];
+ matchDest.append(branch32(Equal, character, Imm32(ch)));
+ }
+ }
+
+ if (charClass->m_rangesUnicode.size()) {
+ for (unsigned i = 0; i < charClass->m_rangesUnicode.size(); ++i) {
+ UChar lo = charClass->m_rangesUnicode[i].begin;
+ UChar hi = charClass->m_rangesUnicode[i].end;
+
+ Jump below = branch32(LessThan, character, Imm32(lo));
+ matchDest.append(branch32(LessThanOrEqual, character, Imm32(hi)));
+ below.link(this);
+ }
+ }
+
+ unicodeFail = jump();
+ isAscii.link(this);
+ }
+
+ if (charClass->m_ranges.size()) {
+ unsigned matchIndex = 0;
+ JumpList failures;
+ matchCharacterClassRange(character, failures, matchDest, charClass->m_ranges.begin(), charClass->m_ranges.size(), &matchIndex, charClass->m_matches.begin(), charClass->m_matches.size());
+ while (matchIndex < charClass->m_matches.size())
+ matchDest.append(branch32(Equal, character, Imm32((unsigned short)charClass->m_matches[matchIndex++])));
+
+ failures.link(this);
+ } else if (charClass->m_matches.size()) {
+ // optimization: gather 'a','A' etc back together, can mask & test once.
+ Vector<char> matchesAZaz;
+
+ for (unsigned i = 0; i < charClass->m_matches.size(); ++i) {
+ char ch = charClass->m_matches[i];
+ if (m_pattern.m_ignoreCase) {
+ if (isASCIILower(ch)) {
+ matchesAZaz.append(ch);
+ continue;
+ }
+ if (isASCIIUpper(ch))
+ continue;
+ }
+ matchDest.append(branch32(Equal, character, Imm32((unsigned short)ch)));
+ }
+
+ if (unsigned countAZaz = matchesAZaz.size()) {
+ or32(TrustedImm32(32), character);
+ for (unsigned i = 0; i < countAZaz; ++i)
+ matchDest.append(branch32(Equal, character, TrustedImm32(matchesAZaz[i])));
+ }
+ }
+
+ if (charClass->m_matchesUnicode.size() || charClass->m_rangesUnicode.size())
+ unicodeFail.link(this);
+ }
+
+ // Jumps if input not available; will have (incorrectly) incremented already!
+ Jump jumpIfNoAvailableInput(unsigned countToCheck = 0)
+ {
+ if (countToCheck)
+ add32(Imm32(countToCheck), index);
+ return branch32(Above, index, length);
+ }
+
+ Jump jumpIfAvailableInput(unsigned countToCheck)
+ {
+ add32(Imm32(countToCheck), index);
+ return branch32(BelowOrEqual, index, length);
+ }
+
+ Jump checkInput()
+ {
+ return branch32(BelowOrEqual, index, length);
+ }
+
+ Jump atEndOfInput()
+ {
+ return branch32(Equal, index, length);
+ }
+
+ Jump notAtEndOfInput()
+ {
+ return branch32(NotEqual, index, length);
+ }
+
+ Jump jumpIfCharNotEquals(UChar ch, int inputPosition, RegisterID character)
+ {
+ readCharacter(inputPosition, character);
+
+ // For case-insensitive compares, non-ascii characters that have different
+ // upper & lower case representations are converted to a character class.
+ ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(ch) || isCanonicallyUnique(ch));
+ if (m_pattern.m_ignoreCase && isASCIIAlpha(ch)) {
+ or32(TrustedImm32(0x20), character);
+ ch |= 0x20;
+ }
+
+ return branch32(NotEqual, character, Imm32(ch));
+ }
+
+ void readCharacter(int inputPosition, RegisterID reg)
+ {
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, index, TimesOne, inputPosition * sizeof(char)), reg);
+ else
+ load16(BaseIndex(input, index, TimesTwo, inputPosition * sizeof(UChar)), reg);
+ }
+
+ void storeToFrame(RegisterID reg, unsigned frameLocation)
+ {
+ poke(reg, frameLocation);
+ }
+
+ void storeToFrame(TrustedImm32 imm, unsigned frameLocation)
+ {
+ poke(imm, frameLocation);
+ }
+
+ DataLabelPtr storeToFrameWithPatch(unsigned frameLocation)
+ {
+ return storePtrWithPatch(TrustedImmPtr(0), Address(stackPointerRegister, frameLocation * sizeof(void*)));
+ }
+
+ void loadFromFrame(unsigned frameLocation, RegisterID reg)
+ {
+ peek(reg, frameLocation);
+ }
+
+ void loadFromFrameAndJump(unsigned frameLocation)
+ {
+ jump(Address(stackPointerRegister, frameLocation * sizeof(void*)));
+ }
+
+ void initCallFrame()
+ {
+ unsigned callFrameSize = m_pattern.m_body->m_callFrameSize;
+ if (callFrameSize)
+ subPtr(Imm32(callFrameSize * sizeof(void*)), stackPointerRegister);
+ }
+ void removeCallFrame()
+ {
+ unsigned callFrameSize = m_pattern.m_body->m_callFrameSize;
+ if (callFrameSize)
+ addPtr(Imm32(callFrameSize * sizeof(void*)), stackPointerRegister);
+ }
+
+ // Used to record subpatterns, should only be called if compileMode is IncludeSubpatterns.
+ void setSubpatternStart(RegisterID reg, unsigned subpattern)
+ {
+ ASSERT(subpattern);
+ // FIXME: should be able to ASSERT(compileMode == IncludeSubpatterns), but then this function is conditionally NORETURN. :-(
+ store32(reg, Address(output, (subpattern << 1) * sizeof(int)));
+ }
+ void setSubpatternEnd(RegisterID reg, unsigned subpattern)
+ {
+ ASSERT(subpattern);
+ // FIXME: should be able to ASSERT(compileMode == IncludeSubpatterns), but then this function is conditionally NORETURN. :-(
+ store32(reg, Address(output, ((subpattern << 1) + 1) * sizeof(int)));
+ }
+ void clearSubpatternStart(unsigned subpattern)
+ {
+ ASSERT(subpattern);
+ // FIXME: should be able to ASSERT(compileMode == IncludeSubpatterns), but then this function is conditionally NORETURN. :-(
+ store32(TrustedImm32(-1), Address(output, (subpattern << 1) * sizeof(int)));
+ }
+
+ // We use one of three different strategies to track the start of the current match,
+ // while matching.
+ // 1) If the pattern has a fixed size, do nothing! - we calculate the value lazily
+ // at the end of matching. This is irrespective of compileMode, and in this case
+ // these methods should never be called.
+ // 2) If we're compiling IncludeSubpatterns, 'output' contains a pointer to an output
+ // vector, store the match start in the output vector.
+ // 3) If we're compiling MatchOnly, 'output' is unused, store the match start directly
+ // in this register.
+ void setMatchStart(RegisterID reg)
+ {
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ if (compileMode == IncludeSubpatterns)
+ store32(reg, output);
+ else
+ move(reg, output);
+ }
+ void getMatchStart(RegisterID reg)
+ {
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ if (compileMode == IncludeSubpatterns)
+ load32(output, reg);
+ else
+ move(output, reg);
+ }
+
+ enum YarrOpCode {
+ // These nodes wrap body alternatives - those in the main disjunction,
+ // rather than subpatterns or assertions. These are chained together in
+ // a doubly linked list, with a 'begin' node for the first alternative,
+ // a 'next' node for each subsequent alternative, and an 'end' node at
+ // the end. In the case of repeating alternatives, the 'end' node also
+ // has a reference back to 'begin'.
+ OpBodyAlternativeBegin,
+ OpBodyAlternativeNext,
+ OpBodyAlternativeEnd,
+ // Similar to the body alternatives, but used for subpatterns with two
+ // or more alternatives.
+ OpNestedAlternativeBegin,
+ OpNestedAlternativeNext,
+ OpNestedAlternativeEnd,
+ // Used for alternatives in subpatterns where there is only a single
+ // alternative (backtracking is easier in these cases), or for alternatives
+ // which never need to be backtracked (those in parenthetical assertions,
+ // terminal subpatterns).
+ OpSimpleNestedAlternativeBegin,
+ OpSimpleNestedAlternativeNext,
+ OpSimpleNestedAlternativeEnd,
+ // Used to wrap 'Once' subpattern matches (quantityCount == 1).
+ OpParenthesesSubpatternOnceBegin,
+ OpParenthesesSubpatternOnceEnd,
+ // Used to wrap 'Terminal' subpattern matches (at the end of the regexp).
+ OpParenthesesSubpatternTerminalBegin,
+ OpParenthesesSubpatternTerminalEnd,
+ // Used to wrap parenthetical assertions.
+ OpParentheticalAssertionBegin,
+ OpParentheticalAssertionEnd,
+ // Wraps all simple terms (pattern characters, character classes).
+ OpTerm,
+ // Where an expression contains only 'once through' body alternatives
+ // and no repeating ones, this op is used to return match failure.
+ OpMatchFailed
+ };
+
+ // This structure is used to hold the compiled opcode information,
+ // including reference back to the original PatternTerm/PatternAlternatives,
+ // and JIT compilation data structures.
+ struct YarrOp {
+ explicit YarrOp(PatternTerm* term)
+ : m_op(OpTerm)
+ , m_term(term)
+ , m_isDeadCode(false)
+ {
+ }
+
+ explicit YarrOp(YarrOpCode op)
+ : m_op(op)
+ , m_isDeadCode(false)
+ {
+ }
+
+ // The operation, as a YarrOpCode, and also a reference to the PatternTerm.
+ YarrOpCode m_op;
+ PatternTerm* m_term;
+
+ // For alternatives, this holds the PatternAlternative and doubly linked
+ // references to this alternative's siblings. In the case of the
+ // OpBodyAlternativeEnd node at the end of a section of repeating nodes,
+ // m_nextOp will reference the OpBodyAlternativeBegin node of the first
+ // repeating alternative.
+ PatternAlternative* m_alternative;
+ size_t m_previousOp;
+ size_t m_nextOp;
+
+ // Used to record a set of Jumps out of the generated code, typically
+ // used for jumps out to backtracking code, and a single reentry back
+ // into the code for a node (likely where a backtrack will trigger
+ // rematching).
+ Label m_reentry;
+ JumpList m_jumps;
+
+ // Used for backtracking when the prior alternative did not consume any
+ // characters but matched.
+ Jump m_zeroLengthMatch;
+
+ // This flag is used to null out the second pattern character, when
+ // two are fused to match a pair together.
+ bool m_isDeadCode;
+
+ // Currently used in the case of some of the more complex management of
+ // 'm_checked', to cache the offset used in this alternative, to avoid
+ // recalculating it.
+ int m_checkAdjust;
+
+ // Used by OpNestedAlternativeNext/End to hold the pointer to the
+ // value that will be pushed into the pattern's frame to return to,
+ // upon backtracking back into the disjunction.
+ DataLabelPtr m_returnAddress;
+ };
+
+ // BacktrackingState
+ // This class encapsulates information about the state of code generation
+ // whilst generating the code for backtracking, when a term fails to match.
+ // Upon entry to code generation of the backtracking code for a given node,
+ // the Backtracking state will hold references to all control flow sources
+ // that are outputs in need of further backtracking from the prior node
+ // generated (which is the subsequent operation in the regular expression,
+ // and in the m_ops Vector, since we generated backtracking backwards).
+ // These references to control flow take the form of:
+ // - A jump list of jumps, to be linked to code that will backtrack them
+ // further.
+ // - A set of DataLabelPtr values, to be populated with values to be
+ // treated effectively as return addresses backtracking into complex
+ // subpatterns.
+ // - A flag indicating that the current sequence of generated code up to
+ // this point requires backtracking.
+ class BacktrackingState {
+ public:
+ BacktrackingState()
+ : m_pendingFallthrough(false)
+ {
+ }
+
+ // Add a jump or jumps, a return address, or set the flag indicating
+ // that the current 'fallthrough' control flow requires backtracking.
+ void append(const Jump& jump)
+ {
+ m_laterFailures.append(jump);
+ }
+ void append(JumpList& jumpList)
+ {
+ m_laterFailures.append(jumpList);
+ }
+ void append(const DataLabelPtr& returnAddress)
+ {
+ m_pendingReturns.append(returnAddress);
+ }
+ void fallthrough()
+ {
+ ASSERT(!m_pendingFallthrough);
+ m_pendingFallthrough = true;
+ }
+
+ // These methods clear the backtracking state, either linking to the
+ // current location, a provided label, or copying the backtracking out
+ // to a JumpList. All actions may require code generation to take place,
+ // and as such are passed a pointer to the assembler.
+ void link(MacroAssembler* assembler)
+ {
+ if (m_pendingReturns.size()) {
+ Label here(assembler);
+ for (unsigned i = 0; i < m_pendingReturns.size(); ++i)
+ m_backtrackRecords.append(ReturnAddressRecord(m_pendingReturns[i], here));
+ m_pendingReturns.clear();
+ }
+ m_laterFailures.link(assembler);
+ m_laterFailures.clear();
+ m_pendingFallthrough = false;
+ }
+ void linkTo(Label label, MacroAssembler* assembler)
+ {
+ if (m_pendingReturns.size()) {
+ for (unsigned i = 0; i < m_pendingReturns.size(); ++i)
+ m_backtrackRecords.append(ReturnAddressRecord(m_pendingReturns[i], label));
+ m_pendingReturns.clear();
+ }
+ if (m_pendingFallthrough)
+ assembler->jump(label);
+ m_laterFailures.linkTo(label, assembler);
+ m_laterFailures.clear();
+ m_pendingFallthrough = false;
+ }
+ void takeBacktracksToJumpList(JumpList& jumpList, MacroAssembler* assembler)
+ {
+ if (m_pendingReturns.size()) {
+ Label here(assembler);
+ for (unsigned i = 0; i < m_pendingReturns.size(); ++i)
+ m_backtrackRecords.append(ReturnAddressRecord(m_pendingReturns[i], here));
+ m_pendingReturns.clear();
+ m_pendingFallthrough = true;
+ }
+ if (m_pendingFallthrough)
+ jumpList.append(assembler->jump());
+ jumpList.append(m_laterFailures);
+ m_laterFailures.clear();
+ m_pendingFallthrough = false;
+ }
+
+ bool isEmpty()
+ {
+ return m_laterFailures.empty() && m_pendingReturns.isEmpty() && !m_pendingFallthrough;
+ }
+
+ // Called at the end of code generation to link all return addresses.
+ void linkDataLabels(LinkBuffer& linkBuffer)
+ {
+ ASSERT(isEmpty());
+ for (unsigned i = 0; i < m_backtrackRecords.size(); ++i)
+ linkBuffer.patch(m_backtrackRecords[i].m_dataLabel, linkBuffer.locationOf(m_backtrackRecords[i].m_backtrackLocation));
+ }
+
+ private:
+ struct ReturnAddressRecord {
+ ReturnAddressRecord(DataLabelPtr dataLabel, Label backtrackLocation)
+ : m_dataLabel(dataLabel)
+ , m_backtrackLocation(backtrackLocation)
+ {
+ }
+
+ DataLabelPtr m_dataLabel;
+ Label m_backtrackLocation;
+ };
+
+ JumpList m_laterFailures;
+ bool m_pendingFallthrough;
+ Vector<DataLabelPtr, 4> m_pendingReturns;
+ Vector<ReturnAddressRecord, 4> m_backtrackRecords;
+ };
+
+ // Generation methods:
+ // ===================
+
+ // This method provides a default implementation of backtracking common
+ // to many terms; terms commonly jump out of the forwards matching path
+ // on any failed conditions, and add these jumps to the m_jumps list. If
+ // no special handling is required we can often just backtrack to m_jumps.
+ void backtrackTermDefault(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ m_backtrackingState.append(op.m_jumps);
+ }
+
+ void generateAssertionBOL(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ if (m_pattern.m_multiline) {
+ const RegisterID character = regT0;
+
+ JumpList matchDest;
+ if (!term->inputPosition)
+ matchDest.append(branch32(Equal, index, Imm32(m_checked)));
+
+ readCharacter((term->inputPosition - m_checked) - 1, character);
+ matchCharacterClass(character, matchDest, m_pattern.newlineCharacterClass());
+ op.m_jumps.append(jump());
+
+ matchDest.link(this);
+ } else {
+ // Erk, really should poison out these alternatives early. :-/
+ if (term->inputPosition)
+ op.m_jumps.append(jump());
+ else
+ op.m_jumps.append(branch32(NotEqual, index, Imm32(m_checked)));
+ }
+ }
+ void backtrackAssertionBOL(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generateAssertionEOL(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ if (m_pattern.m_multiline) {
+ const RegisterID character = regT0;
+
+ JumpList matchDest;
+ if (term->inputPosition == m_checked)
+ matchDest.append(atEndOfInput());
+
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, m_pattern.newlineCharacterClass());
+ op.m_jumps.append(jump());
+
+ matchDest.link(this);
+ } else {
+ if (term->inputPosition == m_checked)
+ op.m_jumps.append(notAtEndOfInput());
+ // Erk, really should poison out these alternatives early. :-/
+ else
+ op.m_jumps.append(jump());
+ }
+ }
+ void backtrackAssertionEOL(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ // Also falls through on nextIsNotWordChar.
+ void matchAssertionWordchar(size_t opIndex, JumpList& nextIsWordChar, JumpList& nextIsNotWordChar)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+
+ if (term->inputPosition == m_checked)
+ nextIsNotWordChar.append(atEndOfInput());
+
+ readCharacter((term->inputPosition - m_checked), character);
+ matchCharacterClass(character, nextIsWordChar, m_pattern.wordcharCharacterClass());
+ }
+
+ void generateAssertionWordBoundary(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+
+ Jump atBegin;
+ JumpList matchDest;
+ if (!term->inputPosition)
+ atBegin = branch32(Equal, index, Imm32(m_checked));
+ readCharacter((term->inputPosition - m_checked) - 1, character);
+ matchCharacterClass(character, matchDest, m_pattern.wordcharCharacterClass());
+ if (!term->inputPosition)
+ atBegin.link(this);
+
+ // We fall through to here if the last character was not a wordchar.
+ JumpList nonWordCharThenWordChar;
+ JumpList nonWordCharThenNonWordChar;
+ if (term->invert()) {
+ matchAssertionWordchar(opIndex, nonWordCharThenNonWordChar, nonWordCharThenWordChar);
+ nonWordCharThenWordChar.append(jump());
+ } else {
+ matchAssertionWordchar(opIndex, nonWordCharThenWordChar, nonWordCharThenNonWordChar);
+ nonWordCharThenNonWordChar.append(jump());
+ }
+ op.m_jumps.append(nonWordCharThenNonWordChar);
+
+ // We jump here if the last character was a wordchar.
+ matchDest.link(this);
+ JumpList wordCharThenWordChar;
+ JumpList wordCharThenNonWordChar;
+ if (term->invert()) {
+ matchAssertionWordchar(opIndex, wordCharThenNonWordChar, wordCharThenWordChar);
+ wordCharThenWordChar.append(jump());
+ } else {
+ matchAssertionWordchar(opIndex, wordCharThenWordChar, wordCharThenNonWordChar);
+ // This can fall-through!
+ }
+
+ op.m_jumps.append(wordCharThenWordChar);
+
+ nonWordCharThenWordChar.link(this);
+ wordCharThenNonWordChar.link(this);
+ }
+ void backtrackAssertionWordBoundary(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generatePatternCharacterOnce(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+
+ if (op.m_isDeadCode)
+ return;
+
+ // m_ops always ends with a OpBodyAlternativeEnd or OpMatchFailed
+ // node, so there must always be at least one more node.
+ ASSERT(opIndex + 1 < m_ops.size());
+ YarrOp* nextOp = &m_ops[opIndex + 1];
+
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ if ((ch > 0xff) && (m_charSize == Char8)) {
+ // Have a 16 bit pattern character and an 8 bit string - short circuit
+ op.m_jumps.append(jump());
+ return;
+ }
+
+ const RegisterID character = regT0;
+ int maxCharactersAtOnce = m_charSize == Char8 ? 4 : 2;
+ unsigned ignoreCaseMask = 0;
+#if CPU(BIG_ENDIAN)
+ int allCharacters = ch << (m_charSize == Char8 ? 24 : 16);
+#else
+ int allCharacters = ch;
+#endif
+ int numberCharacters;
+ int startTermPosition = term->inputPosition;
+
+ // For case-insensitive compares, non-ascii characters that have different
+ // upper & lower case representations are converted to a character class.
+ ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(ch) || isCanonicallyUnique(ch));
+
+ if (m_pattern.m_ignoreCase && isASCIIAlpha(ch))
+#if CPU(BIG_ENDIAN)
+ ignoreCaseMask |= 32 << (m_charSize == Char8 ? 24 : 16);
+#else
+ ignoreCaseMask |= 32;
+#endif
+
+ for (numberCharacters = 1; numberCharacters < maxCharactersAtOnce && nextOp->m_op == OpTerm; ++numberCharacters, nextOp = &m_ops[opIndex + numberCharacters]) {
+ PatternTerm* nextTerm = nextOp->m_term;
+
+ if (nextTerm->type != PatternTerm::TypePatternCharacter
+ || nextTerm->quantityType != QuantifierFixedCount
+ || nextTerm->quantityCount != 1
+ || nextTerm->inputPosition != (startTermPosition + numberCharacters))
+ break;
+
+ nextOp->m_isDeadCode = true;
+
+#if CPU(BIG_ENDIAN)
+ int shiftAmount = (m_charSize == Char8 ? 24 : 16) - ((m_charSize == Char8 ? 8 : 16) * numberCharacters);
+#else
+ int shiftAmount = (m_charSize == Char8 ? 8 : 16) * numberCharacters;
+#endif
+
+ UChar currentCharacter = nextTerm->patternCharacter;
+
+ if ((currentCharacter > 0xff) && (m_charSize == Char8)) {
+ // Have a 16 bit pattern character and an 8 bit string - short circuit
+ op.m_jumps.append(jump());
+ return;
+ }
+
+ // For case-insensitive compares, non-ascii characters that have different
+ // upper & lower case representations are converted to a character class.
+ ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(currentCharacter) || isCanonicallyUnique(currentCharacter));
+
+ allCharacters |= (currentCharacter << shiftAmount);
+
+ if ((m_pattern.m_ignoreCase) && (isASCIIAlpha(currentCharacter)))
+ ignoreCaseMask |= 32 << shiftAmount;
+ }
+
+ if (m_charSize == Char8) {
+ switch (numberCharacters) {
+ case 1:
+ op.m_jumps.append(jumpIfCharNotEquals(ch, startTermPosition - m_checked, character));
+ return;
+ case 2: {
+ BaseIndex address(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
+ load16Unaligned(address, character);
+ break;
+ }
+ case 3: {
+ BaseIndex highAddress(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
+ load16Unaligned(highAddress, character);
+ if (ignoreCaseMask)
+ or32(Imm32(ignoreCaseMask), character);
+ op.m_jumps.append(branch32(NotEqual, character, Imm32((allCharacters & 0xffff) | ignoreCaseMask)));
+ op.m_jumps.append(jumpIfCharNotEquals(allCharacters >> 16, startTermPosition + 2 - m_checked, character));
+ return;
+ }
+ case 4: {
+ BaseIndex address(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
+ load32WithUnalignedHalfWords(address, character);
+ break;
+ }
+ }
+ } else {
+ switch (numberCharacters) {
+ case 1:
+ op.m_jumps.append(jumpIfCharNotEquals(ch, term->inputPosition - m_checked, character));
+ return;
+ case 2:
+ BaseIndex address(input, index, TimesTwo, (term->inputPosition - m_checked) * sizeof(UChar));
+ load32WithUnalignedHalfWords(address, character);
+ break;
+ }
+ }
+
+ if (ignoreCaseMask)
+ or32(Imm32(ignoreCaseMask), character);
+ op.m_jumps.append(branch32(NotEqual, character, Imm32(allCharacters | ignoreCaseMask)));
+ return;
+ }
+ void backtrackPatternCharacterOnce(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ void generatePatternCharacterFixed(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(index, countRegister);
+ sub32(Imm32(term->quantityCount.unsafeGet()), countRegister);
+
+ Label loop(this);
+ BaseIndex address(input, countRegister, m_charScale, (Checked<int>(term->inputPosition - m_checked + Checked<int64_t>(term->quantityCount)) * static_cast<int>(m_charSize == Char8 ? sizeof(char) : sizeof(UChar))).unsafeGet());
+
+ if (m_charSize == Char8)
+ load8(address, character);
+ else
+ load16(address, character);
+
+ // For case-insensitive compares, non-ascii characters that have different
+ // upper & lower case representations are converted to a character class.
+ ASSERT(!m_pattern.m_ignoreCase || isASCIIAlpha(ch) || isCanonicallyUnique(ch));
+ if (m_pattern.m_ignoreCase && isASCIIAlpha(ch)) {
+ or32(TrustedImm32(0x20), character);
+ ch |= 0x20;
+ }
+
+ op.m_jumps.append(branch32(NotEqual, character, Imm32(ch)));
+ add32(TrustedImm32(1), countRegister);
+ branch32(NotEqual, countRegister, index).linkTo(loop, this);
+ }
+ void backtrackPatternCharacterFixed(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ // Match a pattern character with a greedy quantifier: consume as many
+ // occurrences as possible (bounded by quantityCount unless infinite), and
+ // store the number matched into the term's frame slot so backtracking can
+ // give characters back one at a time.
+ void generatePatternCharacterGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+
+ // A 16-bit pattern character can never match an 8-bit string - short circuit
+ // (the loop is skipped and the match count stays zero).
+ if (!((ch > 0xff) && (m_charSize == Char8))) {
+ JumpList failures;
+ Label loop(this);
+ failures.append(atEndOfInput());
+ failures.append(jumpIfCharNotEquals(ch, term->inputPosition - m_checked, character));
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+ if (term->quantityCount == quantifyInfinite)
+ jump(loop);
+ else
+ branch32(NotEqual, countRegister, Imm32(term->quantityCount.unsafeGet())).linkTo(loop, this);
+
+ failures.link(this);
+ }
+ // Backtracking reenters here (after decrementing the count) to retry the
+ // continuation with one fewer character consumed.
+ op.m_reentry = label();
+
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ // Backtrack a greedy character match: if at least one character was consumed,
+ // give one back (decrement count and input index) and retry the continuation
+ // from the reentry point; if none remain, propagate failure.
+ void backtrackPatternCharacterGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+ // Count of zero means nothing left to give back - fail this term.
+ m_backtrackingState.append(branchTest32(Zero, countRegister));
+ sub32(TrustedImm32(1), countRegister);
+ sub32(TrustedImm32(1), index);
+ jump(op.m_reentry);
+ }
+
+ // Match a pattern character with a non-greedy quantifier: initially match
+ // zero occurrences (count = 0); the backtracking path consumes additional
+ // characters on demand and jumps back to the reentry label.
+ void generatePatternCharacterNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+ op.m_reentry = label();
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ // Backtrack a non-greedy character match: try to consume one more occurrence
+ // of the pattern character; on success, jump back to the reentry point with
+ // the incremented count. On failure (end of input, quantity limit reached, or
+ // character mismatch) rewind the input index and propagate failure.
+ void backtrackPatternCharacterNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+ UChar ch = term->patternCharacter;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+
+ // A 16-bit pattern character can never match an 8-bit string - short circuit
+ // straight to the failure path.
+ if (!((ch > 0xff) && (m_charSize == Char8))) {
+ JumpList nonGreedyFailures;
+ nonGreedyFailures.append(atEndOfInput());
+ if (term->quantityCount != quantifyInfinite)
+ nonGreedyFailures.append(branch32(Equal, countRegister, Imm32(term->quantityCount.unsafeGet())));
+ nonGreedyFailures.append(jumpIfCharNotEquals(ch, term->inputPosition - m_checked, character));
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+
+ jump(op.m_reentry);
+ nonGreedyFailures.link(this);
+ }
+
+ // Rewind the input position past everything this term consumed.
+ sub32(countRegister, index);
+ m_backtrackingState.fallthrough();
+ }
+
+ // Match a character class term with quantity count 1: read one character and
+ // test it against the class. For inverted classes a class match is a failure;
+ // otherwise a class miss is a failure.
+ void generateCharacterClassOnce(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+
+ JumpList matchDest;
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+
+ if (term->invert())
+ op.m_jumps.append(matchDest);
+ else {
+ op.m_jumps.append(jump());
+ matchDest.link(this);
+ }
+ }
+ // Backtracking for a single character-class match: no per-term state to
+ // unwind, so use the default handler.
+ void backtrackCharacterClassOnce(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ // Match a character class with a fixed quantity count > 1: loop over the
+ // quantityCount positions (countRegister runs from index - quantityCount up
+ // to index), testing each character against the class. Any failure jumps out
+ // via op.m_jumps.
+ void generateCharacterClassFixed(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(index, countRegister);
+ sub32(Imm32(term->quantityCount.unsafeGet()), countRegister);
+
+ Label loop(this);
+ JumpList matchDest;
+ // Offset folds inputPosition, the checked-input adjustment, and the quantity
+ // into the base-index address; computed with overflow-checked arithmetic.
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, countRegister, TimesOne, (Checked<int>(term->inputPosition - m_checked + Checked<int64_t>(term->quantityCount)) * static_cast<int>(sizeof(char))).unsafeGet()), character);
+ else
+ load16(BaseIndex(input, countRegister, TimesTwo, (Checked<int>(term->inputPosition - m_checked + Checked<int64_t>(term->quantityCount)) * static_cast<int>(sizeof(UChar))).unsafeGet()), character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+
+ if (term->invert())
+ op.m_jumps.append(matchDest);
+ else {
+ op.m_jumps.append(jump());
+ matchDest.link(this);
+ }
+
+ add32(TrustedImm32(1), countRegister);
+ branch32(NotEqual, countRegister, index).linkTo(loop, this);
+ }
+ // Fixed-count character-class terms hold no backtracking state of their own;
+ // propagate failure via the default handler.
+ void backtrackCharacterClassFixed(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ // Match a character class with a greedy quantifier: consume as many matching
+ // characters as possible (bounded by quantityCount unless infinite), storing
+ // the count matched to the frame for use when backtracking.
+ void generateCharacterClassGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+
+ JumpList failures;
+ Label loop(this);
+ failures.append(atEndOfInput());
+
+ // For an inverted class a class match terminates the loop directly; for a
+ // normal class a miss terminates it.
+ if (term->invert()) {
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, failures, term->characterClass);
+ } else {
+ JumpList matchDest;
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+ failures.append(jump());
+ matchDest.link(this);
+ }
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+ if (term->quantityCount != quantifyInfinite) {
+ branch32(NotEqual, countRegister, Imm32(term->quantityCount.unsafeGet())).linkTo(loop, this);
+ failures.append(jump());
+ } else
+ jump(loop);
+
+ failures.link(this);
+ // Backtracking reenters here after decrementing the count.
+ op.m_reentry = label();
+
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ // Backtrack a greedy character-class match: give back one consumed character
+ // and retry the continuation; if the count is already zero, propagate failure.
+ void backtrackCharacterClassGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+ m_backtrackingState.append(branchTest32(Zero, countRegister));
+ sub32(TrustedImm32(1), countRegister);
+ sub32(TrustedImm32(1), index);
+ jump(op.m_reentry);
+ }
+
+ // Match a character class with a non-greedy quantifier: initially match zero
+ // characters; backtracking consumes more on demand via the reentry label.
+ void generateCharacterClassNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID countRegister = regT1;
+
+ move(TrustedImm32(0), countRegister);
+ op.m_reentry = label();
+ storeToFrame(countRegister, term->frameLocation);
+ }
+ // Backtrack a non-greedy character-class match: attempt to consume one more
+ // matching character and jump back to the reentry point. On failure (end of
+ // input, quantity limit, or class mismatch) rewind the input index and
+ // propagate failure.
+ void backtrackCharacterClassNonGreedy(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID countRegister = regT1;
+
+ JumpList nonGreedyFailures;
+
+ m_backtrackingState.link(this);
+
+ loadFromFrame(term->frameLocation, countRegister);
+
+ nonGreedyFailures.append(atEndOfInput());
+ nonGreedyFailures.append(branch32(Equal, countRegister, Imm32(term->quantityCount.unsafeGet())));
+
+ JumpList matchDest;
+ readCharacter(term->inputPosition - m_checked, character);
+ matchCharacterClass(character, matchDest, term->characterClass);
+
+ if (term->invert())
+ nonGreedyFailures.append(matchDest);
+ else {
+ nonGreedyFailures.append(jump());
+ matchDest.link(this);
+ }
+
+ add32(TrustedImm32(1), countRegister);
+ add32(TrustedImm32(1), index);
+
+ jump(op.m_reentry);
+
+ nonGreedyFailures.link(this);
+ // Rewind the input position past everything this term consumed.
+ sub32(countRegister, index);
+ m_backtrackingState.fallthrough();
+ }
+
+ // Match a .*-enclosed pattern (e.g. /.*foo.*/): scan backwards from the match
+ // start to the previous newline (or string start) to find where the match
+ // begins, then forwards from the current index to the next newline (or string
+ // end) to find where it ends. BOL/EOL anchors in non-multiline mode require
+ // the match to touch the string boundaries.
+ void generateDotStarEnclosure(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ const RegisterID character = regT0;
+ const RegisterID matchPos = regT1;
+
+ JumpList foundBeginningNewLine;
+ JumpList saveStartIndex;
+ JumpList foundEndingNewLine;
+
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ getMatchStart(matchPos);
+
+ // Scan backwards for a newline preceding the match start.
+ saveStartIndex.append(branchTest32(Zero, matchPos));
+ Label findBOLLoop(this);
+ sub32(TrustedImm32(1), matchPos);
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, matchPos, TimesOne, 0), character);
+ else
+ load16(BaseIndex(input, matchPos, TimesTwo, 0), character);
+ matchCharacterClass(character, foundBeginningNewLine, m_pattern.newlineCharacterClass());
+ branchTest32(NonZero, matchPos).linkTo(findBOLLoop, this);
+ saveStartIndex.append(jump());
+
+ foundBeginningNewLine.link(this);
+ add32(TrustedImm32(1), matchPos); // Advance past newline
+ saveStartIndex.link(this);
+
+ // A BOL anchor outside multiline mode only matches at position 0.
+ if (!m_pattern.m_multiline && term->anchors.bolAnchor)
+ op.m_jumps.append(branchTest32(NonZero, matchPos));
+
+ ASSERT(!m_pattern.m_body->m_hasFixedSize);
+ setMatchStart(matchPos);
+
+ move(index, matchPos);
+
+ // Scan forwards for a newline (or end of input) terminating the match.
+ Label findEOLLoop(this);
+ foundEndingNewLine.append(branch32(Equal, matchPos, length));
+ if (m_charSize == Char8)
+ load8(BaseIndex(input, matchPos, TimesOne, 0), character);
+ else
+ load16(BaseIndex(input, matchPos, TimesTwo, 0), character);
+ matchCharacterClass(character, foundEndingNewLine, m_pattern.newlineCharacterClass());
+ add32(TrustedImm32(1), matchPos);
+ jump(findEOLLoop);
+
+ foundEndingNewLine.link(this);
+
+ // An EOL anchor outside multiline mode only matches at the end of input.
+ if (!m_pattern.m_multiline && term->anchors.eolAnchor)
+ op.m_jumps.append(branch32(NotEqual, matchPos, length));
+
+ move(matchPos, index);
+ }
+
+ // Dot-star enclosures hold no backtracking state; propagate failure via the
+ // default handler.
+ void backtrackDotStarEnclosure(size_t opIndex)
+ {
+ backtrackTermDefault(opIndex);
+ }
+
+ // Code generation/backtracking for simple terms
+ // (pattern characters, character classes, and assertions).
+ // These methods farm out work to the set of functions above.
+ // Dispatch match-code generation for a single term based on its type and
+ // quantifier. Unsupported term types (back references) set m_shouldFallBack
+ // so matching is handed off to a non-JIT implementation.
+ void generateTerm(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ switch (term->type) {
+ case PatternTerm::TypePatternCharacter:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ generatePatternCharacterOnce(opIndex);
+ else
+ generatePatternCharacterFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ generatePatternCharacterGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ generatePatternCharacterNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ generateCharacterClassOnce(opIndex);
+ else
+ generateCharacterClassFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ generateCharacterClassGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ generateCharacterClassNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeAssertionBOL:
+ generateAssertionBOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionEOL:
+ generateAssertionEOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionWordBoundary:
+ generateAssertionWordBoundary(opIndex);
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ // Parentheses are compiled as their own YarrOps, never as plain terms.
+ case PatternTerm::TypeParenthesesSubpattern:
+ case PatternTerm::TypeParentheticalAssertion:
+ RELEASE_ASSERT_NOT_REACHED();
+ case PatternTerm::TypeBackReference:
+ // Back references are not supported by the JIT - fall back.
+ m_shouldFallBack = true;
+ break;
+ case PatternTerm::TypeDotStarEnclosure:
+ generateDotStarEnclosure(opIndex);
+ break;
+ }
+ }
+ // Dispatch backtracking-code generation for a single term; mirrors the
+ // structure of generateTerm() above.
+ void backtrackTerm(size_t opIndex)
+ {
+ YarrOp& op = m_ops[opIndex];
+ PatternTerm* term = op.m_term;
+
+ switch (term->type) {
+ case PatternTerm::TypePatternCharacter:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ backtrackPatternCharacterOnce(opIndex);
+ else
+ backtrackPatternCharacterFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ backtrackPatternCharacterGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ backtrackPatternCharacterNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+ switch (term->quantityType) {
+ case QuantifierFixedCount:
+ if (term->quantityCount == 1)
+ backtrackCharacterClassOnce(opIndex);
+ else
+ backtrackCharacterClassFixed(opIndex);
+ break;
+ case QuantifierGreedy:
+ backtrackCharacterClassGreedy(opIndex);
+ break;
+ case QuantifierNonGreedy:
+ backtrackCharacterClassNonGreedy(opIndex);
+ break;
+ }
+ break;
+
+ case PatternTerm::TypeAssertionBOL:
+ backtrackAssertionBOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionEOL:
+ backtrackAssertionEOL(opIndex);
+ break;
+
+ case PatternTerm::TypeAssertionWordBoundary:
+ backtrackAssertionWordBoundary(opIndex);
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ // Parentheses are compiled as their own YarrOps, never as plain terms.
+ case PatternTerm::TypeParenthesesSubpattern:
+ case PatternTerm::TypeParentheticalAssertion:
+ RELEASE_ASSERT_NOT_REACHED();
+
+ case PatternTerm::TypeDotStarEnclosure:
+ backtrackDotStarEnclosure(opIndex);
+ break;
+
+ case PatternTerm::TypeBackReference:
+ // Back references are not supported by the JIT - fall back.
+ m_shouldFallBack = true;
+ break;
+ }
+ }
+
+ // Walk the YarrOp list forwards, emitting the matching (success-path) code
+ // for each node. Backtracking code is emitted in a separate backwards pass
+ // (see backtrack()). m_checked tracks how much input has been verified as
+ // available at each point in the emission.
+ void generate()
+ {
+ // Forwards generate the matching code.
+ ASSERT(m_ops.size());
+ size_t opIndex = 0;
+
+ do {
+ YarrOp& op = m_ops[opIndex];
+ switch (op.m_op) {
+
+ case OpTerm:
+ generateTerm(opIndex);
+ break;
+
+ // OpBodyAlternativeBegin/Next/End
+ //
+ // These nodes wrap the set of alternatives in the body of the regular expression.
+ // There may be either one or two chains of OpBodyAlternative nodes, one representing
+ // the 'once through' sequence of alternatives (if any exist), and one representing
+ // the repeating alternatives (again, if any exist).
+ //
+ // Upon normal entry to the Begin alternative, we will check that input is available.
+ // Reentry to the Begin alternative will take place after the check has taken place,
+ // and will assume that the input position has already been progressed as appropriate.
+ //
+ // Entry to subsequent Next/End alternatives occurs when the prior alternative has
+ // successfully completed a match - return a success state from JIT code.
+ //
+ // Next alternatives allow for reentry optimized to suit backtracking from its
+ // preceding alternative. It expects the input position to still be set to a position
+ // appropriate to its predecessor, and it will only perform an input check if the
+ // predecessor had a minimum size less than its own.
+ //
+ // In the case 'once through' expressions, the End node will also have a reentry
+ // point to jump to when the last alternative fails. Again, this expects the input
+ // position to still reflect that expected by the prior alternative.
+ case OpBodyAlternativeBegin: {
+ PatternAlternative* alternative = op.m_alternative;
+
+ // Upon entry at the head of the set of alternatives, check if input is available
+ // to run the first alternative. (This progresses the input position).
+ op.m_jumps.append(jumpIfNoAvailableInput(alternative->m_minimumSize));
+ // We will reenter after the check, and assume the input position to have been
+ // set as appropriate to this alternative.
+ op.m_reentry = label();
+
+ m_checked += alternative->m_minimumSize;
+ break;
+ }
+ case OpBodyAlternativeNext:
+ case OpBodyAlternativeEnd: {
+ PatternAlternative* priorAlternative = m_ops[op.m_previousOp].m_alternative;
+ PatternAlternative* alternative = op.m_alternative;
+
+ // If we get here, the prior alternative matched - return success.
+
+ // Adjust the stack pointer to remove the pattern's frame.
+ removeCallFrame();
+
+ // Load appropriate values into the return register and the first output
+ // slot, and return. In the case of pattern with a fixed size, we will
+ // not have yet set the value in the first output slot.
+ ASSERT(index != returnRegister);
+ if (m_pattern.m_body->m_hasFixedSize) {
+ move(index, returnRegister);
+ if (priorAlternative->m_minimumSize)
+ sub32(Imm32(priorAlternative->m_minimumSize), returnRegister);
+ if (compileMode == IncludeSubpatterns)
+ store32(returnRegister, output);
+ } else
+ getMatchStart(returnRegister);
+ if (compileMode == IncludeSubpatterns)
+ store32(index, Address(output, 4));
+ move(index, returnRegister2);
+
+ generateReturn();
+
+ // This is the divide between the tail of the prior alternative, above, and
+ // the head of the subsequent alternative, below.
+
+ if (op.m_op == OpBodyAlternativeNext) {
+ // This is the reentry point for the Next alternative. We expect any code
+ // that jumps here to do so with the input position matching that of the
+ // PRIOR alternative, and we will only check input availability if we
+ // need to progress it forwards.
+ op.m_reentry = label();
+ if (alternative->m_minimumSize > priorAlternative->m_minimumSize) {
+ add32(Imm32(alternative->m_minimumSize - priorAlternative->m_minimumSize), index);
+ op.m_jumps.append(jumpIfNoAvailableInput());
+ } else if (priorAlternative->m_minimumSize > alternative->m_minimumSize)
+ sub32(Imm32(priorAlternative->m_minimumSize - alternative->m_minimumSize), index);
+ } else if (op.m_nextOp == notFound) {
+ // This is the reentry point for the End of 'once through' alternatives,
+ // jumped to when the last alternative fails to match.
+ op.m_reentry = label();
+ sub32(Imm32(priorAlternative->m_minimumSize), index);
+ }
+
+ if (op.m_op == OpBodyAlternativeNext)
+ m_checked += alternative->m_minimumSize;
+ m_checked -= priorAlternative->m_minimumSize;
+ break;
+ }
+
+ // OpSimpleNestedAlternativeBegin/Next/End
+ // OpNestedAlternativeBegin/Next/End
+ //
+ // These nodes are used to handle sets of alternatives that are nested within
+ // subpatterns and parenthetical assertions. The 'simple' forms are used where
+ // we do not need to be able to backtrack back into any alternative other than
+ // the last, the normal forms allow backtracking into any alternative.
+ //
+ // Each Begin/Next node is responsible for planting an input check to ensure
+ // sufficient input is available on entry. Next nodes additionally need to
+ // jump to the end - Next nodes use the End node's m_jumps list to hold this
+ // set of jumps.
+ //
+ // In the non-simple forms, successful alternative matches must store a
+ // 'return address' using a DataLabelPtr, used to store the address to jump
+ // to when backtracking, to get to the code for the appropriate alternative.
+ case OpSimpleNestedAlternativeBegin:
+ case OpNestedAlternativeBegin: {
+ PatternTerm* term = op.m_term;
+ PatternAlternative* alternative = op.m_alternative;
+ PatternDisjunction* disjunction = term->parentheses.disjunction;
+
+ // Calculate how much input we need to check for, and if non-zero check.
+ op.m_checkAdjust = alternative->m_minimumSize;
+ if ((term->quantityType == QuantifierFixedCount) && (term->type != PatternTerm::TypeParentheticalAssertion))
+ op.m_checkAdjust -= disjunction->m_minimumSize;
+ if (op.m_checkAdjust)
+ op.m_jumps.append(jumpIfNoAvailableInput(op.m_checkAdjust));
+
+ m_checked += op.m_checkAdjust;
+ break;
+ }
+ case OpSimpleNestedAlternativeNext:
+ case OpNestedAlternativeNext: {
+ PatternTerm* term = op.m_term;
+ PatternAlternative* alternative = op.m_alternative;
+ PatternDisjunction* disjunction = term->parentheses.disjunction;
+
+ // In the non-simple case, store a 'return address' so we can backtrack correctly.
+ if (op.m_op == OpNestedAlternativeNext) {
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ unsigned alternativeFrameLocation = parenthesesFrameLocation;
+ if (term->quantityType != QuantifierFixedCount)
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ op.m_returnAddress = storeToFrameWithPatch(alternativeFrameLocation);
+ }
+
+ if (term->quantityType != QuantifierFixedCount && !m_ops[op.m_previousOp].m_alternative->m_minimumSize) {
+ // If the previous alternative matched without consuming characters then
+ // backtrack to try to match while consuming some input.
+ op.m_zeroLengthMatch = branch32(Equal, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ }
+
+ // If we reach here then the last alternative has matched - jump to the
+ // End node, to skip over any further alternatives.
+ //
+ // FIXME: this is logically O(N^2) (though N can be expected to be very
+ // small). We could avoid this either by adding an extra jump to the JIT
+ // data structures, or by making backtracking code that jumps to Next
+ // alternatives are responsible for checking that input is available (if
+ // we didn't need to plant the input checks, then m_jumps would be free).
+ YarrOp* endOp = &m_ops[op.m_nextOp];
+ while (endOp->m_nextOp != notFound) {
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeNext || endOp->m_op == OpNestedAlternativeNext);
+ endOp = &m_ops[endOp->m_nextOp];
+ }
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeEnd || endOp->m_op == OpNestedAlternativeEnd);
+ endOp->m_jumps.append(jump());
+
+ // This is the entry point for the next alternative.
+ op.m_reentry = label();
+
+ // Calculate how much input we need to check for, and if non-zero check.
+ op.m_checkAdjust = alternative->m_minimumSize;
+ if ((term->quantityType == QuantifierFixedCount) && (term->type != PatternTerm::TypeParentheticalAssertion))
+ op.m_checkAdjust -= disjunction->m_minimumSize;
+ if (op.m_checkAdjust)
+ op.m_jumps.append(jumpIfNoAvailableInput(op.m_checkAdjust));
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked -= lastOp.m_checkAdjust;
+ m_checked += op.m_checkAdjust;
+ break;
+ }
+ case OpSimpleNestedAlternativeEnd:
+ case OpNestedAlternativeEnd: {
+ PatternTerm* term = op.m_term;
+
+ // In the non-simple case, store a 'return address' so we can backtrack correctly.
+ if (op.m_op == OpNestedAlternativeEnd) {
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ unsigned alternativeFrameLocation = parenthesesFrameLocation;
+ if (term->quantityType != QuantifierFixedCount)
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ op.m_returnAddress = storeToFrameWithPatch(alternativeFrameLocation);
+ }
+
+ if (term->quantityType != QuantifierFixedCount && !m_ops[op.m_previousOp].m_alternative->m_minimumSize) {
+ // If the previous alternative matched without consuming characters then
+ // backtrack to try to match while consuming some input.
+ op.m_zeroLengthMatch = branch32(Equal, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ }
+
+ // If this set of alternatives contains more than one alternative,
+ // then the Next nodes will have planted jumps to the End, and added
+ // them to this node's m_jumps list.
+ op.m_jumps.link(this);
+ op.m_jumps.clear();
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked -= lastOp.m_checkAdjust;
+ break;
+ }
+
+ // OpParenthesesSubpatternOnceBegin/End
+ //
+ // These nodes support (optionally) capturing subpatterns, that have a
+ // quantity count of 1 (this covers fixed once, and ?/?? quantifiers).
+ case OpParenthesesSubpatternOnceBegin: {
+ PatternTerm* term = op.m_term;
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ const RegisterID indexTemporary = regT0;
+ ASSERT(term->quantityCount == 1);
+
+ // Upon entry to a Greedy quantified set of parentheses, store the index.
+ // We'll use this for two purposes:
+ // - To indicate which iteration we are on of matching the remainder of
+ // the expression after the parentheses - the first, including the
+ // match within the parentheses, or the second having skipped over them.
+ // - To check for empty matches, which must be rejected.
+ //
+ // At the head of a NonGreedy set of parentheses we'll immediately set the
+ // value on the stack to -1 (indicating a match skipping the subpattern),
+ // and plant a jump to the end. We'll also plant a label to backtrack to
+ // to reenter the subpattern later, with a store to set up index on the
+ // second iteration.
+ //
+ // FIXME: for capturing parens, could use the index in the capture array?
+ if (term->quantityType == QuantifierGreedy)
+ storeToFrame(index, parenthesesFrameLocation);
+ else if (term->quantityType == QuantifierNonGreedy) {
+ storeToFrame(TrustedImm32(-1), parenthesesFrameLocation);
+ op.m_jumps.append(jump());
+ op.m_reentry = label();
+ storeToFrame(index, parenthesesFrameLocation);
+ }
+
+ // If the parentheses are capturing, store the starting index value to the
+ // captures array, offsetting as necessary.
+ //
+ // FIXME: could avoid offsetting this value in JIT code, apply
+ // offsets only afterwards, at the point the results array is
+ // being accessed.
+ if (term->capture() && compileMode == IncludeSubpatterns) {
+ int inputOffset = term->inputPosition - m_checked;
+ if (term->quantityType == QuantifierFixedCount)
+ inputOffset -= term->parentheses.disjunction->m_minimumSize;
+ if (inputOffset) {
+ move(index, indexTemporary);
+ add32(Imm32(inputOffset), indexTemporary);
+ setSubpatternStart(indexTemporary, term->parentheses.subpatternId);
+ } else
+ setSubpatternStart(index, term->parentheses.subpatternId);
+ }
+ break;
+ }
+ case OpParenthesesSubpatternOnceEnd: {
+ PatternTerm* term = op.m_term;
+ const RegisterID indexTemporary = regT0;
+ ASSERT(term->quantityCount == 1);
+
+#ifndef NDEBUG
+ // Runtime ASSERT to make sure that the nested alternative handled the
+ // "no input consumed" check.
+ if (term->quantityType != QuantifierFixedCount && !term->parentheses.disjunction->m_minimumSize) {
+ Jump pastBreakpoint;
+ pastBreakpoint = branch32(NotEqual, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ breakpoint();
+ pastBreakpoint.link(this);
+ }
+#endif
+
+ // If the parentheses are capturing, store the ending index value to the
+ // captures array, offsetting as necessary.
+ //
+ // FIXME: could avoid offsetting this value in JIT code, apply
+ // offsets only afterwards, at the point the results array is
+ // being accessed.
+ if (term->capture() && compileMode == IncludeSubpatterns) {
+ int inputOffset = term->inputPosition - m_checked;
+ if (inputOffset) {
+ move(index, indexTemporary);
+ add32(Imm32(inputOffset), indexTemporary);
+ setSubpatternEnd(indexTemporary, term->parentheses.subpatternId);
+ } else
+ setSubpatternEnd(index, term->parentheses.subpatternId);
+ }
+
+ // If the parentheses are quantified Greedy then add a label to jump back
+ // to if get a failed match from after the parentheses. For NonGreedy
+ // parentheses, link the jump from before the subpattern to here.
+ if (term->quantityType == QuantifierGreedy)
+ op.m_reentry = label();
+ else if (term->quantityType == QuantifierNonGreedy) {
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+ beginOp.m_jumps.link(this);
+ }
+ break;
+ }
+
+ // OpParenthesesSubpatternTerminalBegin/End
+ case OpParenthesesSubpatternTerminalBegin: {
+ PatternTerm* term = op.m_term;
+ ASSERT(term->quantityType == QuantifierGreedy);
+ ASSERT(term->quantityCount == quantifyInfinite);
+ ASSERT(!term->capture());
+
+ // Upon entry set a label to loop back to.
+ op.m_reentry = label();
+
+ // Store the start index of the current match; we need to reject zero
+ // length matches.
+ storeToFrame(index, term->frameLocation);
+ break;
+ }
+ case OpParenthesesSubpatternTerminalEnd: {
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+#ifndef NDEBUG
+ PatternTerm* term = op.m_term;
+
+ // Runtime ASSERT to make sure that the nested alternative handled the
+ // "no input consumed" check.
+ Jump pastBreakpoint;
+ pastBreakpoint = branch32(NotEqual, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ breakpoint();
+ pastBreakpoint.link(this);
+#endif
+
+ // We know that the match is non-zero, we can accept it and
+ // loop back up to the head of the subpattern.
+ jump(beginOp.m_reentry);
+
+ // This is the entry point to jump to when we stop matching - we will
+ // do so once the subpattern cannot match any more.
+ op.m_reentry = label();
+ break;
+ }
+
+ // OpParentheticalAssertionBegin/End
+ case OpParentheticalAssertionBegin: {
+ PatternTerm* term = op.m_term;
+
+ // Store the current index - assertions should not update index, so
+ // we will need to restore it upon a successful match.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ storeToFrame(index, parenthesesFrameLocation);
+
+ // Check
+ op.m_checkAdjust = m_checked - term->inputPosition;
+ if (op.m_checkAdjust)
+ sub32(Imm32(op.m_checkAdjust), index);
+
+ m_checked -= op.m_checkAdjust;
+ break;
+ }
+ case OpParentheticalAssertionEnd: {
+ PatternTerm* term = op.m_term;
+
+ // Restore the input index value.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ loadFromFrame(parenthesesFrameLocation, index);
+
+ // If inverted, a successful match of the assertion must be treated
+ // as a failure, so jump to backtracking.
+ if (term->invert()) {
+ op.m_jumps.append(jump());
+ op.m_reentry = label();
+ }
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked += lastOp.m_checkAdjust;
+ break;
+ }
+
+ case OpMatchFailed:
+ removeCallFrame();
+ move(TrustedImmPtr((void*)WTF::notFound), returnRegister);
+ move(TrustedImm32(0), returnRegister2);
+ generateReturn();
+ break;
+ }
+
+ ++opIndex;
+ } while (opIndex < m_ops.size());
+ }
+
+ void backtrack()
+ {
+ // Backwards generate the backtracking code.
+ size_t opIndex = m_ops.size();
+ ASSERT(opIndex);
+
+ do {
+ --opIndex;
+ YarrOp& op = m_ops[opIndex];
+ switch (op.m_op) {
+
+ case OpTerm:
+ backtrackTerm(opIndex);
+ break;
+
+ // OpBodyAlternativeBegin/Next/End
+ //
+ // For each Begin/Next node representing an alternative, we need to decide what to do
+ // in two circumstances:
+ // - If we backtrack back into this node, from within the alternative.
+ // - If the input check at the head of the alternative fails (if this exists).
+ //
+ // We treat these two cases differently since in the former case we have slightly
+ // more information - since we are backtracking out of a prior alternative we know
+ // that at least enough input was available to run it. For example, given the regular
+ // expression /a|b/, if we backtrack out of the first alternative (a failed pattern
+ // character match of 'a'), then we need not perform an additional input availability
+ // check before running the second alternative.
+ //
+ // Backtracking required differs for the last alternative, which in the case of the
+ // repeating set of alternatives must loop. The code generated for the last alternative
+ // will also be used to handle all input check failures from any prior alternatives -
+ // these require similar functionality, in seeking the next available alternative for
+ // which there is sufficient input.
+ //
+ // Since backtracking of all other alternatives simply requires us to link backtracks
+ // to the reentry point for the subsequent alternative, we will only be generating any
+ // code when backtracking the last alternative.
+ case OpBodyAlternativeBegin:
+ case OpBodyAlternativeNext: {
+ PatternAlternative* alternative = op.m_alternative;
+
+ if (op.m_op == OpBodyAlternativeNext) {
+ PatternAlternative* priorAlternative = m_ops[op.m_previousOp].m_alternative;
+ m_checked += priorAlternative->m_minimumSize;
+ }
+ m_checked -= alternative->m_minimumSize;
+
+ // Is this the last alternative? If not, then if we backtrack to this point we just
+ // need to jump to try to match the next alternative.
+ if (m_ops[op.m_nextOp].m_op != OpBodyAlternativeEnd) {
+ m_backtrackingState.linkTo(m_ops[op.m_nextOp].m_reentry, this);
+ break;
+ }
+ YarrOp& endOp = m_ops[op.m_nextOp];
+
+ YarrOp* beginOp = &op;
+ while (beginOp->m_op != OpBodyAlternativeBegin) {
+ ASSERT(beginOp->m_op == OpBodyAlternativeNext);
+ beginOp = &m_ops[beginOp->m_previousOp];
+ }
+
+ bool onceThrough = endOp.m_nextOp == notFound;
+
+ // First, generate code to handle cases where we backtrack out of an attempted match
+ // of the last alternative. If this is a 'once through' set of alternatives then we
+ // have nothing to do - link this straight through to the End.
+ if (onceThrough)
+ m_backtrackingState.linkTo(endOp.m_reentry, this);
+ else {
+ // If we don't need to move the input position, and the pattern has a fixed size
+ // (in which case we omit the store of the start index until the pattern has matched)
+ // then we can just link the backtrack out of the last alternative straight to the
+ // head of the first alternative.
+ if (m_pattern.m_body->m_hasFixedSize
+ && (alternative->m_minimumSize > beginOp->m_alternative->m_minimumSize)
+ && (alternative->m_minimumSize - beginOp->m_alternative->m_minimumSize == 1))
+ m_backtrackingState.linkTo(beginOp->m_reentry, this);
+ else {
+ // We need to generate a trampoline of code to execute before looping back
+ // around to the first alternative.
+ m_backtrackingState.link(this);
+
+ // If the pattern size is not fixed, then store the start index, for use if we match.
+ if (!m_pattern.m_body->m_hasFixedSize) {
+ if (alternative->m_minimumSize == 1)
+ setMatchStart(index);
+ else {
+ move(index, regT0);
+ if (alternative->m_minimumSize)
+ sub32(Imm32(alternative->m_minimumSize - 1), regT0);
+ else
+ add32(TrustedImm32(1), regT0);
+ setMatchStart(regT0);
+ }
+ }
+
+ // Generate code to loop. Check whether the last alternative is longer than the
+ // first (e.g. /a|xy/ or /a|xyz/).
+ if (alternative->m_minimumSize > beginOp->m_alternative->m_minimumSize) {
+ // We want to loop, and increment input position. If the delta is 1, it is
+ // already correctly incremented, if more than one then decrement as appropriate.
+ unsigned delta = alternative->m_minimumSize - beginOp->m_alternative->m_minimumSize;
+ ASSERT(delta);
+ if (delta != 1)
+ sub32(Imm32(delta - 1), index);
+ jump(beginOp->m_reentry);
+ } else {
+ // If the first alternative has minimum size 0xFFFFFFFFu, then there cannot
+ // be sufficient input available to handle this, so just fall through.
+ unsigned delta = beginOp->m_alternative->m_minimumSize - alternative->m_minimumSize;
+ if (delta != 0xFFFFFFFFu) {
+ // We need to check input because we are incrementing the input.
+ add32(Imm32(delta + 1), index);
+ checkInput().linkTo(beginOp->m_reentry, this);
+ }
+ }
+ }
+ }
+
+ // We can reach this point in the code in two ways:
+ // - Fallthrough from the code above (a repeating alternative backtracked out of its
+ // last alternative, and did not have sufficient input to run the first).
+ // - We will loop back up to the following label when a repeating alternative loops,
+ // following a failed input check.
+ //
+ // Either way, we have just failed the input check for the first alternative.
+ Label firstInputCheckFailed(this);
+
+ // Generate code to handle input check failures from alternatives except the last.
+ // prevOp is the alternative we're handling a bail out from (initially Begin), and
+ // nextOp is the alternative we will be attempting to reenter into.
+ //
+ // We will link input check failures from the forwards matching path back to the code
+ // that can handle them.
+ YarrOp* prevOp = beginOp;
+ YarrOp* nextOp = &m_ops[beginOp->m_nextOp];
+ while (nextOp->m_op != OpBodyAlternativeEnd) {
+ prevOp->m_jumps.link(this);
+
+ // We only get here if an input check fails, it is only worth checking again
+ // if the next alternative has a minimum size less than the last.
+ if (prevOp->m_alternative->m_minimumSize > nextOp->m_alternative->m_minimumSize) {
+ // FIXME: if we added an extra label to YarrOp, we could avoid needing to
+ // subtract delta back out, and reduce this code. Should performance test
+ // the benefit of this.
+ unsigned delta = prevOp->m_alternative->m_minimumSize - nextOp->m_alternative->m_minimumSize;
+ sub32(Imm32(delta), index);
+ Jump fail = jumpIfNoAvailableInput();
+ add32(Imm32(delta), index);
+ jump(nextOp->m_reentry);
+ fail.link(this);
+ } else if (prevOp->m_alternative->m_minimumSize < nextOp->m_alternative->m_minimumSize)
+ add32(Imm32(nextOp->m_alternative->m_minimumSize - prevOp->m_alternative->m_minimumSize), index);
+ prevOp = nextOp;
+ nextOp = &m_ops[nextOp->m_nextOp];
+ }
+
+ // We fall through to here if there is insufficient input to run the last alternative.
+
+ // If there is insufficient input to run the last alternative, then for 'once through'
+ // alternatives we are done - just jump back up into the forwards matching path at the End.
+ if (onceThrough) {
+ op.m_jumps.linkTo(endOp.m_reentry, this);
+ jump(endOp.m_reentry);
+ break;
+ }
+
+ // For repeating alternatives, link any input check failure from the last alternative to
+ // this point.
+ op.m_jumps.link(this);
+
+ bool needsToUpdateMatchStart = !m_pattern.m_body->m_hasFixedSize;
+
+ // Check for cases where input position is already incremented by 1 for the last
+ // alternative (this is particularly useful where the minimum size of the body
+ // disjunction is 0, e.g. /a*|b/).
+ if (needsToUpdateMatchStart && alternative->m_minimumSize == 1) {
+ // index is already incremented by 1, so just store it now!
+ setMatchStart(index);
+ needsToUpdateMatchStart = false;
+ }
+
+ // Check whether there is sufficient input to loop. Increment the input position by
+ // one, and check. Also add in the minimum disjunction size before checking - there
+ // is no point in looping if we're just going to fail all the input checks around
+ // the next iteration.
+ ASSERT(alternative->m_minimumSize >= m_pattern.m_body->m_minimumSize);
+ if (alternative->m_minimumSize == m_pattern.m_body->m_minimumSize) {
+ // If the last alternative had the same minimum size as the disjunction,
+ // just simply increment input pos by 1, no adjustment based on minimum size.
+ add32(TrustedImm32(1), index);
+ } else {
+ // If the minimum for the last alternative was one greater than that
+ // for the disjunction, we're already progressed by 1, nothing to do!
+ unsigned delta = (alternative->m_minimumSize - m_pattern.m_body->m_minimumSize) - 1;
+ if (delta)
+ sub32(Imm32(delta), index);
+ }
+ Jump matchFailed = jumpIfNoAvailableInput();
+
+ if (needsToUpdateMatchStart) {
+ if (!m_pattern.m_body->m_minimumSize)
+ setMatchStart(index);
+ else {
+ move(index, regT0);
+ sub32(Imm32(m_pattern.m_body->m_minimumSize), regT0);
+ setMatchStart(regT0);
+ }
+ }
+
+ // Calculate how much more input the first alternative requires than the minimum
+ // for the body as a whole. If no more is needed then we dont need an additional
+ // input check here - jump straight back up to the start of the first alternative.
+ if (beginOp->m_alternative->m_minimumSize == m_pattern.m_body->m_minimumSize)
+ jump(beginOp->m_reentry);
+ else {
+ if (beginOp->m_alternative->m_minimumSize > m_pattern.m_body->m_minimumSize)
+ add32(Imm32(beginOp->m_alternative->m_minimumSize - m_pattern.m_body->m_minimumSize), index);
+ else
+ sub32(Imm32(m_pattern.m_body->m_minimumSize - beginOp->m_alternative->m_minimumSize), index);
+ checkInput().linkTo(beginOp->m_reentry, this);
+ jump(firstInputCheckFailed);
+ }
+
+ // We jump to here if we iterate to the point that there is insufficient input to
+ // run any matches, and need to return a failure state from JIT code.
+ matchFailed.link(this);
+
+ removeCallFrame();
+ move(TrustedImmPtr((void*)WTF::notFound), returnRegister);
+ move(TrustedImm32(0), returnRegister2);
+ generateReturn();
+ break;
+ }
+ case OpBodyAlternativeEnd: {
+ // We should never backtrack back into a body disjunction.
+ ASSERT(m_backtrackingState.isEmpty());
+
+ PatternAlternative* priorAlternative = m_ops[op.m_previousOp].m_alternative;
+ m_checked += priorAlternative->m_minimumSize;
+ break;
+ }
+
+ // OpSimpleNestedAlternativeBegin/Next/End
+ // OpNestedAlternativeBegin/Next/End
+ //
+ // Generate code for when we backtrack back out of an alternative into
+ // a Begin or Next node, or when the entry input count check fails. If
+ // there are more alternatives we need to jump to the next alternative,
+ // if not we backtrack back out of the current set of parentheses.
+ //
+ // In the case of non-simple nested assertions we need to also link the
+ // 'return address' appropriately to backtrack back out into the correct
+ // alternative.
+ case OpSimpleNestedAlternativeBegin:
+ case OpSimpleNestedAlternativeNext:
+ case OpNestedAlternativeBegin:
+ case OpNestedAlternativeNext: {
+ YarrOp& nextOp = m_ops[op.m_nextOp];
+ bool isBegin = op.m_previousOp == notFound;
+ bool isLastAlternative = nextOp.m_nextOp == notFound;
+ ASSERT(isBegin == (op.m_op == OpSimpleNestedAlternativeBegin || op.m_op == OpNestedAlternativeBegin));
+ ASSERT(isLastAlternative == (nextOp.m_op == OpSimpleNestedAlternativeEnd || nextOp.m_op == OpNestedAlternativeEnd));
+
+ // Treat an input check failure the same as a failed match.
+ m_backtrackingState.append(op.m_jumps);
+
+ // Set the backtracks to jump to the appropriate place. We may need
+ // to link the backtracks in one of three different way depending on
+ // the type of alternative we are dealing with:
+ // - A single alternative, with no siblings.
+ // - The last alternative of a set of two or more.
+ // - An alternative other than the last of a set of two or more.
+ //
+ // In the case of a single alternative on its own, we don't need to
+ // jump anywhere - if the alternative fails to match we can just
+ // continue to backtrack out of the parentheses without jumping.
+ //
+ // In the case of the last alternative in a set of more than one, we
+ // need to jump to return back out to the beginning. We'll do so by
+ // adding a jump to the End node's m_jumps list, and linking this
+ // when we come to generate the Begin node. For alternatives other
+ // than the last, we need to jump to the next alternative.
+ //
+ // If the alternative had adjusted the input position we must link
+ // backtracking to here, correct, and then jump on. If not we can
+ // link the backtracks directly to their destination.
+ if (op.m_checkAdjust) {
+ // Handle the cases where we need to link the backtracks here.
+ m_backtrackingState.link(this);
+ sub32(Imm32(op.m_checkAdjust), index);
+ if (!isLastAlternative) {
+ // An alternative that is not the last should jump to its successor.
+ jump(nextOp.m_reentry);
+ } else if (!isBegin) {
+ // The last of more than one alternatives must jump back to the beginning.
+ nextOp.m_jumps.append(jump());
+ } else {
+ // A single alternative on its own can fall through.
+ m_backtrackingState.fallthrough();
+ }
+ } else {
+ // Handle the cases where we can link the backtracks directly to their destinations.
+ if (!isLastAlternative) {
+ // An alternative that is not the last should jump to its successor.
+ m_backtrackingState.linkTo(nextOp.m_reentry, this);
+ } else if (!isBegin) {
+ // The last of more than one alternatives must jump back to the beginning.
+ m_backtrackingState.takeBacktracksToJumpList(nextOp.m_jumps, this);
+ }
+ // In the case of a single alternative on its own do nothing - it can fall through.
+ }
+
+ // If there is a backtrack jump from a zero length match link it here.
+ if (op.m_zeroLengthMatch.isSet())
+ m_backtrackingState.append(op.m_zeroLengthMatch);
+
+ // At this point we've handled the backtracking back into this node.
+ // Now link any backtracks that need to jump to here.
+
+ // For non-simple alternatives, link the alternative's 'return address'
+ // so that we backtrack back out into the previous alternative.
+ if (op.m_op == OpNestedAlternativeNext)
+ m_backtrackingState.append(op.m_returnAddress);
+
+ // If there is more than one alternative, then the last alternative will
+ // have planted a jump to be linked to the end. This jump was added to the
+ // End node's m_jumps list. If we are back at the beginning, link it here.
+ if (isBegin) {
+ YarrOp* endOp = &m_ops[op.m_nextOp];
+ while (endOp->m_nextOp != notFound) {
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeNext || endOp->m_op == OpNestedAlternativeNext);
+ endOp = &m_ops[endOp->m_nextOp];
+ }
+ ASSERT(endOp->m_op == OpSimpleNestedAlternativeEnd || endOp->m_op == OpNestedAlternativeEnd);
+ m_backtrackingState.append(endOp->m_jumps);
+ }
+
+ if (!isBegin) {
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked += lastOp.m_checkAdjust;
+ }
+ m_checked -= op.m_checkAdjust;
+ break;
+ }
+ case OpSimpleNestedAlternativeEnd:
+ case OpNestedAlternativeEnd: {
+ PatternTerm* term = op.m_term;
+
+ // If there is a backtrack jump from a zero length match link it here.
+ if (op.m_zeroLengthMatch.isSet())
+ m_backtrackingState.append(op.m_zeroLengthMatch);
+
+ // If we backtrack into the end of a simple subpattern do nothing;
+ // just continue through into the last alternative. If we backtrack
+ // into the end of a non-simple set of alternatives we need to jump
+ // to the backtracking return address set up during generation.
+ if (op.m_op == OpNestedAlternativeEnd) {
+ m_backtrackingState.link(this);
+
+ // Plant a jump to the return address.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ unsigned alternativeFrameLocation = parenthesesFrameLocation;
+ if (term->quantityType != QuantifierFixedCount)
+ alternativeFrameLocation += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ loadFromFrameAndJump(alternativeFrameLocation);
+
+ // Link the DataLabelPtr associated with the end of the last
+ // alternative to this point.
+ m_backtrackingState.append(op.m_returnAddress);
+ }
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked += lastOp.m_checkAdjust;
+ break;
+ }
+
+ // OpParenthesesSubpatternOnceBegin/End
+ //
+ // When we are backtracking back out of a capturing subpattern we need
+ // to clear the start index in the matches output array, to record that
+ // this subpattern has not been captured.
+ //
+ // When backtracking back out of a Greedy quantified subpattern we need
+ // to catch this, and try running the remainder of the alternative after
+ // the subpattern again, skipping the parentheses.
+ //
+ // Upon backtracking back into a quantified set of parentheses we need to
+ // check whether we were currently skipping the subpattern. If not, we
+ // can backtrack into them, if we were we need to either backtrack back
+ // out of the start of the parentheses, or jump back to the forwards
+ // matching start, depending of whether the match is Greedy or NonGreedy.
+ case OpParenthesesSubpatternOnceBegin: {
+ PatternTerm* term = op.m_term;
+ ASSERT(term->quantityCount == 1);
+
+ // We only need to backtrack to this point if capturing or greedy.
+ if ((term->capture() && compileMode == IncludeSubpatterns) || term->quantityType == QuantifierGreedy) {
+ m_backtrackingState.link(this);
+
+ // If capturing, clear the capture (we only need to reset start).
+ if (term->capture() && compileMode == IncludeSubpatterns)
+ clearSubpatternStart(term->parentheses.subpatternId);
+
+ // If Greedy, jump to the end.
+ if (term->quantityType == QuantifierGreedy) {
+ // Clear the flag in the stackframe indicating we ran through the subpattern.
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ storeToFrame(TrustedImm32(-1), parenthesesFrameLocation);
+ // Jump to after the parentheses, skipping the subpattern.
+ jump(m_ops[op.m_nextOp].m_reentry);
+ // A backtrack from after the parentheses, when skipping the subpattern,
+ // will jump back to here.
+ op.m_jumps.link(this);
+ }
+
+ m_backtrackingState.fallthrough();
+ }
+ break;
+ }
+ case OpParenthesesSubpatternOnceEnd: {
+ PatternTerm* term = op.m_term;
+
+ if (term->quantityType != QuantifierFixedCount) {
+ m_backtrackingState.link(this);
+
+ // Check whether we should backtrack back into the parentheses, or if we
+ // are currently in a state where we had skipped over the subpattern
+ // (in which case the flag value on the stack will be -1).
+ unsigned parenthesesFrameLocation = term->frameLocation;
+ Jump hadSkipped = branch32(Equal, Address(stackPointerRegister, parenthesesFrameLocation * sizeof(void*)), TrustedImm32(-1));
+
+ if (term->quantityType == QuantifierGreedy) {
+ // For Greedy parentheses, we skip after having already tried going
+ // through the subpattern, so if we get here we're done.
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+ beginOp.m_jumps.append(hadSkipped);
+ } else {
+ // For NonGreedy parentheses, we try skipping the subpattern first,
+ // so if we get here we need to try running through the subpattern
+ // next. Jump back to the start of the parentheses in the forwards
+ // matching path.
+ ASSERT(term->quantityType == QuantifierNonGreedy);
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+ hadSkipped.linkTo(beginOp.m_reentry, this);
+ }
+
+ m_backtrackingState.fallthrough();
+ }
+
+ m_backtrackingState.append(op.m_jumps);
+ break;
+ }
+
+ // OpParenthesesSubpatternTerminalBegin/End
+ //
+ // Terminal subpatterns will always match - there is nothing after them to
+ // force a backtrack, and they have a minimum count of 0, and as such will
+ // always produce an acceptable result.
+ case OpParenthesesSubpatternTerminalBegin: {
+ // We will backtrack to this point once the subpattern cannot match any
+ // more. Since no match is accepted as a successful match (we are Greedy
+ // quantified with a minimum of zero) jump back to the forwards matching
+ // path at the end.
+ YarrOp& endOp = m_ops[op.m_nextOp];
+ m_backtrackingState.linkTo(endOp.m_reentry, this);
+ break;
+ }
+ case OpParenthesesSubpatternTerminalEnd:
+ // We should never be backtracking to here (hence the 'terminal' in the name).
+ ASSERT(m_backtrackingState.isEmpty());
+ m_backtrackingState.append(op.m_jumps);
+ break;
+
+ // OpParentheticalAssertionBegin/End
+ case OpParentheticalAssertionBegin: {
+ PatternTerm* term = op.m_term;
+ YarrOp& endOp = m_ops[op.m_nextOp];
+
+ // We need to handle the backtracks upon backtracking back out
+ // of a parenthetical assertion if either we need to correct
+ // the input index, or the assertion was inverted.
+ if (op.m_checkAdjust || term->invert()) {
+ m_backtrackingState.link(this);
+
+ if (op.m_checkAdjust)
+ add32(Imm32(op.m_checkAdjust), index);
+
+ // In an inverted assertion failure to match the subpattern
+ // is treated as a successful match - jump to the end of the
+ // subpattern. We already have adjusted the input position
+ // back to that before the assertion, which is correct.
+ if (term->invert())
+ jump(endOp.m_reentry);
+
+ m_backtrackingState.fallthrough();
+ }
+
+ // The End node's jump list will contain any backtracks into
+ // the end of the assertion. Also, if inverted, we will have
+ // added the failure caused by a successful match to this.
+ m_backtrackingState.append(endOp.m_jumps);
+
+ m_checked += op.m_checkAdjust;
+ break;
+ }
+ case OpParentheticalAssertionEnd: {
+ // FIXME: We should really be clearing any nested subpattern
+ // matches on bailing out from after the pattern. Firefox has
+ // this bug too (presumably because they use YARR!)
+
+ // Never backtrack into an assertion; later failures bail to before the begin.
+ m_backtrackingState.takeBacktracksToJumpList(op.m_jumps, this);
+
+ YarrOp& lastOp = m_ops[op.m_previousOp];
+ m_checked -= lastOp.m_checkAdjust;
+ break;
+ }
+
+ case OpMatchFailed:
+ break;
+ }
+
+ } while (opIndex);
+ }
+
+ // Compilation methods:
+ // ====================
+
+ // opCompileParenthesesSubpattern
+ // Emits ops for a subpattern (set of parentheses). These consist
+ // of a set of alternatives wrapped in an outer set of nodes for
+ // the parentheses.
+ // Supported types of parentheses are 'Once' (quantityCount == 1)
+ // and 'Terminal' (non-capturing parentheses quantified as greedy
+ // and infinite).
+ // Alternatives will use the 'Simple' set of ops if either the
+ // subpattern is terminal (in which case we will never need to
+ // backtrack), or if the subpattern only contains one alternative.
+ // Any other form of parentheses sets m_shouldFallBack so the
+ // pattern is handled by the interpreter instead.
+ void opCompileParenthesesSubpattern(PatternTerm* term)
+ {
+ YarrOpCode parenthesesBeginOpCode;
+ YarrOpCode parenthesesEndOpCode;
+ YarrOpCode alternativeBeginOpCode = OpSimpleNestedAlternativeBegin;
+ YarrOpCode alternativeNextOpCode = OpSimpleNestedAlternativeNext;
+ YarrOpCode alternativeEndOpCode = OpSimpleNestedAlternativeEnd;
+
+ // We can currently only compile quantity 1 subpatterns that are
+ // not copies. We generate a copy in the case of a range quantifier,
+ // e.g. /(?:x){3,9}/, or /(?:x)+/ (These are effectively expanded to
+ // /(?:x){3,3}(?:x){0,6}/ and /(?:x)(?:x)*/ respectively). The problem
+ // comes where the subpattern is capturing, in which case we would
+ // need to restore the capture from the first subpattern upon a
+ // failure in the second.
+ if (term->quantityCount == 1 && !term->parentheses.isCopy) {
+ // Select the 'Once' nodes.
+ parenthesesBeginOpCode = OpParenthesesSubpatternOnceBegin;
+ parenthesesEndOpCode = OpParenthesesSubpatternOnceEnd;
+
+ // If there is more than one alternative we cannot use the 'simple' nodes.
+ if (term->parentheses.disjunction->m_alternatives.size() != 1) {
+ alternativeBeginOpCode = OpNestedAlternativeBegin;
+ alternativeNextOpCode = OpNestedAlternativeNext;
+ alternativeEndOpCode = OpNestedAlternativeEnd;
+ }
+ } else if (term->parentheses.isTerminal) {
+ // Select the 'Terminal' nodes.
+ parenthesesBeginOpCode = OpParenthesesSubpatternTerminalBegin;
+ parenthesesEndOpCode = OpParenthesesSubpatternTerminalEnd;
+ } else {
+ // This subpattern is not supported by the JIT.
+ m_shouldFallBack = true;
+ return;
+ }
+
+ size_t parenBegin = m_ops.size();
+ m_ops.append(parenthesesBeginOpCode);
+
+ // Emit one Begin node, then for each alternative its terms followed
+ // by a Next node. The alternative nodes are threaded into a doubly
+ // linked list via m_previousOp/m_nextOp.
+ m_ops.append(alternativeBeginOpCode);
+ m_ops.last().m_previousOp = notFound;
+ m_ops.last().m_term = term;
+ Vector<OwnPtr<PatternAlternative> >& alternatives = term->parentheses.disjunction->m_alternatives;
+ for (unsigned i = 0; i < alternatives.size(); ++i) {
+ size_t lastOpIndex = m_ops.size() - 1;
+
+ PatternAlternative* nestedAlternative = alternatives[i].get();
+ opCompileAlternative(nestedAlternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(alternativeNextOpCode));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = nestedAlternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+ thisOp.m_term = term;
+ }
+ // The trailing Next node emitted by the loop is rewritten to be the End node.
+ YarrOp& lastOp = m_ops.last();
+ ASSERT(lastOp.m_op == alternativeNextOpCode);
+ lastOp.m_op = alternativeEndOpCode;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = notFound;
+
+ size_t parenEnd = m_ops.size();
+ m_ops.append(parenthesesEndOpCode);
+
+ // Link the outer parentheses Begin/End pair to each other.
+ m_ops[parenBegin].m_term = term;
+ m_ops[parenBegin].m_previousOp = notFound;
+ m_ops[parenBegin].m_nextOp = parenEnd;
+ m_ops[parenEnd].m_term = term;
+ m_ops[parenEnd].m_previousOp = parenBegin;
+ m_ops[parenEnd].m_nextOp = notFound;
+ }
+
+ // opCompileParentheticalAssertion
+ // Emits ops for a parenthetical assertion. These consist of an
+ // OpSimpleNestedAlternativeBegin/Next/End set of nodes wrapping
+ // the alternatives, with these wrapped by an outer pair of
+ // OpParentheticalAssertionBegin/End nodes.
+ // We can always use the OpSimpleNestedAlternative nodes in the
+ // case of parenthetical assertions since these only ever match
+ // once, and will never backtrack back into the assertion.
+ void opCompileParentheticalAssertion(PatternTerm* term)
+ {
+ size_t parenBegin = m_ops.size();
+ m_ops.append(OpParentheticalAssertionBegin);
+
+ // Emit one Begin node, then for each alternative its terms followed
+ // by a Next node, threaded into a doubly linked list.
+ m_ops.append(OpSimpleNestedAlternativeBegin);
+ m_ops.last().m_previousOp = notFound;
+ m_ops.last().m_term = term;
+ Vector<OwnPtr<PatternAlternative> >& alternatives = term->parentheses.disjunction->m_alternatives;
+ for (unsigned i = 0; i < alternatives.size(); ++i) {
+ size_t lastOpIndex = m_ops.size() - 1;
+
+ PatternAlternative* nestedAlternative = alternatives[i].get();
+ opCompileAlternative(nestedAlternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(OpSimpleNestedAlternativeNext));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = nestedAlternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+ thisOp.m_term = term;
+ }
+ // The trailing Next node emitted by the loop is rewritten to be the End node.
+ YarrOp& lastOp = m_ops.last();
+ ASSERT(lastOp.m_op == OpSimpleNestedAlternativeNext);
+ lastOp.m_op = OpSimpleNestedAlternativeEnd;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = notFound;
+
+ size_t parenEnd = m_ops.size();
+ m_ops.append(OpParentheticalAssertionEnd);
+
+ // Link the outer assertion Begin/End pair to each other.
+ m_ops[parenBegin].m_term = term;
+ m_ops[parenBegin].m_previousOp = notFound;
+ m_ops[parenBegin].m_nextOp = parenEnd;
+ m_ops[parenEnd].m_term = term;
+ m_ops[parenEnd].m_previousOp = parenBegin;
+ m_ops[parenEnd].m_nextOp = notFound;
+ }
+
+ // opCompileAlternative
+ // Called to emit nodes for all terms in an alternative.
+ // Parentheses and parenthetical assertions recurse into their own
+ // compile methods; every other term type becomes a single op.
+ void opCompileAlternative(PatternAlternative* alternative)
+ {
+ optimizeAlternative(alternative);
+
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i) {
+ PatternTerm* term = &alternative->m_terms[i];
+
+ switch (term->type) {
+ case PatternTerm::TypeParenthesesSubpattern:
+ opCompileParenthesesSubpattern(term);
+ break;
+
+ case PatternTerm::TypeParentheticalAssertion:
+ opCompileParentheticalAssertion(term);
+ break;
+
+ default:
+ // All other term types are emitted directly as a single op.
+ m_ops.append(term);
+ }
+ }
+ }
+
+ // opCompileBody
+ // This method compiles the body disjunction of the regular expression.
+ // The body consists of two sets of alternatives - zero or more 'once
+ // through' (BOL anchored) alternatives, followed by zero or more
+ // repeated alternatives.
+ // For each of these two sets of alternatives, if not empty they will be
+ // wrapped in a set of OpBodyAlternativeBegin/Next/End nodes (with the
+ // 'begin' node referencing the first alternative, and 'next' nodes
+ // referencing any further alternatives. The begin/next/end nodes are
+ // linked together in a doubly linked list. In the case of repeating
+ // alternatives, the end node is also linked back to the beginning.
+ // If no repeating alternatives exist, then a OpMatchFailed node exists
+ // to return the failing result.
+ void opCompileBody(PatternDisjunction* disjunction)
+ {
+ Vector<OwnPtr<PatternAlternative> >& alternatives = disjunction->m_alternatives;
+ size_t currentAlternativeIndex = 0;
+
+ // Emit the 'once through' alternatives.
+ if (alternatives.size() && alternatives[0]->onceThrough()) {
+ m_ops.append(YarrOp(OpBodyAlternativeBegin));
+ m_ops.last().m_previousOp = notFound;
+
+ do {
+ size_t lastOpIndex = m_ops.size() - 1;
+ PatternAlternative* alternative = alternatives[currentAlternativeIndex].get();
+ opCompileAlternative(alternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(OpBodyAlternativeNext));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = alternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+
+ ++currentAlternativeIndex;
+ } while (currentAlternativeIndex < alternatives.size() && alternatives[currentAlternativeIndex]->onceThrough());
+
+ // The trailing Next node becomes the End of the 'once through' set.
+ YarrOp& lastOp = m_ops.last();
+
+ ASSERT(lastOp.m_op == OpBodyAlternativeNext);
+ lastOp.m_op = OpBodyAlternativeEnd;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = notFound;
+ }
+
+ // If every alternative was 'once through', a failure to match them
+ // all is a failure to match the pattern - emit OpMatchFailed.
+ if (currentAlternativeIndex == alternatives.size()) {
+ m_ops.append(YarrOp(OpMatchFailed));
+ return;
+ }
+
+ // Emit the repeated alternatives.
+ size_t repeatLoop = m_ops.size();
+ m_ops.append(YarrOp(OpBodyAlternativeBegin));
+ m_ops.last().m_previousOp = notFound;
+ do {
+ size_t lastOpIndex = m_ops.size() - 1;
+ PatternAlternative* alternative = alternatives[currentAlternativeIndex].get();
+ ASSERT(!alternative->onceThrough());
+ opCompileAlternative(alternative);
+
+ size_t thisOpIndex = m_ops.size();
+ m_ops.append(YarrOp(OpBodyAlternativeNext));
+
+ YarrOp& lastOp = m_ops[lastOpIndex];
+ YarrOp& thisOp = m_ops[thisOpIndex];
+
+ lastOp.m_alternative = alternative;
+ lastOp.m_nextOp = thisOpIndex;
+ thisOp.m_previousOp = lastOpIndex;
+
+ ++currentAlternativeIndex;
+ } while (currentAlternativeIndex < alternatives.size());
+ // The End node of the repeating set links back to its Begin
+ // (m_nextOp = repeatLoop) so matching can loop over the input.
+ YarrOp& lastOp = m_ops.last();
+ ASSERT(lastOp.m_op == OpBodyAlternativeNext);
+ lastOp.m_op = OpBodyAlternativeEnd;
+ lastOp.m_alternative = 0;
+ lastOp.m_nextOp = repeatLoop;
+ }
+
+ // Emits the platform-specific function prologue: saves the callee-saved
+ // registers the generated matcher uses, and loads any arguments that the
+ // ABI does not already deliver in the registers the matcher expects.
+ // NOTE(review): the pop order in generateReturn() must remain the exact
+ // mirror of the push order here.
+ void generateEnter()
+ {
+#if CPU(X86_64)
+ push(X86Registers::ebp);
+ move(stackPointerRegister, X86Registers::ebp);
+ push(X86Registers::ebx);
+ // The ABI doesn't guarantee the upper bits are zero on unsigned arguments, so clear them ourselves.
+ zeroExtend32ToPtr(index, index);
+ zeroExtend32ToPtr(length, length);
+#if OS(WINDOWS)
+ // On Win64 the output pointer arrives on the stack rather than in a register.
+ if (compileMode == IncludeSubpatterns)
+ loadPtr(Address(X86Registers::ebp, 6 * sizeof(void*)), output);
+#endif
+#elif CPU(X86)
+ push(X86Registers::ebp);
+ move(stackPointerRegister, X86Registers::ebp);
+ // TODO: do we need spill registers to fill the output pointer if there are no sub captures?
+ push(X86Registers::ebx);
+ push(X86Registers::edi);
+ push(X86Registers::esi);
+ // load output into edi (2 = saved ebp + return address).
+ #if COMPILER(MSVC)
+ // MSVC builds do not use the regparm calling convention, so all
+ // arguments are read from the stack.
+ loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), input);
+ loadPtr(Address(X86Registers::ebp, 3 * sizeof(void*)), index);
+ loadPtr(Address(X86Registers::ebp, 4 * sizeof(void*)), length);
+ if (compileMode == IncludeSubpatterns)
+ loadPtr(Address(X86Registers::ebp, 5 * sizeof(void*)), output);
+ #else
+ if (compileMode == IncludeSubpatterns)
+ loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), output);
+ #endif
+#elif CPU(ARM)
+ push(ARMRegisters::r4);
+ push(ARMRegisters::r5);
+ push(ARMRegisters::r6);
+#if CPU(ARM_TRADITIONAL)
+ push(ARMRegisters::r8); // scratch register
+#endif
+ if (compileMode == IncludeSubpatterns)
+ move(ARMRegisters::r3, output);
+#elif CPU(SH4)
+ push(SH4Registers::r11);
+ push(SH4Registers::r13);
+#elif CPU(MIPS)
+ // Do nothing.
+#endif
+ }
+
+ // Emits the platform-specific function epilogue: restores callee-saved
+ // registers (in the exact reverse of generateEnter()'s push order) and
+ // returns. The match result is expected in returnRegister/returnRegister2.
+ void generateReturn()
+ {
+#if CPU(X86_64)
+#if OS(WINDOWS)
+ // Store the return value in the allocated space pointed by rcx.
+ store64(returnRegister, Address(X86Registers::ecx));
+ store64(returnRegister2, Address(X86Registers::ecx, sizeof(void*)));
+ move(X86Registers::ecx, returnRegister);
+#endif
+ pop(X86Registers::ebx);
+ pop(X86Registers::ebp);
+#elif CPU(X86)
+ pop(X86Registers::esi);
+ pop(X86Registers::edi);
+ pop(X86Registers::ebx);
+ pop(X86Registers::ebp);
+#elif CPU(ARM)
+#if CPU(ARM_TRADITIONAL)
+ pop(ARMRegisters::r8); // scratch register
+#endif
+ pop(ARMRegisters::r6);
+ pop(ARMRegisters::r5);
+ pop(ARMRegisters::r4);
+#elif CPU(SH4)
+ pop(SH4Registers::r13);
+ pop(SH4Registers::r11);
+#elif CPU(MIPS)
+ // Do nothing
+#endif
+ ret();
+ }
+
+public:
+ // Constructs a generator for the given parsed pattern and character
+ // width (8-bit or 16-bit input); m_charScale selects the address scale
+ // used when indexing the subject string.
+ YarrGenerator(YarrPattern& pattern, YarrCharSize charSize)
+ : m_pattern(pattern)
+ , m_charSize(charSize)
+ , m_charScale(m_charSize == Char8 ? TimesOne: TimesTwo)
+ , m_shouldFallBack(false)
+ , m_checked(0)
+ {
+ }
+
+ // Main entry point: emits machine code for the pattern and installs it
+ // on jitObject, or marks jitObject for interpreter fallback if the
+ // pattern uses constructs this JIT does not support.
+ void compile(JSGlobalData* globalData, YarrCodeBlock& jitObject)
+ {
+ generateEnter();
+
+ // Fail immediately (notFound) if the start index is already past the
+ // end of the input.
+ Jump hasInput = checkInput();
+ move(TrustedImmPtr((void*)WTF::notFound), returnRegister);
+ move(TrustedImm32(0), returnRegister2);
+ generateReturn();
+ hasInput.link(this);
+
+ // Initialize all subpattern start offsets to -1 (not captured).
+ if (compileMode == IncludeSubpatterns) {
+ for (unsigned i = 0; i < m_pattern.m_numSubpatterns + 1; ++i)
+ store32(TrustedImm32(-1), Address(output, (i << 1) * sizeof(int)));
+ }
+
+ if (!m_pattern.m_body->m_hasFixedSize)
+ setMatchStart(index);
+
+ initCallFrame();
+
+ // Compile the pattern to the internal 'YarrOp' representation.
+ opCompileBody(m_pattern.m_body);
+
+ // If we encountered anything we can't handle in the JIT code
+ // (e.g. backreferences) then return early.
+ if (m_shouldFallBack) {
+ jitObject.setFallBack(true);
+ return;
+ }
+
+ // Emit the forwards matching path, then the backtracking path.
+ generate();
+ backtrack();
+
+ // Link & finalize the code.
+ LinkBuffer linkBuffer(*globalData, this, REGEXP_CODE_ID);
+ m_backtrackingState.linkDataLabels(linkBuffer);
+
+ // Install the finalized code under the slot matching the compile
+ // mode and character width.
+ if (compileMode == MatchOnly) {
+ if (m_charSize == Char8)
+ jitObject.set8BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, ("Match-only 8-bit regular expression")));
+ else
+ jitObject.set16BitCodeMatchOnly(FINALIZE_CODE(linkBuffer, ("Match-only 16-bit regular expression")));
+ } else {
+ if (m_charSize == Char8)
+ jitObject.set8BitCode(FINALIZE_CODE(linkBuffer, ("8-bit regular expression")));
+ else
+ jitObject.set16BitCode(FINALIZE_CODE(linkBuffer, ("16-bit regular expression")));
+ }
+ jitObject.setFallBack(m_shouldFallBack);
+ }
+
+private:
+ YarrPattern& m_pattern;
+
+ YarrCharSize m_charSize;
+
+ Scale m_charScale;
+
+ // Used to detect regular expression constructs that are not currently
+ // supported in the JIT; fall back to the interpreter when this is detected.
+ bool m_shouldFallBack;
+
+ // The regular expression expressed as a linear sequence of operations.
+ Vector<YarrOp, 128> m_ops;
+
+ // This records the current input offset being applied due to the current
+ // set of alternatives we are nested within. E.g. when matching the
+ // character 'b' within the regular expression /abc/, we will know that
+ // the minimum size for the alternative is 3, checked upon entry to the
+ // alternative, and that 'b' is at offset 1 from the start, and as such
+ // when matching 'b' we need to apply an offset of -2 to the load.
+ //
+ // FIXME: This should go away. Rather than tracking this value throughout
+ // code generation, we should gather this information up front & store it
+ // on the YarrOp structure.
+ int m_checked;
+
+ // This class records state whilst generating the backtracking path of code.
+ BacktrackingState m_backtrackingState;
+};
+
+// Public entry point: instantiates the generator template for the requested
+// compile mode (match-only vs. capturing subpatterns) and compiles the
+// pattern into jitObject.
+void jitCompile(YarrPattern& pattern, YarrCharSize charSize, JSGlobalData* globalData, YarrCodeBlock& jitObject, YarrJITCompileMode mode)
+{
+ if (mode == MatchOnly)
+ YarrGenerator<MatchOnly>(pattern, charSize).compile(globalData, jitObject);
+ else
+ YarrGenerator<IncludeSubpatterns>(pattern, charSize).compile(globalData, jitObject);
+}
+
+}}
+
+#endif
diff --git a/src/3rdparty/masm/yarr/YarrJIT.h b/src/3rdparty/masm/yarr/YarrJIT.h
new file mode 100644
index 0000000000..bb7033fdea
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrJIT.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrJIT_h
+#define YarrJIT_h
+
+#if ENABLE(YARR_JIT)
+
+#include "JSGlobalData.h"
+#include "MacroAssemblerCodeRef.h"
+#include "MatchResult.h"
+#include "Yarr.h"
+#include "YarrPattern.h"
+
+#if CPU(X86) && !COMPILER(MSVC)
+#define YARR_CALL __attribute__ ((regparm (3)))
+#else
+#define YARR_CALL
+#endif
+
+namespace JSC {
+
+class JSGlobalData;
+class ExecutablePool;
+
+namespace Yarr {
+
+// Holds the machine code generated for a single regular expression, in up to
+// four variants: 8-bit and 16-bit input, each as a full matcher (writing
+// subpattern boundaries to 'output') or as a match-only matcher.
+class YarrCodeBlock {
+#if CPU(X86_64)
+    typedef MatchResult (*YarrJITCode8)(const LChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+    typedef MatchResult (*YarrJITCode16)(const UChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+    typedef MatchResult (*YarrJITCodeMatchOnly8)(const LChar* input, unsigned start, unsigned length) YARR_CALL;
+    typedef MatchResult (*YarrJITCodeMatchOnly16)(const UChar* input, unsigned start, unsigned length) YARR_CALL;
+#else
+    typedef EncodedMatchResult (*YarrJITCode8)(const LChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+    typedef EncodedMatchResult (*YarrJITCode16)(const UChar* input, unsigned start, unsigned length, int* output) YARR_CALL;
+    typedef EncodedMatchResult (*YarrJITCodeMatchOnly8)(const LChar* input, unsigned start, unsigned length) YARR_CALL;
+    typedef EncodedMatchResult (*YarrJITCodeMatchOnly16)(const UChar* input, unsigned start, unsigned length) YARR_CALL;
+#endif
+
+public:
+    YarrCodeBlock()
+        : m_needFallBack(false)
+    {
+    }
+
+    ~YarrCodeBlock()
+    {
+    }
+
+    // Set when the JIT cannot compile this pattern; the caller should fall
+    // back to the interpreter.
+    void setFallBack(bool fallback) { m_needFallBack = fallback; }
+    bool isFallBack() { return m_needFallBack; }
+
+    bool has8BitCode() { return m_ref8.size(); }
+    bool has16BitCode() { return m_ref16.size(); }
+    void set8BitCode(MacroAssemblerCodeRef ref) { m_ref8 = ref; }
+    void set16BitCode(MacroAssemblerCodeRef ref) { m_ref16 = ref; }
+
+    bool has8BitCodeMatchOnly() { return m_matchOnly8.size(); }
+    bool has16BitCodeMatchOnly() { return m_matchOnly16.size(); }
+    void set8BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly8 = matchOnly; }
+    void set16BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly16 = matchOnly; }
+
+    // Run the generated matcher; callers must first check the corresponding
+    // has*Code() predicate (asserted below).
+    MatchResult execute(const LChar* input, unsigned start, unsigned length, int* output)
+    {
+        ASSERT(has8BitCode());
+        return MatchResult(reinterpret_cast<YarrJITCode8>(m_ref8.code().executableAddress())(input, start, length, output));
+    }
+
+    MatchResult execute(const UChar* input, unsigned start, unsigned length, int* output)
+    {
+        ASSERT(has16BitCode());
+        return MatchResult(reinterpret_cast<YarrJITCode16>(m_ref16.code().executableAddress())(input, start, length, output));
+    }
+
+    MatchResult execute(const LChar* input, unsigned start, unsigned length)
+    {
+        ASSERT(has8BitCodeMatchOnly());
+        return MatchResult(reinterpret_cast<YarrJITCodeMatchOnly8>(m_matchOnly8.code().executableAddress())(input, start, length));
+    }
+
+    MatchResult execute(const UChar* input, unsigned start, unsigned length)
+    {
+        ASSERT(has16BitCodeMatchOnly());
+        return MatchResult(reinterpret_cast<YarrJITCodeMatchOnly16>(m_matchOnly16.code().executableAddress())(input, start, length));
+    }
+
+#if ENABLE(REGEXP_TRACING)
+    // FIX: this previously read the nonexistent member 'm_ref' and therefore
+    // failed to compile when REGEXP_TRACING was enabled; report whichever
+    // variant of the generated code is present.
+    void *getAddr()
+    {
+        if (has8BitCode())
+            return m_ref8.code().executableAddress();
+        return m_ref16.code().executableAddress();
+    }
+#endif
+
+    // Drop all generated code and reset the fall-back flag.
+    void clear()
+    {
+        m_ref8 = MacroAssemblerCodeRef();
+        m_ref16 = MacroAssemblerCodeRef();
+        m_matchOnly8 = MacroAssemblerCodeRef();
+        m_matchOnly16 = MacroAssemblerCodeRef();
+        m_needFallBack = false;
+    }
+
+private:
+    MacroAssemblerCodeRef m_ref8;
+    MacroAssemblerCodeRef m_ref16;
+    MacroAssemblerCodeRef m_matchOnly8;
+    MacroAssemblerCodeRef m_matchOnly16;
+    bool m_needFallBack;
+};
+
+// Selects whether generated code records subpattern capture boundaries
+// (IncludeSubpatterns) or only reports overall match success (MatchOnly).
+enum YarrJITCompileMode {
+    MatchOnly,
+    IncludeSubpatterns
+};
+void jitCompile(YarrPattern&, YarrCharSize, JSGlobalData*, YarrCodeBlock& jitObject, YarrJITCompileMode = IncludeSubpatterns);
+
+} } // namespace JSC::Yarr
+
+#endif
+
+#endif // YarrJIT_h
diff --git a/src/3rdparty/masm/yarr/YarrParser.h b/src/3rdparty/masm/yarr/YarrParser.h
new file mode 100644
index 0000000000..8c5d71b5fe
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrParser.h
@@ -0,0 +1,880 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrParser_h
+#define YarrParser_h
+
+#include "Yarr.h"
+#include <wtf/ASCIICType.h>
+#include <wtf/text/WTFString.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace JSC { namespace Yarr {
+
+#define REGEXP_ERROR_PREFIX "Invalid regular expression: "
+
+// Identifiers for the built-in character classes: \d, \s, \w (see
+// parseEscape()), and the newline class used (inverted) to implement '.'.
+enum BuiltInCharacterClassID {
+    DigitClassID,
+    SpaceClassID,
+    WordClassID,
+    NewlineClassID,
+};
+
+// The Parser class should not be used directly - only via the Yarr::parse() method.
+// Recursive-descent parser for regular expression patterns. Tokens are
+// reported to the Delegate as they are parsed (see the interface comment
+// above Yarr::parse()). CharType is LChar or UChar, matching the storage
+// of the pattern string.
+template<class Delegate, typename CharType>
+class Parser {
+private:
+    template<class FriendDelegate>
+    friend const char* parse(FriendDelegate&, const String& pattern, unsigned backReferenceLimit);
+
+    // Parse failures; parse() maps these to messages via errorMessages[],
+    // so the order here must match that array.
+    enum ErrorCode {
+        NoError,
+        PatternTooLarge,
+        QuantifierOutOfOrder,
+        QuantifierWithoutAtom,
+        QuantifierTooLarge,
+        MissingParentheses,
+        ParenthesesUnmatched,
+        ParenthesesTypeInvalid,
+        CharacterClassUnmatched,
+        CharacterClassOutOfOrder,
+        EscapeUnterminated,
+        NumberOfErrorCodes
+    };
+
+    /*
+     * CharacterClassParserDelegate:
+     *
+     * The class CharacterClassParserDelegate is used in the parsing of character
+     * classes. This class handles detection of character ranges. This class
+     * implements enough of the delegate interface such that it can be passed to
+     * parseEscape() as an EscapeDelegate. This allows parseEscape() to be reused
+     * to perform the parsing of escape characters in character sets.
+     */
+    class CharacterClassParserDelegate {
+    public:
+        CharacterClassParserDelegate(Delegate& delegate, ErrorCode& err)
+            : m_delegate(delegate)
+            , m_err(err)
+            , m_state(Empty)
+            , m_character(0)
+        {
+        }
+
+        /*
+         * begin():
+         *
+         * Called at beginning of construction.
+         */
+        void begin(bool invert)
+        {
+            m_delegate.atomCharacterClassBegin(invert);
+        }
+
+        /*
+         * atomPatternCharacter():
+         *
+         * This method is called either from parseCharacterClass() (for an unescaped
+         * character in a character class), or from parseEscape(). In the former case
+         * the value true will be passed for the argument 'hyphenIsRange', and in this
+         * mode we will allow a hyphen to be treated as indicating a range (i.e. /[a-z]/
+         * is different to /[a\-z]/).
+         */
+        void atomPatternCharacter(UChar ch, bool hyphenIsRange = false)
+        {
+            switch (m_state) {
+            case AfterCharacterClass:
+                // Following a builtin character class we need look out for a hyphen.
+                // We're looking for invalid ranges, such as /[\d-x]/ or /[\d-\d]/.
+                // If we see a hyphen following a character class then unlike usual
+                // we'll report it to the delegate immediately, and put ourself into
+                // a poisoned state. Any following calls to add another character or
+                // character class will result in an error. (A hyphen following a
+                // character-class is itself valid, but only at the end of a regex).
+                if (hyphenIsRange && ch == '-') {
+                    m_delegate.atomCharacterClassAtom('-');
+                    m_state = AfterCharacterClassHyphen;
+                    return;
+                }
+                // Otherwise just fall through - cached character so treat this as Empty.
+
+            case Empty:
+                m_character = ch;
+                m_state = CachedCharacter;
+                return;
+
+            case CachedCharacter:
+                if (hyphenIsRange && ch == '-')
+                    m_state = CachedCharacterHyphen;
+                else {
+                    m_delegate.atomCharacterClassAtom(m_character);
+                    m_character = ch;
+                }
+                return;
+
+            case CachedCharacterHyphen:
+                if (ch < m_character) {
+                    m_err = CharacterClassOutOfOrder;
+                    return;
+                }
+                m_delegate.atomCharacterClassRange(m_character, ch);
+                m_state = Empty;
+                return;
+
+                // See comment in atomBuiltInCharacterClass below.
+                // This too is technically an error, per ECMA-262, and again we
+                // choose to allow this. Note a subtlety here: while we
+                // diverge from the spec's definition of CharacterRange we do
+                // remain in compliance with the grammar. For example, consider
+                // the expression /[\d-a-z]/. We comply with the grammar in
+                // this case by not allowing a-z to be matched as a range.
+            case AfterCharacterClassHyphen:
+                m_delegate.atomCharacterClassAtom(ch);
+                m_state = Empty;
+                return;
+            }
+        }
+
+        /*
+         * atomBuiltInCharacterClass():
+         *
+         * Adds a built-in character class, called by parseEscape().
+         */
+        void atomBuiltInCharacterClass(BuiltInCharacterClassID classID, bool invert)
+        {
+            switch (m_state) {
+            case CachedCharacter:
+                // Flush the currently cached character, then fall through.
+                m_delegate.atomCharacterClassAtom(m_character);
+
+            case Empty:
+            case AfterCharacterClass:
+                m_state = AfterCharacterClass;
+                m_delegate.atomCharacterClassBuiltIn(classID, invert);
+                return;
+
+                // If we hit either of these cases, we have an invalid range that
+                // looks something like /[x-\d]/ or /[\d-\d]/.
+                // According to ECMA-262 this should be a syntax error, but
+                // empirical testing shows this to break the web. Instead we
+                // comply with the ECMA-262 grammar, and assume the grammar to
+                // have matched the range correctly, but tweak our interpretation
+                // of CharacterRange. Effectively we implicitly handle the hyphen
+                // as if it were escaped, e.g. /[\w-_]/ is treated as /[\w\-_]/.
+            case CachedCharacterHyphen:
+                m_delegate.atomCharacterClassAtom(m_character);
+                m_delegate.atomCharacterClassAtom('-');
+                // fall through
+            case AfterCharacterClassHyphen:
+                m_delegate.atomCharacterClassBuiltIn(classID, invert);
+                m_state = Empty;
+                return;
+            }
+        }
+
+        /*
+         * end():
+         *
+         * Called at end of construction.
+         */
+        void end()
+        {
+            if (m_state == CachedCharacter)
+                m_delegate.atomCharacterClassAtom(m_character);
+            else if (m_state == CachedCharacterHyphen) {
+                // A trailing hyphen matches itself.
+                m_delegate.atomCharacterClassAtom(m_character);
+                m_delegate.atomCharacterClassAtom('-');
+            }
+            m_delegate.atomCharacterClassEnd();
+        }
+
+        // parseEscape() should never call these delegate methods when
+        // invoked with inCharacterClass set.
+        NO_RETURN_DUE_TO_ASSERT void assertionWordBoundary(bool) { RELEASE_ASSERT_NOT_REACHED(); }
+        NO_RETURN_DUE_TO_ASSERT void atomBackReference(unsigned) { RELEASE_ASSERT_NOT_REACHED(); }
+
+    private:
+        Delegate& m_delegate;
+        ErrorCode& m_err;
+        // One-token lookbehind state used to turn 'a', '-', 'z' into a range.
+        enum CharacterClassConstructionState {
+            Empty,
+            CachedCharacter,
+            CachedCharacterHyphen,
+            AfterCharacterClass,
+            AfterCharacterClassHyphen,
+        } m_state;
+        UChar m_character;
+    };
+
+    Parser(Delegate& delegate, const String& pattern, unsigned backReferenceLimit)
+        : m_delegate(delegate)
+        , m_backReferenceLimit(backReferenceLimit)
+        , m_err(NoError)
+        , m_data(pattern.getCharacters<CharType>())
+        , m_size(pattern.length())
+        , m_index(0)
+        , m_parenthesesNestingDepth(0)
+    {
+    }
+
+    /*
+     * parseEscape():
+     *
+     * Helper for parseTokens() AND parseCharacterClass().
+     * Unlike the other parser methods, this function does not report tokens
+     * directly to the member delegate (m_delegate), instead tokens are
+     * emitted to the delegate provided as an argument. In the case of atom
+     * escapes, parseTokens() will call parseEscape() passing m_delegate as
+     * an argument, and as such the escape will be reported to the delegate.
+     *
+     * However this method may also be used by parseCharacterClass(), in which
+     * case a CharacterClassParserDelegate will be passed as the delegate that
+     * tokens should be added to. A boolean flag is also provided to indicate
+     * whether an escape in a CharacterClass is being parsed (some parsing
+     * rules change in this context).
+     *
+     * The boolean value returned by this method indicates whether the token
+     * parsed was an atom (outside of a character class \b and \B will be
+     * interpreted as assertions).
+     */
+    template<bool inCharacterClass, class EscapeDelegate>
+    bool parseEscape(EscapeDelegate& delegate)
+    {
+        ASSERT(!m_err);
+        ASSERT(peek() == '\\');
+        consume();
+
+        if (atEndOfPattern()) {
+            m_err = EscapeUnterminated;
+            return false;
+        }
+
+        switch (peek()) {
+        // Assertions
+        case 'b':
+            consume();
+            if (inCharacterClass)
+                delegate.atomPatternCharacter('\b');
+            else {
+                delegate.assertionWordBoundary(false);
+                return false;
+            }
+            break;
+        case 'B':
+            consume();
+            if (inCharacterClass)
+                delegate.atomPatternCharacter('B');
+            else {
+                delegate.assertionWordBoundary(true);
+                return false;
+            }
+            break;
+
+        // CharacterClassEscape
+        case 'd':
+            consume();
+            delegate.atomBuiltInCharacterClass(DigitClassID, false);
+            break;
+        case 's':
+            consume();
+            delegate.atomBuiltInCharacterClass(SpaceClassID, false);
+            break;
+        case 'w':
+            consume();
+            delegate.atomBuiltInCharacterClass(WordClassID, false);
+            break;
+        case 'D':
+            consume();
+            delegate.atomBuiltInCharacterClass(DigitClassID, true);
+            break;
+        case 'S':
+            consume();
+            delegate.atomBuiltInCharacterClass(SpaceClassID, true);
+            break;
+        case 'W':
+            consume();
+            delegate.atomBuiltInCharacterClass(WordClassID, true);
+            break;
+
+        // DecimalEscape
+        case '1':
+        case '2':
+        case '3':
+        case '4':
+        case '5':
+        case '6':
+        case '7':
+        case '8':
+        case '9': {
+            // To match Firefox, we parse an invalid backreference in the range [1-7] as an octal escape.
+            // First, try to parse this as backreference.
+            if (!inCharacterClass) {
+                ParseState state = saveState();
+
+                unsigned backReference = consumeNumber();
+                if (backReference <= m_backReferenceLimit) {
+                    delegate.atomBackReference(backReference);
+                    break;
+                }
+
+                restoreState(state);
+            }
+
+            // Not a backreference, and not octal.
+            if (peek() >= '8') {
+                delegate.atomPatternCharacter('\\');
+                break;
+            }
+
+            // Fall-through to handle this as an octal escape.
+        }
+
+        // Octal escape
+        case '0':
+            delegate.atomPatternCharacter(consumeOctal());
+            break;
+
+        // ControlEscape
+        case 'f':
+            consume();
+            delegate.atomPatternCharacter('\f');
+            break;
+        case 'n':
+            consume();
+            delegate.atomPatternCharacter('\n');
+            break;
+        case 'r':
+            consume();
+            delegate.atomPatternCharacter('\r');
+            break;
+        case 't':
+            consume();
+            delegate.atomPatternCharacter('\t');
+            break;
+        case 'v':
+            consume();
+            delegate.atomPatternCharacter('\v');
+            break;
+
+        // ControlLetter
+        case 'c': {
+            ParseState state = saveState();
+            consume();
+            if (!atEndOfPattern()) {
+                int control = consume();
+
+                // To match Firefox, inside a character class, we also accept numbers and '_' as control characters.
+                if (inCharacterClass ? WTF::isASCIIAlphanumeric(control) || (control == '_') : WTF::isASCIIAlpha(control)) {
+                    delegate.atomPatternCharacter(control & 0x1f);
+                    break;
+                }
+            }
+            restoreState(state);
+            delegate.atomPatternCharacter('\\');
+            break;
+        }
+
+        // HexEscape
+        case 'x': {
+            consume();
+            int x = tryConsumeHex(2);
+            if (x == -1)
+                delegate.atomPatternCharacter('x');
+            else
+                delegate.atomPatternCharacter(x);
+            break;
+        }
+
+        // UnicodeEscape
+        case 'u': {
+            consume();
+            int u = tryConsumeHex(4);
+            if (u == -1)
+                delegate.atomPatternCharacter('u');
+            else
+                delegate.atomPatternCharacter(u);
+            break;
+        }
+
+        // IdentityEscape
+        default:
+            delegate.atomPatternCharacter(consume());
+        }
+
+        return true;
+    }
+
+    /*
+     * parseAtomEscape(), parseCharacterClassEscape():
+     *
+     * These methods alias to parseEscape().
+     */
+    bool parseAtomEscape()
+    {
+        return parseEscape<false>(m_delegate);
+    }
+    void parseCharacterClassEscape(CharacterClassParserDelegate& delegate)
+    {
+        parseEscape<true>(delegate);
+    }
+
+    /*
+     * parseCharacterClass():
+     *
+     * Helper for parseTokens(); calls directly and indirectly (via parseCharacterClassEscape)
+     * to an instance of CharacterClassParserDelegate, to describe the character class to the
+     * delegate.
+     */
+    void parseCharacterClass()
+    {
+        ASSERT(!m_err);
+        ASSERT(peek() == '[');
+        consume();
+
+        CharacterClassParserDelegate characterClassConstructor(m_delegate, m_err);
+
+        characterClassConstructor.begin(tryConsume('^'));
+
+        while (!atEndOfPattern()) {
+            switch (peek()) {
+            case ']':
+                consume();
+                characterClassConstructor.end();
+                return;
+
+            case '\\':
+                parseCharacterClassEscape(characterClassConstructor);
+                break;
+
+            default:
+                characterClassConstructor.atomPatternCharacter(consume(), true);
+            }
+
+            if (m_err)
+                return;
+        }
+
+        m_err = CharacterClassUnmatched;
+    }
+
+    /*
+     * parseParenthesesBegin():
+     *
+     * Helper for parseTokens(); checks for parentheses types other than regular capturing subpatterns.
+     */
+    void parseParenthesesBegin()
+    {
+        ASSERT(!m_err);
+        ASSERT(peek() == '(');
+        consume();
+
+        if (tryConsume('?')) {
+            if (atEndOfPattern()) {
+                m_err = ParenthesesTypeInvalid;
+                return;
+            }
+
+            switch (consume()) {
+            case ':':
+                // (?:...) - non-capturing group.
+                m_delegate.atomParenthesesSubpatternBegin(false);
+                break;
+
+            case '=':
+                // (?=...) - positive lookahead.
+                m_delegate.atomParentheticalAssertionBegin();
+                break;
+
+            case '!':
+                // (?!...) - negative lookahead.
+                m_delegate.atomParentheticalAssertionBegin(true);
+                break;
+
+            default:
+                m_err = ParenthesesTypeInvalid;
+            }
+        } else
+            m_delegate.atomParenthesesSubpatternBegin();
+
+        ++m_parenthesesNestingDepth;
+    }
+
+    /*
+     * parseParenthesesEnd():
+     *
+     * Helper for parseTokens(); checks for parse errors (due to unmatched parentheses).
+     */
+    void parseParenthesesEnd()
+    {
+        ASSERT(!m_err);
+        ASSERT(peek() == ')');
+        consume();
+
+        if (m_parenthesesNestingDepth > 0)
+            m_delegate.atomParenthesesEnd();
+        else
+            m_err = ParenthesesUnmatched;
+
+        --m_parenthesesNestingDepth;
+    }
+
+    /*
+     * parseQuantifier():
+     *
+     * Helper for parseTokens(); checks for parse errors and non-greedy quantifiers.
+     */
+    void parseQuantifier(bool lastTokenWasAnAtom, unsigned min, unsigned max)
+    {
+        ASSERT(!m_err);
+        ASSERT(min <= max);
+
+        if (min == UINT_MAX) {
+            m_err = QuantifierTooLarge;
+            return;
+        }
+
+        if (lastTokenWasAnAtom)
+            // A trailing '?' makes the quantifier non-greedy.
+            m_delegate.quantifyAtom(min, max, !tryConsume('?'));
+        else
+            m_err = QuantifierWithoutAtom;
+    }
+
+    /*
+     * parseTokens():
+     *
+     * This method loops over the input pattern reporting tokens to the delegate.
+     * The method returns when a parse error is detected, or the end of the pattern
+     * is reached. One piece of state is tracked around the loop, which is whether
+     * the last token passed to the delegate was an atom (this is necessary to detect
+     * a parse error when a quantifier is provided without an atom to quantify).
+     */
+    void parseTokens()
+    {
+        bool lastTokenWasAnAtom = false;
+
+        while (!atEndOfPattern()) {
+            switch (peek()) {
+            case '|':
+                consume();
+                m_delegate.disjunction();
+                lastTokenWasAnAtom = false;
+                break;
+
+            case '(':
+                parseParenthesesBegin();
+                lastTokenWasAnAtom = false;
+                break;
+
+            case ')':
+                parseParenthesesEnd();
+                lastTokenWasAnAtom = true;
+                break;
+
+            case '^':
+                consume();
+                m_delegate.assertionBOL();
+                lastTokenWasAnAtom = false;
+                break;
+
+            case '$':
+                consume();
+                m_delegate.assertionEOL();
+                lastTokenWasAnAtom = false;
+                break;
+
+            case '.':
+                consume();
+                // '.' is everything-but-newline: the newline class, inverted.
+                m_delegate.atomBuiltInCharacterClass(NewlineClassID, true);
+                lastTokenWasAnAtom = true;
+                break;
+
+            case '[':
+                parseCharacterClass();
+                lastTokenWasAnAtom = true;
+                break;
+
+            case '\\':
+                lastTokenWasAnAtom = parseAtomEscape();
+                break;
+
+            case '*':
+                consume();
+                parseQuantifier(lastTokenWasAnAtom, 0, quantifyInfinite);
+                lastTokenWasAnAtom = false;
+                break;
+
+            case '+':
+                consume();
+                parseQuantifier(lastTokenWasAnAtom, 1, quantifyInfinite);
+                lastTokenWasAnAtom = false;
+                break;
+
+            case '?':
+                consume();
+                parseQuantifier(lastTokenWasAnAtom, 0, 1);
+                lastTokenWasAnAtom = false;
+                break;
+
+            case '{': {
+                ParseState state = saveState();
+
+                consume();
+                if (peekIsDigit()) {
+                    unsigned min = consumeNumber();
+                    unsigned max = min;
+
+                    if (tryConsume(','))
+                        max = peekIsDigit() ? consumeNumber() : quantifyInfinite;
+
+                    if (tryConsume('}')) {
+                        if (min <= max)
+                            parseQuantifier(lastTokenWasAnAtom, min, max);
+                        else
+                            m_err = QuantifierOutOfOrder;
+                        lastTokenWasAnAtom = false;
+                        break;
+                    }
+                }
+
+                restoreState(state);
+            } // if we did not find a complete quantifier, fall through to the default case.
+
+            default:
+                m_delegate.atomPatternCharacter(consume());
+                lastTokenWasAnAtom = true;
+            }
+
+            if (m_err)
+                return;
+        }
+
+        if (m_parenthesesNestingDepth > 0)
+            m_err = MissingParentheses;
+    }
+
+    /*
+     * parse():
+     *
+     * This method calls parseTokens() to parse over the input and converts any
+     * error code to a const char* for a result.
+     */
+    const char* parse()
+    {
+        if (m_size > MAX_PATTERN_SIZE)
+            m_err = PatternTooLarge;
+        else
+            parseTokens();
+        ASSERT(atEndOfPattern() || m_err);
+
+        // The order of this array must match the ErrorCode enum.
+        static const char* errorMessages[NumberOfErrorCodes] = {
+            0, // NoError
+            REGEXP_ERROR_PREFIX "regular expression too large",
+            REGEXP_ERROR_PREFIX "numbers out of order in {} quantifier",
+            REGEXP_ERROR_PREFIX "nothing to repeat",
+            REGEXP_ERROR_PREFIX "number too large in {} quantifier",
+            REGEXP_ERROR_PREFIX "missing )",
+            REGEXP_ERROR_PREFIX "unmatched parentheses",
+            REGEXP_ERROR_PREFIX "unrecognized character after (?",
+            REGEXP_ERROR_PREFIX "missing terminating ] for character class",
+            REGEXP_ERROR_PREFIX "range out of order in character class",
+            REGEXP_ERROR_PREFIX "\\ at end of pattern"
+        };
+
+        return errorMessages[m_err];
+    }
+
+    // Misc helper functions:
+
+    // Parse position snapshot; an index into m_data.
+    typedef unsigned ParseState;
+
+    ParseState saveState()
+    {
+        return m_index;
+    }
+
+    void restoreState(ParseState state)
+    {
+        m_index = state;
+    }
+
+    bool atEndOfPattern()
+    {
+        ASSERT(m_index <= m_size);
+        return m_index == m_size;
+    }
+
+    int peek()
+    {
+        ASSERT(m_index < m_size);
+        return m_data[m_index];
+    }
+
+    bool peekIsDigit()
+    {
+        return !atEndOfPattern() && WTF::isASCIIDigit(peek());
+    }
+
+    unsigned peekDigit()
+    {
+        ASSERT(peekIsDigit());
+        return peek() - '0';
+    }
+
+    int consume()
+    {
+        ASSERT(m_index < m_size);
+        return m_data[m_index++];
+    }
+
+    unsigned consumeDigit()
+    {
+        ASSERT(peekIsDigit());
+        return consume() - '0';
+    }
+
+    unsigned consumeNumber()
+    {
+        unsigned n = consumeDigit();
+        // check for overflow.
+        for (unsigned newValue; peekIsDigit() && ((newValue = n * 10 + peekDigit()) >= n); ) {
+            n = newValue;
+            consume();
+        }
+        return n;
+    }
+
+    unsigned consumeOctal()
+    {
+        ASSERT(WTF::isASCIIOctalDigit(peek()));
+
+        unsigned n = consumeDigit();
+        // The n < 32 guard bounds the result to a single byte (max 0377).
+        while (n < 32 && !atEndOfPattern() && WTF::isASCIIOctalDigit(peek()))
+            n = n * 8 + consumeDigit();
+        return n;
+    }
+
+    bool tryConsume(UChar ch)
+    {
+        if (atEndOfPattern() || (m_data[m_index] != ch))
+            return false;
+        ++m_index;
+        return true;
+    }
+
+    // Consume exactly 'count' hex digits and return their value, or restore
+    // the parse position and return -1 if fewer are available.
+    int tryConsumeHex(int count)
+    {
+        ParseState state = saveState();
+
+        int n = 0;
+        while (count--) {
+            if (atEndOfPattern() || !WTF::isASCIIHexDigit(peek())) {
+                restoreState(state);
+                return -1;
+            }
+            n = (n << 4) | WTF::toASCIIHexValue(consume());
+        }
+        return n;
+    }
+
+    Delegate& m_delegate;
+    unsigned m_backReferenceLimit;
+    ErrorCode m_err;
+    const CharType* m_data;
+    unsigned m_size;
+    unsigned m_index;
+    unsigned m_parenthesesNestingDepth;
+
+    // Derived by empirical testing of compile time in PCRE and WREC.
+    static const unsigned MAX_PATTERN_SIZE = 1024 * 1024;
+};
+
+/*
+ * Yarr::parse():
+ *
+ * The parse method is passed a pattern to be parsed and a delegate upon which
+ * callbacks will be made to record the parsed tokens forming the regex.
+ * Yarr::parse() returns null on success, or a const C string providing an error
+ * message where a parse error occurs.
+ *
+ * The Delegate must implement the following interface:
+ *
+ * void assertionBOL();
+ * void assertionEOL();
+ * void assertionWordBoundary(bool invert);
+ *
+ * void atomPatternCharacter(UChar ch);
+ * void atomBuiltInCharacterClass(BuiltInCharacterClassID classID, bool invert);
+ * void atomCharacterClassBegin(bool invert)
+ * void atomCharacterClassAtom(UChar ch)
+ * void atomCharacterClassRange(UChar begin, UChar end)
+ * void atomCharacterClassBuiltIn(BuiltInCharacterClassID classID, bool invert)
+ * void atomCharacterClassEnd()
+ * void atomParenthesesSubpatternBegin(bool capture = true);
+ * void atomParentheticalAssertionBegin(bool invert = false);
+ * void atomParenthesesEnd();
+ * void atomBackReference(unsigned subpatternId);
+ *
+ * void quantifyAtom(unsigned min, unsigned max, bool greedy);
+ *
+ * void disjunction();
+ *
+ * The regular expression is described by a sequence of assertion*() and atom*()
+ * callbacks to the delegate, describing the terms in the regular expression.
+ * Following an atom a quantifyAtom() call may occur to indicate that the previous
+ * atom should be quantified. In the case of atoms described across multiple
+ * calls (parentheses and character classes) the call to quantifyAtom() will come
+ * after the call to the atom*End() method, never after atom*Begin().
+ *
+ * Character classes may either be described by a single call to
+ * atomBuiltInCharacterClass(), or by a sequence of atomCharacterClass*() calls.
+ * In the latter case, ...Begin() will be called, followed by a sequence of
+ * calls to ...Atom(), ...Range(), and ...BuiltIn(), followed by a call to ...End().
+ *
+ * Sequences of atoms and assertions are broken into alternatives via calls to
+ * disjunction(). Assertions, atoms, and disjunctions emitted between calls to
+ * atomParenthesesBegin() and atomParenthesesEnd() form the body of a subpattern.
+ * atomParenthesesBegin() is passed a subpatternId. In the case of a regular
+ * capturing subpattern, this will be the subpatternId associated with these
+ * parentheses, and will also by definition be the lowest subpatternId of these
+ * parentheses and of any nested parentheses. The atomParenthesesEnd() method
+ * is passed the subpatternId of the last capturing subexpression nested within
+ * these parentheses. In the case of a capturing subpattern with no nested
+ * capturing subpatterns, the same subpatternId will be passed to the begin and
+ * end functions. In the case of non-capturing subpatterns the subpatternId
+ * passed to the begin method is also the first possible subpatternId that might
+ * be nested within these parentheses. If a set of non-capturing parentheses does
+ * not contain any capturing subpatterns, then the subpatternId passed to begin
+ * will be greater than the subpatternId passed to end.
+ */
+
+// Dispatch to the parser instantiation matching the pattern's storage
+// (8-bit LChar vs. 16-bit UChar); returns null on success, or an error
+// message string on failure.
+template<class Delegate>
+const char* parse(Delegate& delegate, const String& pattern, unsigned backReferenceLimit = quantifyInfinite)
+{
+    return pattern.is8Bit()
+        ? Parser<Delegate, LChar>(delegate, pattern, backReferenceLimit).parse()
+        : Parser<Delegate, UChar>(delegate, pattern, backReferenceLimit).parse();
+}
+
+} } // namespace JSC::Yarr
+
+#endif // YarrParser_h
diff --git a/src/3rdparty/masm/yarr/YarrPattern.cpp b/src/3rdparty/masm/yarr/YarrPattern.cpp
new file mode 100644
index 0000000000..3ce0216e5f
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrPattern.cpp
@@ -0,0 +1,880 @@
+/*
+ * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrPattern.h"
+
+#include "Yarr.h"
+#include "YarrCanonicalizeUCS2.h"
+#include "YarrParser.h"
+#include <wtf/Vector.h>
+
+using namespace WTF;
+
+namespace JSC { namespace Yarr {
+
+#include "RegExpJitTables.h"
+
+class CharacterClassConstructor {
+public:
+ CharacterClassConstructor(bool isCaseInsensitive = false)
+ : m_isCaseInsensitive(isCaseInsensitive)
+ {
+ }
+
+ void reset()
+ {
+ m_matches.clear();
+ m_ranges.clear();
+ m_matchesUnicode.clear();
+ m_rangesUnicode.clear();
+ }
+
+ void append(const CharacterClass* other)
+ {
+ for (size_t i = 0; i < other->m_matches.size(); ++i)
+ addSorted(m_matches, other->m_matches[i]);
+ for (size_t i = 0; i < other->m_ranges.size(); ++i)
+ addSortedRange(m_ranges, other->m_ranges[i].begin, other->m_ranges[i].end);
+ for (size_t i = 0; i < other->m_matchesUnicode.size(); ++i)
+ addSorted(m_matchesUnicode, other->m_matchesUnicode[i]);
+ for (size_t i = 0; i < other->m_rangesUnicode.size(); ++i)
+ addSortedRange(m_rangesUnicode, other->m_rangesUnicode[i].begin, other->m_rangesUnicode[i].end);
+ }
+
+ void putChar(UChar ch)
+ {
+ // Handle ascii cases.
+ if (ch <= 0x7f) {
+ if (m_isCaseInsensitive && isASCIIAlpha(ch)) {
+ addSorted(m_matches, toASCIIUpper(ch));
+ addSorted(m_matches, toASCIILower(ch));
+ } else
+ addSorted(m_matches, ch);
+ return;
+ }
+
+ // Simple case, not a case-insensitive match.
+ if (!m_isCaseInsensitive) {
+ addSorted(m_matchesUnicode, ch);
+ return;
+ }
+
+ // Add multiple matches, if necessary.
+ UCS2CanonicalizationRange* info = rangeInfoFor(ch);
+ if (info->type == CanonicalizeUnique)
+ addSorted(m_matchesUnicode, ch);
+ else
+ putUnicodeIgnoreCase(ch, info);
+ }
+
+ void putUnicodeIgnoreCase(UChar ch, UCS2CanonicalizationRange* info)
+ {
+ ASSERT(m_isCaseInsensitive);
+ ASSERT(ch > 0x7f);
+ ASSERT(ch >= info->begin && ch <= info->end);
+ ASSERT(info->type != CanonicalizeUnique);
+ if (info->type == CanonicalizeSet) {
+ for (uint16_t* set = characterSetInfo[info->value]; (ch = *set); ++set)
+ addSorted(m_matchesUnicode, ch);
+ } else {
+ addSorted(m_matchesUnicode, ch);
+ addSorted(m_matchesUnicode, getCanonicalPair(info, ch));
+ }
+ }
+
+ void putRange(UChar lo, UChar hi)
+ {
+ if (lo <= 0x7f) {
+ char asciiLo = lo;
+ char asciiHi = std::min(hi, (UChar)0x7f);
+ addSortedRange(m_ranges, lo, asciiHi);
+
+ if (m_isCaseInsensitive) {
+ if ((asciiLo <= 'Z') && (asciiHi >= 'A'))
+ addSortedRange(m_ranges, std::max(asciiLo, 'A')+('a'-'A'), std::min(asciiHi, 'Z')+('a'-'A'));
+ if ((asciiLo <= 'z') && (asciiHi >= 'a'))
+ addSortedRange(m_ranges, std::max(asciiLo, 'a')+('A'-'a'), std::min(asciiHi, 'z')+('A'-'a'));
+ }
+ }
+ if (hi <= 0x7f)
+ return;
+
+ lo = std::max(lo, (UChar)0x80);
+ addSortedRange(m_rangesUnicode, lo, hi);
+
+ if (!m_isCaseInsensitive)
+ return;
+
+ UCS2CanonicalizationRange* info = rangeInfoFor(lo);
+ while (true) {
+ // Handle the range [lo .. end]
+ UChar end = std::min<UChar>(info->end, hi);
+
+ switch (info->type) {
+ case CanonicalizeUnique:
+ // Nothing to do - no canonical equivalents.
+ break;
+ case CanonicalizeSet: {
+ UChar ch;
+ for (uint16_t* set = characterSetInfo[info->value]; (ch = *set); ++set)
+ addSorted(m_matchesUnicode, ch);
+ break;
+ }
+ case CanonicalizeRangeLo:
+ addSortedRange(m_rangesUnicode, lo + info->value, end + info->value);
+ break;
+ case CanonicalizeRangeHi:
+ addSortedRange(m_rangesUnicode, lo - info->value, end - info->value);
+ break;
+ case CanonicalizeAlternatingAligned:
+ // Use addSortedRange since there is likely an abutting range to combine with.
+ if (lo & 1)
+ addSortedRange(m_rangesUnicode, lo - 1, lo - 1);
+ if (!(end & 1))
+ addSortedRange(m_rangesUnicode, end + 1, end + 1);
+ break;
+ case CanonicalizeAlternatingUnaligned:
+ // Use addSortedRange since there is likely an abutting range to combine with.
+ if (!(lo & 1))
+ addSortedRange(m_rangesUnicode, lo - 1, lo - 1);
+ if (end & 1)
+ addSortedRange(m_rangesUnicode, end + 1, end + 1);
+ break;
+ }
+
+ if (hi == end)
+ return;
+
+ ++info;
+ lo = info->begin;
+ };
+
+ }
+
+ PassOwnPtr<CharacterClass> charClass()
+ {
+ OwnPtr<CharacterClass> characterClass = adoptPtr(new CharacterClass);
+
+ characterClass->m_matches.swap(m_matches);
+ characterClass->m_ranges.swap(m_ranges);
+ characterClass->m_matchesUnicode.swap(m_matchesUnicode);
+ characterClass->m_rangesUnicode.swap(m_rangesUnicode);
+
+ return characterClass.release();
+ }
+
+private:
+ void addSorted(Vector<UChar>& matches, UChar ch)
+ {
+ unsigned pos = 0;
+ unsigned range = matches.size();
+
+ // binary chop, find position to insert char.
+ while (range) {
+ unsigned index = range >> 1;
+
+ int val = matches[pos+index] - ch;
+ if (!val)
+ return;
+ else if (val > 0)
+ range = index;
+ else {
+ pos += (index+1);
+ range -= (index+1);
+ }
+ }
+
+ if (pos == matches.size())
+ matches.append(ch);
+ else
+ matches.insert(pos, ch);
+ }
+
+ void addSortedRange(Vector<CharacterRange>& ranges, UChar lo, UChar hi)
+ {
+ unsigned end = ranges.size();
+
+ // Simple linear scan - I doubt there are that many ranges anyway...
+ // feel free to fix this with something faster (eg binary chop).
+ for (unsigned i = 0; i < end; ++i) {
+ // does the new range fall before the current position in the array
+ if (hi < ranges[i].begin) {
+ // optional optimization: concatenate appending ranges? - may not be worthwhile.
+ if (hi == (ranges[i].begin - 1)) {
+ ranges[i].begin = lo;
+ return;
+ }
+ ranges.insert(i, CharacterRange(lo, hi));
+ return;
+ }
+ // Okay, since we didn't hit the last case, the end of the new range is definitely at or after the begining
+ // If the new range start at or before the end of the last range, then the overlap (if it starts one after the
+ // end of the last range they concatenate, which is just as good.
+ if (lo <= (ranges[i].end + 1)) {
+ // found an intersect! we'll replace this entry in the array.
+ ranges[i].begin = std::min(ranges[i].begin, lo);
+ ranges[i].end = std::max(ranges[i].end, hi);
+
+ // now check if the new range can subsume any subsequent ranges.
+ unsigned next = i+1;
+ // each iteration of the loop we will either remove something from the list, or break the loop.
+ while (next < ranges.size()) {
+ if (ranges[next].begin <= (ranges[i].end + 1)) {
+ // the next entry now overlaps / concatenates this one.
+ ranges[i].end = std::max(ranges[i].end, ranges[next].end);
+ ranges.remove(next);
+ } else
+ break;
+ }
+
+ return;
+ }
+ }
+
+ // CharacterRange comes after all existing ranges.
+ ranges.append(CharacterRange(lo, hi));
+ }
+
+ bool m_isCaseInsensitive;
+
+ Vector<UChar> m_matches;
+ Vector<CharacterRange> m_ranges;
+ Vector<UChar> m_matchesUnicode;
+ Vector<CharacterRange> m_rangesUnicode;
+};
+
+class YarrPatternConstructor {
+public:
+ YarrPatternConstructor(YarrPattern& pattern)
+ : m_pattern(pattern)
+ , m_characterClassConstructor(pattern.m_ignoreCase)
+ , m_invertParentheticalAssertion(false)
+ {
+ OwnPtr<PatternDisjunction> body = adoptPtr(new PatternDisjunction);
+ m_pattern.m_body = body.get();
+ m_alternative = body->addNewAlternative();
+ m_pattern.m_disjunctions.append(body.release());
+ }
+
+ ~YarrPatternConstructor()
+ {
+ }
+
+ void reset()
+ {
+ m_pattern.reset();
+ m_characterClassConstructor.reset();
+
+ OwnPtr<PatternDisjunction> body = adoptPtr(new PatternDisjunction);
+ m_pattern.m_body = body.get();
+ m_alternative = body->addNewAlternative();
+ m_pattern.m_disjunctions.append(body.release());
+ }
+
+ void assertionBOL()
+ {
+ if (!m_alternative->m_terms.size() & !m_invertParentheticalAssertion) {
+ m_alternative->m_startsWithBOL = true;
+ m_alternative->m_containsBOL = true;
+ m_pattern.m_containsBOL = true;
+ }
+ m_alternative->m_terms.append(PatternTerm::BOL());
+ }
+ void assertionEOL()
+ {
+ m_alternative->m_terms.append(PatternTerm::EOL());
+ }
+ void assertionWordBoundary(bool invert)
+ {
+ m_alternative->m_terms.append(PatternTerm::WordBoundary(invert));
+ }
+
+ void atomPatternCharacter(UChar ch)
+ {
+ // We handle case-insensitive checking of unicode characters which do have both
+ // cases by handling them as if they were defined using a CharacterClass.
+ if (!m_pattern.m_ignoreCase || isASCII(ch)) {
+ m_alternative->m_terms.append(PatternTerm(ch));
+ return;
+ }
+
+ UCS2CanonicalizationRange* info = rangeInfoFor(ch);
+ if (info->type == CanonicalizeUnique) {
+ m_alternative->m_terms.append(PatternTerm(ch));
+ return;
+ }
+
+ m_characterClassConstructor.putUnicodeIgnoreCase(ch, info);
+ OwnPtr<CharacterClass> newCharacterClass = m_characterClassConstructor.charClass();
+ m_alternative->m_terms.append(PatternTerm(newCharacterClass.get(), false));
+ m_pattern.m_userCharacterClasses.append(newCharacterClass.release());
+ }
+
+ void atomBuiltInCharacterClass(BuiltInCharacterClassID classID, bool invert)
+ {
+ switch (classID) {
+ case DigitClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.digitsCharacterClass(), invert));
+ break;
+ case SpaceClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.spacesCharacterClass(), invert));
+ break;
+ case WordClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.wordcharCharacterClass(), invert));
+ break;
+ case NewlineClassID:
+ m_alternative->m_terms.append(PatternTerm(m_pattern.newlineCharacterClass(), invert));
+ break;
+ }
+ }
+
+ void atomCharacterClassBegin(bool invert = false)
+ {
+ m_invertCharacterClass = invert;
+ }
+
+ void atomCharacterClassAtom(UChar ch)
+ {
+ m_characterClassConstructor.putChar(ch);
+ }
+
+ void atomCharacterClassRange(UChar begin, UChar end)
+ {
+ m_characterClassConstructor.putRange(begin, end);
+ }
+
+ void atomCharacterClassBuiltIn(BuiltInCharacterClassID classID, bool invert)
+ {
+ ASSERT(classID != NewlineClassID);
+
+ switch (classID) {
+ case DigitClassID:
+ m_characterClassConstructor.append(invert ? m_pattern.nondigitsCharacterClass() : m_pattern.digitsCharacterClass());
+ break;
+
+ case SpaceClassID:
+ m_characterClassConstructor.append(invert ? m_pattern.nonspacesCharacterClass() : m_pattern.spacesCharacterClass());
+ break;
+
+ case WordClassID:
+ m_characterClassConstructor.append(invert ? m_pattern.nonwordcharCharacterClass() : m_pattern.wordcharCharacterClass());
+ break;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ void atomCharacterClassEnd()
+ {
+ OwnPtr<CharacterClass> newCharacterClass = m_characterClassConstructor.charClass();
+ m_alternative->m_terms.append(PatternTerm(newCharacterClass.get(), m_invertCharacterClass));
+ m_pattern.m_userCharacterClasses.append(newCharacterClass.release());
+ }
+
+ void atomParenthesesSubpatternBegin(bool capture = true)
+ {
+ unsigned subpatternId = m_pattern.m_numSubpatterns + 1;
+ if (capture)
+ m_pattern.m_numSubpatterns++;
+
+ OwnPtr<PatternDisjunction> parenthesesDisjunction = adoptPtr(new PatternDisjunction(m_alternative));
+ m_alternative->m_terms.append(PatternTerm(PatternTerm::TypeParenthesesSubpattern, subpatternId, parenthesesDisjunction.get(), capture, false));
+ m_alternative = parenthesesDisjunction->addNewAlternative();
+ m_pattern.m_disjunctions.append(parenthesesDisjunction.release());
+ }
+
+ void atomParentheticalAssertionBegin(bool invert = false)
+ {
+ OwnPtr<PatternDisjunction> parenthesesDisjunction = adoptPtr(new PatternDisjunction(m_alternative));
+ m_alternative->m_terms.append(PatternTerm(PatternTerm::TypeParentheticalAssertion, m_pattern.m_numSubpatterns + 1, parenthesesDisjunction.get(), false, invert));
+ m_alternative = parenthesesDisjunction->addNewAlternative();
+ m_invertParentheticalAssertion = invert;
+ m_pattern.m_disjunctions.append(parenthesesDisjunction.release());
+ }
+
+ void atomParenthesesEnd()
+ {
+ ASSERT(m_alternative->m_parent);
+ ASSERT(m_alternative->m_parent->m_parent);
+
+ PatternDisjunction* parenthesesDisjunction = m_alternative->m_parent;
+ m_alternative = m_alternative->m_parent->m_parent;
+
+ PatternTerm& lastTerm = m_alternative->lastTerm();
+
+ unsigned numParenAlternatives = parenthesesDisjunction->m_alternatives.size();
+ unsigned numBOLAnchoredAlts = 0;
+
+ for (unsigned i = 0; i < numParenAlternatives; i++) {
+ // Bubble up BOL flags
+ if (parenthesesDisjunction->m_alternatives[i]->m_startsWithBOL)
+ numBOLAnchoredAlts++;
+ }
+
+ if (numBOLAnchoredAlts) {
+ m_alternative->m_containsBOL = true;
+ // If all the alternatives in parens start with BOL, then so does this one
+ if (numBOLAnchoredAlts == numParenAlternatives)
+ m_alternative->m_startsWithBOL = true;
+ }
+
+ lastTerm.parentheses.lastSubpatternId = m_pattern.m_numSubpatterns;
+ m_invertParentheticalAssertion = false;
+ }
+
+ void atomBackReference(unsigned subpatternId)
+ {
+ ASSERT(subpatternId);
+ m_pattern.m_containsBackreferences = true;
+ m_pattern.m_maxBackReference = std::max(m_pattern.m_maxBackReference, subpatternId);
+
+ if (subpatternId > m_pattern.m_numSubpatterns) {
+ m_alternative->m_terms.append(PatternTerm::ForwardReference());
+ return;
+ }
+
+ PatternAlternative* currentAlternative = m_alternative;
+ ASSERT(currentAlternative);
+
+ // Note to self: if we waited until the AST was baked, we could also remove forwards refs
+ while ((currentAlternative = currentAlternative->m_parent->m_parent)) {
+ PatternTerm& term = currentAlternative->lastTerm();
+ ASSERT((term.type == PatternTerm::TypeParenthesesSubpattern) || (term.type == PatternTerm::TypeParentheticalAssertion));
+
+ if ((term.type == PatternTerm::TypeParenthesesSubpattern) && term.capture() && (subpatternId == term.parentheses.subpatternId)) {
+ m_alternative->m_terms.append(PatternTerm::ForwardReference());
+ return;
+ }
+ }
+
+ m_alternative->m_terms.append(PatternTerm(subpatternId));
+ }
+
+ // deep copy the argument disjunction. If filterStartsWithBOL is true,
+ // skip alternatives with m_startsWithBOL set true.
+ PatternDisjunction* copyDisjunction(PatternDisjunction* disjunction, bool filterStartsWithBOL = false)
+ {
+ OwnPtr<PatternDisjunction> newDisjunction;
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt) {
+ PatternAlternative* alternative = disjunction->m_alternatives[alt].get();
+ if (!filterStartsWithBOL || !alternative->m_startsWithBOL) {
+ if (!newDisjunction) {
+ newDisjunction = adoptPtr(new PatternDisjunction());
+ newDisjunction->m_parent = disjunction->m_parent;
+ }
+ PatternAlternative* newAlternative = newDisjunction->addNewAlternative();
+ newAlternative->m_terms.reserveInitialCapacity(alternative->m_terms.size());
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i)
+ newAlternative->m_terms.append(copyTerm(alternative->m_terms[i], filterStartsWithBOL));
+ }
+ }
+
+ if (!newDisjunction)
+ return 0;
+
+ PatternDisjunction* copiedDisjunction = newDisjunction.get();
+ m_pattern.m_disjunctions.append(newDisjunction.release());
+ return copiedDisjunction;
+ }
+
+ PatternTerm copyTerm(PatternTerm& term, bool filterStartsWithBOL = false)
+ {
+ if ((term.type != PatternTerm::TypeParenthesesSubpattern) && (term.type != PatternTerm::TypeParentheticalAssertion))
+ return PatternTerm(term);
+
+ PatternTerm termCopy = term;
+ termCopy.parentheses.disjunction = copyDisjunction(termCopy.parentheses.disjunction, filterStartsWithBOL);
+ return termCopy;
+ }
+
+ void quantifyAtom(unsigned min, unsigned max, bool greedy)
+ {
+ ASSERT(min <= max);
+ ASSERT(m_alternative->m_terms.size());
+
+ if (!max) {
+ m_alternative->removeLastTerm();
+ return;
+ }
+
+ PatternTerm& term = m_alternative->lastTerm();
+ ASSERT(term.type > PatternTerm::TypeAssertionWordBoundary);
+ ASSERT((term.quantityCount == 1) && (term.quantityType == QuantifierFixedCount));
+
+ if (term.type == PatternTerm::TypeParentheticalAssertion) {
+ // If an assertion is quantified with a minimum count of zero, it can simply be removed.
+ // This arises from the RepeatMatcher behaviour in the spec. Matching an assertion never
+ // results in any input being consumed, however the continuation passed to the assertion
+ // (called in steps, 8c and 9 of the RepeatMatcher definition, ES5.1 15.10.2.5) will
+ // reject all zero length matches (see step 2.1). A match from the continuation of the
+ // expression will still be accepted regardless (via steps 8a and 11) - the upshot of all
+ // this is that matches from the assertion are not required, and won't be accepted anyway,
+ // so no need to ever run it.
+ if (!min)
+ m_alternative->removeLastTerm();
+ // We never need to run an assertion more than once. Subsequent interations will be run
+ // with the same start index (since assertions are non-capturing) and the same captures
+ // (per step 4 of RepeatMatcher in ES5.1 15.10.2.5), and as such will always produce the
+ // same result and captures. If the first match succeeds then the subsequent (min - 1)
+ // matches will too. Any additional optional matches will fail (on the same basis as the
+ // minimum zero quantified assertions, above), but this will still result in a match.
+ return;
+ }
+
+ if (min == 0)
+ term.quantify(max, greedy ? QuantifierGreedy : QuantifierNonGreedy);
+ else if (min == max)
+ term.quantify(min, QuantifierFixedCount);
+ else {
+ term.quantify(min, QuantifierFixedCount);
+ m_alternative->m_terms.append(copyTerm(term));
+ // NOTE: this term is interesting from an analysis perspective, in that it can be ignored.....
+ m_alternative->lastTerm().quantify((max == quantifyInfinite) ? max : max - min, greedy ? QuantifierGreedy : QuantifierNonGreedy);
+ if (m_alternative->lastTerm().type == PatternTerm::TypeParenthesesSubpattern)
+ m_alternative->lastTerm().parentheses.isCopy = true;
+ }
+ }
+
+ void disjunction()
+ {
+ m_alternative = m_alternative->m_parent->addNewAlternative();
+ }
+
+ unsigned setupAlternativeOffsets(PatternAlternative* alternative, unsigned currentCallFrameSize, unsigned initialInputPosition)
+ {
+ alternative->m_hasFixedSize = true;
+ Checked<unsigned> currentInputPosition = initialInputPosition;
+
+ for (unsigned i = 0; i < alternative->m_terms.size(); ++i) {
+ PatternTerm& term = alternative->m_terms[i];
+
+ switch (term.type) {
+ case PatternTerm::TypeAssertionBOL:
+ case PatternTerm::TypeAssertionEOL:
+ case PatternTerm::TypeAssertionWordBoundary:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ break;
+
+ case PatternTerm::TypeBackReference:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoBackReference;
+ alternative->m_hasFixedSize = false;
+ break;
+
+ case PatternTerm::TypeForwardReference:
+ break;
+
+ case PatternTerm::TypePatternCharacter:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ if (term.quantityType != QuantifierFixedCount) {
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoPatternCharacter;
+ alternative->m_hasFixedSize = false;
+ } else
+ currentInputPosition += term.quantityCount;
+ break;
+
+ case PatternTerm::TypeCharacterClass:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ if (term.quantityType != QuantifierFixedCount) {
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoCharacterClass;
+ alternative->m_hasFixedSize = false;
+ } else
+ currentInputPosition += term.quantityCount;
+ break;
+
+ case PatternTerm::TypeParenthesesSubpattern:
+ // Note: for fixed once parentheses we will ensure at least the minimum is available; others are on their own.
+ term.frameLocation = currentCallFrameSize;
+ if (term.quantityCount == 1 && !term.parentheses.isCopy) {
+ if (term.quantityType != QuantifierFixedCount)
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoParenthesesOnce;
+ currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition.unsafeGet());
+ // If quantity is fixed, then pre-check its minimum size.
+ if (term.quantityType == QuantifierFixedCount)
+ currentInputPosition += term.parentheses.disjunction->m_minimumSize;
+ term.inputPosition = currentInputPosition.unsafeGet();
+ } else if (term.parentheses.isTerminal) {
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoParenthesesTerminal;
+ currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition.unsafeGet());
+ term.inputPosition = currentInputPosition.unsafeGet();
+ } else {
+ term.inputPosition = currentInputPosition.unsafeGet();
+ setupDisjunctionOffsets(term.parentheses.disjunction, 0, currentInputPosition.unsafeGet());
+ currentCallFrameSize += YarrStackSpaceForBackTrackInfoParentheses;
+ }
+ // Fixed count of 1 could be accepted, if they have a fixed size *AND* if all alternatives are of the same length.
+ alternative->m_hasFixedSize = false;
+ break;
+
+ case PatternTerm::TypeParentheticalAssertion:
+ term.inputPosition = currentInputPosition.unsafeGet();
+ term.frameLocation = currentCallFrameSize;
+ currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize + YarrStackSpaceForBackTrackInfoParentheticalAssertion, currentInputPosition.unsafeGet());
+ break;
+
+ case PatternTerm::TypeDotStarEnclosure:
+ alternative->m_hasFixedSize = false;
+ term.inputPosition = initialInputPosition;
+ break;
+ }
+ }
+
+ alternative->m_minimumSize = (currentInputPosition - initialInputPosition).unsafeGet();
+ return currentCallFrameSize;
+ }
+
+ unsigned setupDisjunctionOffsets(PatternDisjunction* disjunction, unsigned initialCallFrameSize, unsigned initialInputPosition)
+ {
+ if ((disjunction != m_pattern.m_body) && (disjunction->m_alternatives.size() > 1))
+ initialCallFrameSize += YarrStackSpaceForBackTrackInfoAlternative;
+
+ unsigned minimumInputSize = UINT_MAX;
+ unsigned maximumCallFrameSize = 0;
+ bool hasFixedSize = true;
+
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt) {
+ PatternAlternative* alternative = disjunction->m_alternatives[alt].get();
+ unsigned currentAlternativeCallFrameSize = setupAlternativeOffsets(alternative, initialCallFrameSize, initialInputPosition);
+ minimumInputSize = std::min(minimumInputSize, alternative->m_minimumSize);
+ maximumCallFrameSize = std::max(maximumCallFrameSize, currentAlternativeCallFrameSize);
+ hasFixedSize &= alternative->m_hasFixedSize;
+ }
+
+ ASSERT(minimumInputSize != UINT_MAX);
+ ASSERT(maximumCallFrameSize >= initialCallFrameSize);
+
+ disjunction->m_hasFixedSize = hasFixedSize;
+ disjunction->m_minimumSize = minimumInputSize;
+ disjunction->m_callFrameSize = maximumCallFrameSize;
+ return maximumCallFrameSize;
+ }
+
+ void setupOffsets()
+ {
+ setupDisjunctionOffsets(m_pattern.m_body, 0, 0);
+ }
+
+ // This optimization identifies sets of parentheses that we will never need to backtrack.
+ // In these cases we do not need to store state from prior iterations.
+ // We can presently avoid backtracking for:
+ // * where the parens are at the end of the regular expression (last term in any of the
+ // alternatives of the main body disjunction).
+ // * where the parens are non-capturing, and quantified unbounded greedy (*).
+ // * where the parens do not contain any capturing subpatterns.
+ void checkForTerminalParentheses()
+ {
+ // This check is much too crude; should be just checking whether the candidate
+ // node contains nested capturing subpatterns, not the whole expression!
+ if (m_pattern.m_numSubpatterns)
+ return;
+
+ Vector<OwnPtr<PatternAlternative> >& alternatives = m_pattern.m_body->m_alternatives;
+ for (size_t i = 0; i < alternatives.size(); ++i) {
+ Vector<PatternTerm>& terms = alternatives[i]->m_terms;
+ if (terms.size()) {
+ PatternTerm& term = terms.last();
+ if (term.type == PatternTerm::TypeParenthesesSubpattern
+ && term.quantityType == QuantifierGreedy
+ && term.quantityCount == quantifyInfinite
+ && !term.capture())
+ term.parentheses.isTerminal = true;
+ }
+ }
+ }
+
+ void optimizeBOL()
+ {
+ // Look for expressions containing beginning of line (^) anchoring and unroll them.
+ // e.g. /^a|^b|c/ becomes /^a|^b|c/ which is executed once followed by /c/ which loops
+ // This code relies on the parsing code tagging alternatives with m_containsBOL and
+ // m_startsWithBOL and rolling those up to containing alternatives.
+ // At this point, this is only valid for non-multiline expressions.
+ PatternDisjunction* disjunction = m_pattern.m_body;
+
+ if (!m_pattern.m_containsBOL || m_pattern.m_multiline)
+ return;
+
+ PatternDisjunction* loopDisjunction = copyDisjunction(disjunction, true);
+
+ // Set alternatives in disjunction to "onceThrough"
+ for (unsigned alt = 0; alt < disjunction->m_alternatives.size(); ++alt)
+ disjunction->m_alternatives[alt]->setOnceThrough();
+
+ if (loopDisjunction) {
+ // Move alternatives from loopDisjunction to disjunction
+ for (unsigned alt = 0; alt < loopDisjunction->m_alternatives.size(); ++alt)
+ disjunction->m_alternatives.append(loopDisjunction->m_alternatives[alt].release());
+
+ loopDisjunction->m_alternatives.clear();
+ }
+ }
+
+ bool containsCapturingTerms(PatternAlternative* alternative, size_t firstTermIndex, size_t lastTermIndex)
+ {
+ Vector<PatternTerm>& terms = alternative->m_terms;
+
+ for (size_t termIndex = firstTermIndex; termIndex <= lastTermIndex; ++termIndex) {
+ PatternTerm& term = terms[termIndex];
+
+ if (term.m_capture)
+ return true;
+
+ if (term.type == PatternTerm::TypeParenthesesSubpattern) {
+ PatternDisjunction* nestedDisjunction = term.parentheses.disjunction;
+ for (unsigned alt = 0; alt < nestedDisjunction->m_alternatives.size(); ++alt) {
+ if (containsCapturingTerms(nestedDisjunction->m_alternatives[alt].get(), 0, nestedDisjunction->m_alternatives[alt]->m_terms.size() - 1))
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ // This optimization identifies alternatives in the form of
+ // [^].*[?]<expression>.*[$] for expressions that don't have any
+ // capturing terms. The alternative is changed to <expression>
+ // followed by processing of the dot stars to find and adjust the
+ // beginning and the end of the match.
+ void optimizeDotStarWrappedExpressions()
+ {
+ Vector<OwnPtr<PatternAlternative> >& alternatives = m_pattern.m_body->m_alternatives;
+ if (alternatives.size() != 1)
+ return;
+
+ PatternAlternative* alternative = alternatives[0].get();
+ Vector<PatternTerm>& terms = alternative->m_terms;
+ if (terms.size() >= 3) {
+ bool startsWithBOL = false;
+ bool endsWithEOL = false;
+ size_t termIndex, firstExpressionTerm, lastExpressionTerm;
+
+ termIndex = 0;
+ if (terms[termIndex].type == PatternTerm::TypeAssertionBOL) {
+ startsWithBOL = true;
+ ++termIndex;
+ }
+
+ PatternTerm& firstNonAnchorTerm = terms[termIndex];
+ if ((firstNonAnchorTerm.type != PatternTerm::TypeCharacterClass) || (firstNonAnchorTerm.characterClass != m_pattern.newlineCharacterClass()) || !((firstNonAnchorTerm.quantityType == QuantifierGreedy) || (firstNonAnchorTerm.quantityType == QuantifierNonGreedy)))
+ return;
+
+ firstExpressionTerm = termIndex + 1;
+
+ termIndex = terms.size() - 1;
+ if (terms[termIndex].type == PatternTerm::TypeAssertionEOL) {
+ endsWithEOL = true;
+ --termIndex;
+ }
+
+ PatternTerm& lastNonAnchorTerm = terms[termIndex];
+ if ((lastNonAnchorTerm.type != PatternTerm::TypeCharacterClass) || (lastNonAnchorTerm.characterClass != m_pattern.newlineCharacterClass()) || (lastNonAnchorTerm.quantityType != QuantifierGreedy))
+ return;
+
+ lastExpressionTerm = termIndex - 1;
+
+ if (firstExpressionTerm > lastExpressionTerm)
+ return;
+
+ if (!containsCapturingTerms(alternative, firstExpressionTerm, lastExpressionTerm)) {
+ for (termIndex = terms.size() - 1; termIndex > lastExpressionTerm; --termIndex)
+ terms.remove(termIndex);
+
+ for (termIndex = firstExpressionTerm; termIndex > 0; --termIndex)
+ terms.remove(termIndex - 1);
+
+ terms.append(PatternTerm(startsWithBOL, endsWithEOL));
+
+ m_pattern.m_containsBOL = false;
+ }
+ }
+ }
+
+private:
+ YarrPattern& m_pattern;
+ PatternAlternative* m_alternative;
+ CharacterClassConstructor m_characterClassConstructor;
+ bool m_invertCharacterClass;
+ bool m_invertParentheticalAssertion;
+};
+
+const char* YarrPattern::compile(const String& patternString)
+{
+ YarrPatternConstructor constructor(*this);
+
+ if (const char* error = parse(constructor, patternString))
+ return error;
+
+ // If the pattern contains illegal backreferences reset & reparse.
+ // Quoting Netscape's "What's new in JavaScript 1.2",
+ // "Note: if the number of left parentheses is less than the number specified
+ // in \#, the \# is taken as an octal escape as described in the next row."
+ if (containsIllegalBackReference()) {
+ unsigned numSubpatterns = m_numSubpatterns;
+
+ constructor.reset();
+#if !ASSERT_DISABLED
+ const char* error =
+#endif
+ parse(constructor, patternString, numSubpatterns);
+
+ ASSERT(!error);
+ ASSERT(numSubpatterns == m_numSubpatterns);
+ }
+
+ constructor.checkForTerminalParentheses();
+ constructor.optimizeDotStarWrappedExpressions();
+ constructor.optimizeBOL();
+
+ constructor.setupOffsets();
+
+ return 0;
+}
+
+YarrPattern::YarrPattern(const String& pattern, bool ignoreCase, bool multiline, const char** error)
+ : m_ignoreCase(ignoreCase)
+ , m_multiline(multiline)
+ , m_containsBackreferences(false)
+ , m_containsBOL(false)
+ , m_numSubpatterns(0)
+ , m_maxBackReference(0)
+ , newlineCached(0)
+ , digitsCached(0)
+ , spacesCached(0)
+ , wordcharCached(0)
+ , nondigitsCached(0)
+ , nonspacesCached(0)
+ , nonwordcharCached(0)
+{
+ *error = compile(pattern);
+}
+
+} }
diff --git a/src/3rdparty/masm/yarr/YarrPattern.h b/src/3rdparty/masm/yarr/YarrPattern.h
new file mode 100644
index 0000000000..e7d187c2b3
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrPattern.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrPattern_h
+#define YarrPattern_h
+
+#include <wtf/CheckedArithmetic.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Vector.h>
+#include <wtf/text/WTFString.h>
+#include <wtf/unicode/Unicode.h>
+
+namespace JSC { namespace Yarr {
+
+struct PatternDisjunction;
+
+struct CharacterRange {
+ UChar begin;
+ UChar end;
+
+ CharacterRange(UChar begin, UChar end)
+ : begin(begin)
+ , end(end)
+ {
+ }
+};
+
+struct CharacterClass {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ // All CharacterClass instances have to have the full set of matches and ranges,
+ // they may have an optional m_table for faster lookups (which must match the
+ // specified matches and ranges)
+ CharacterClass()
+ : m_table(0)
+ {
+ }
+ CharacterClass(const char* table, bool inverted)
+ : m_table(table)
+ , m_tableInverted(inverted)
+ {
+ }
+ Vector<UChar> m_matches;
+ Vector<CharacterRange> m_ranges;
+ Vector<UChar> m_matchesUnicode;
+ Vector<CharacterRange> m_rangesUnicode;
+
+ const char* m_table;
+ bool m_tableInverted;
+};
+
+enum QuantifierType {
+ QuantifierFixedCount,
+ QuantifierGreedy,
+ QuantifierNonGreedy,
+};
+
+struct PatternTerm {
+ enum Type {
+ TypeAssertionBOL,
+ TypeAssertionEOL,
+ TypeAssertionWordBoundary,
+ TypePatternCharacter,
+ TypeCharacterClass,
+ TypeBackReference,
+ TypeForwardReference,
+ TypeParenthesesSubpattern,
+ TypeParentheticalAssertion,
+ TypeDotStarEnclosure,
+ } type;
+ bool m_capture :1;
+ bool m_invert :1;
+ union {
+ UChar patternCharacter;
+ CharacterClass* characterClass;
+ unsigned backReferenceSubpatternId;
+ struct {
+ PatternDisjunction* disjunction;
+ unsigned subpatternId;
+ unsigned lastSubpatternId;
+ bool isCopy;
+ bool isTerminal;
+ } parentheses;
+ struct {
+ bool bolAnchor : 1;
+ bool eolAnchor : 1;
+ } anchors;
+ };
+ QuantifierType quantityType;
+ Checked<unsigned> quantityCount;
+ int inputPosition;
+ unsigned frameLocation;
+
+ PatternTerm(UChar ch)
+ : type(PatternTerm::TypePatternCharacter)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ patternCharacter = ch;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(CharacterClass* charClass, bool invert)
+ : type(PatternTerm::TypeCharacterClass)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ characterClass = charClass;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(Type type, unsigned subpatternId, PatternDisjunction* disjunction, bool capture = false, bool invert = false)
+ : type(type)
+ , m_capture(capture)
+ , m_invert(invert)
+ {
+ parentheses.disjunction = disjunction;
+ parentheses.subpatternId = subpatternId;
+ parentheses.isCopy = false;
+ parentheses.isTerminal = false;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(Type type, bool invert = false)
+ : type(type)
+ , m_capture(false)
+ , m_invert(invert)
+ {
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(unsigned spatternId)
+ : type(TypeBackReference)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ backReferenceSubpatternId = spatternId;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ PatternTerm(bool bolAnchor, bool eolAnchor)
+ : type(TypeDotStarEnclosure)
+ , m_capture(false)
+ , m_invert(false)
+ {
+ anchors.bolAnchor = bolAnchor;
+ anchors.eolAnchor = eolAnchor;
+ quantityType = QuantifierFixedCount;
+ quantityCount = 1;
+ }
+
+ static PatternTerm ForwardReference()
+ {
+ return PatternTerm(TypeForwardReference);
+ }
+
+ static PatternTerm BOL()
+ {
+ return PatternTerm(TypeAssertionBOL);
+ }
+
+ static PatternTerm EOL()
+ {
+ return PatternTerm(TypeAssertionEOL);
+ }
+
+ static PatternTerm WordBoundary(bool invert)
+ {
+ return PatternTerm(TypeAssertionWordBoundary, invert);
+ }
+
+ bool invert()
+ {
+ return m_invert;
+ }
+
+ bool capture()
+ {
+ return m_capture;
+ }
+
+ void quantify(unsigned count, QuantifierType type)
+ {
+ quantityCount = count;
+ quantityType = type;
+ }
+};
+
+struct PatternAlternative {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ PatternAlternative(PatternDisjunction* disjunction)
+ : m_parent(disjunction)
+ , m_onceThrough(false)
+ , m_hasFixedSize(false)
+ , m_startsWithBOL(false)
+ , m_containsBOL(false)
+ {
+ }
+
+ PatternTerm& lastTerm()
+ {
+ ASSERT(m_terms.size());
+ return m_terms[m_terms.size() - 1];
+ }
+
+ void removeLastTerm()
+ {
+ ASSERT(m_terms.size());
+ m_terms.shrink(m_terms.size() - 1);
+ }
+
+ void setOnceThrough()
+ {
+ m_onceThrough = true;
+ }
+
+ bool onceThrough()
+ {
+ return m_onceThrough;
+ }
+
+ Vector<PatternTerm> m_terms;
+ PatternDisjunction* m_parent;
+ unsigned m_minimumSize;
+ bool m_onceThrough : 1;
+ bool m_hasFixedSize : 1;
+ bool m_startsWithBOL : 1;
+ bool m_containsBOL : 1;
+};
+
+struct PatternDisjunction {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ PatternDisjunction(PatternAlternative* parent = 0)
+ : m_parent(parent)
+ , m_hasFixedSize(false)
+ {
+ }
+
+ PatternAlternative* addNewAlternative()
+ {
+ PatternAlternative* alternative = new PatternAlternative(this);
+ m_alternatives.append(adoptPtr(alternative));
+ return alternative;
+ }
+
+ Vector<OwnPtr<PatternAlternative> > m_alternatives;
+ PatternAlternative* m_parent;
+ unsigned m_minimumSize;
+ unsigned m_callFrameSize;
+ bool m_hasFixedSize;
+};
+
+// You probably don't want to be calling these functions directly
+// (please to be calling newlineCharacterClass() et al on your
+// friendly neighborhood YarrPattern instance to get nicely
+// cached copies).
+CharacterClass* newlineCreate();
+CharacterClass* digitsCreate();
+CharacterClass* spacesCreate();
+CharacterClass* wordcharCreate();
+CharacterClass* nondigitsCreate();
+CharacterClass* nonspacesCreate();
+CharacterClass* nonwordcharCreate();
+
+struct TermChain {
+ TermChain(PatternTerm term)
+ : term(term)
+ {}
+
+ PatternTerm term;
+ Vector<TermChain> hotTerms;
+};
+
+struct YarrPattern {
+ JS_EXPORT_PRIVATE YarrPattern(const String& pattern, bool ignoreCase, bool multiline, const char** error);
+
+ void reset()
+ {
+ m_numSubpatterns = 0;
+ m_maxBackReference = 0;
+
+ m_containsBackreferences = false;
+ m_containsBOL = false;
+
+ newlineCached = 0;
+ digitsCached = 0;
+ spacesCached = 0;
+ wordcharCached = 0;
+ nondigitsCached = 0;
+ nonspacesCached = 0;
+ nonwordcharCached = 0;
+
+ m_disjunctions.clear();
+ m_userCharacterClasses.clear();
+ }
+
+ bool containsIllegalBackReference()
+ {
+ return m_maxBackReference > m_numSubpatterns;
+ }
+
+ CharacterClass* newlineCharacterClass()
+ {
+ if (!newlineCached)
+ m_userCharacterClasses.append(adoptPtr(newlineCached = newlineCreate()));
+ return newlineCached;
+ }
+ CharacterClass* digitsCharacterClass()
+ {
+ if (!digitsCached)
+ m_userCharacterClasses.append(adoptPtr(digitsCached = digitsCreate()));
+ return digitsCached;
+ }
+ CharacterClass* spacesCharacterClass()
+ {
+ if (!spacesCached)
+ m_userCharacterClasses.append(adoptPtr(spacesCached = spacesCreate()));
+ return spacesCached;
+ }
+ CharacterClass* wordcharCharacterClass()
+ {
+ if (!wordcharCached)
+ m_userCharacterClasses.append(adoptPtr(wordcharCached = wordcharCreate()));
+ return wordcharCached;
+ }
+ CharacterClass* nondigitsCharacterClass()
+ {
+ if (!nondigitsCached)
+ m_userCharacterClasses.append(adoptPtr(nondigitsCached = nondigitsCreate()));
+ return nondigitsCached;
+ }
+ CharacterClass* nonspacesCharacterClass()
+ {
+ if (!nonspacesCached)
+ m_userCharacterClasses.append(adoptPtr(nonspacesCached = nonspacesCreate()));
+ return nonspacesCached;
+ }
+ CharacterClass* nonwordcharCharacterClass()
+ {
+ if (!nonwordcharCached)
+ m_userCharacterClasses.append(adoptPtr(nonwordcharCached = nonwordcharCreate()));
+ return nonwordcharCached;
+ }
+
+ bool m_ignoreCase : 1;
+ bool m_multiline : 1;
+ bool m_containsBackreferences : 1;
+ bool m_containsBOL : 1;
+ unsigned m_numSubpatterns;
+ unsigned m_maxBackReference;
+ PatternDisjunction* m_body;
+ Vector<OwnPtr<PatternDisjunction>, 4> m_disjunctions;
+ Vector<OwnPtr<CharacterClass> > m_userCharacterClasses;
+
+private:
+ const char* compile(const String& patternString);
+
+ CharacterClass* newlineCached;
+ CharacterClass* digitsCached;
+ CharacterClass* spacesCached;
+ CharacterClass* wordcharCached;
+ CharacterClass* nondigitsCached;
+ CharacterClass* nonspacesCached;
+ CharacterClass* nonwordcharCached;
+};
+
+} } // namespace JSC::Yarr
+
+#endif // YarrPattern_h
diff --git a/src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp b/src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp
new file mode 100644
index 0000000000..aa98c4a354
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "YarrSyntaxChecker.h"
+
+#include "YarrParser.h"
+
+namespace JSC { namespace Yarr {
+
+class SyntaxChecker {
+public:
+ void assertionBOL() {}
+ void assertionEOL() {}
+ void assertionWordBoundary(bool) {}
+ void atomPatternCharacter(UChar) {}
+ void atomBuiltInCharacterClass(BuiltInCharacterClassID, bool) {}
+ void atomCharacterClassBegin(bool = false) {}
+ void atomCharacterClassAtom(UChar) {}
+ void atomCharacterClassRange(UChar, UChar) {}
+ void atomCharacterClassBuiltIn(BuiltInCharacterClassID, bool) {}
+ void atomCharacterClassEnd() {}
+ void atomParenthesesSubpatternBegin(bool = true) {}
+ void atomParentheticalAssertionBegin(bool = false) {}
+ void atomParenthesesEnd() {}
+ void atomBackReference(unsigned) {}
+ void quantifyAtom(unsigned, unsigned, bool) {}
+ void disjunction() {}
+};
+
+const char* checkSyntax(const String& pattern)
+{
+ SyntaxChecker syntaxChecker;
+ return parse(syntaxChecker, pattern);
+}
+
+}} // JSC::YARR
diff --git a/src/3rdparty/masm/yarr/YarrSyntaxChecker.h b/src/3rdparty/masm/yarr/YarrSyntaxChecker.h
new file mode 100644
index 0000000000..104ced3ab4
--- /dev/null
+++ b/src/3rdparty/masm/yarr/YarrSyntaxChecker.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YarrSyntaxChecker_h
+#define YarrSyntaxChecker_h
+
+#include <wtf/text/WTFString.h>
+
+namespace JSC { namespace Yarr {
+
+const char* checkSyntax(const String& pattern);
+
+}} // JSC::YARR
+
+#endif // YarrSyntaxChecker_h
+
diff --git a/src/3rdparty/masm/yarr/yarr.pri b/src/3rdparty/masm/yarr/yarr.pri
new file mode 100644
index 0000000000..7e9b4d3f3b
--- /dev/null
+++ b/src/3rdparty/masm/yarr/yarr.pri
@@ -0,0 +1,12 @@
+# -------------------------------------------------------------------
+# Project file for YARR
+#
+# See 'Tools/qmake/README' for an overview of the build system
+# -------------------------------------------------------------------
+
+SOURCES += \
+ $$PWD/YarrInterpreter.cpp \
+ $$PWD/YarrPattern.cpp \
+ $$PWD/YarrSyntaxChecker.cpp \
+ $$PWD/YarrCanonicalizeUCS2.cpp
+
diff --git a/src/qml/qml/v4vm/debugging.cpp b/src/qml/qml/v4vm/debugging.cpp
new file mode 100644
index 0000000000..6137e64f89
--- /dev/null
+++ b/src/qml/qml/v4vm/debugging.cpp
@@ -0,0 +1,308 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "debugging.h"
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <iostream>
+
+#define LOW_LEVEL_DEBUGGING_HELPERS
+
+using namespace QQmlJS;
+using namespace QQmlJS::Debugging;
+
+FunctionState::FunctionState(VM::ExecutionContext *context)
+ : _context(context)
+{
+ if (debugger())
+ debugger()->enterFunction(this);
+}
+
+FunctionState::~FunctionState()
+{
+ if (debugger())
+ debugger()->leaveFunction(this);
+}
+
+VM::Value *FunctionState::argument(unsigned idx)
+{
+ VM::CallContext *c = _context->asCallContext();
+ if (!c || idx >= c->argumentCount)
+ return 0;
+ return c->arguments + idx;
+}
+
+VM::Value *FunctionState::local(unsigned idx)
+{
+ VM::CallContext *c = _context->asCallContext();
+ if (c && idx < c->variableCount())
+ return c->locals + idx;
+ return 0;
+}
+
+#ifdef LOW_LEVEL_DEBUGGING_HELPERS
+Debugger *globalInstance = 0;
+
+void printStackTrace()
+{
+ if (globalInstance)
+ globalInstance->printStackTrace();
+ else
+ std::cerr << "No debugger." << std::endl;
+}
+#endif // DO_TRACE_INSTR
+
+Debugger::Debugger(VM::ExecutionEngine *engine)
+ : _engine(engine)
+{
+#ifdef LOW_LEVEL_DEBUGGING_HELPERS
+ globalInstance = this;
+#endif // DO_TRACE_INSTR
+}
+
+Debugger::~Debugger()
+{
+#ifdef LOW_LEVEL_DEBUGGING_HELPERS
+ globalInstance = 0;
+#endif // DO_TRACE_INSTR
+
+ qDeleteAll(_functionInfo.values());
+}
+
+void Debugger::addFunction(V4IR::Function *function)
+{
+ _functionInfo.insert(function, new FunctionDebugInfo(function));
+}
+
+void Debugger::setSourceLocation(V4IR::Function *function, unsigned line, unsigned column)
+{
+ _functionInfo[function]->setSourceLocation(line, column);
+}
+
+void Debugger::mapFunction(VM::Function *vmf, V4IR::Function *irf)
+{
+ _vmToIr.insert(vmf, irf);
+}
+
+FunctionDebugInfo *Debugger::debugInfo(VM::FunctionObject *function) const
+{
+ if (!function)
+ return 0;
+
+ if (function->function)
+ return _functionInfo[irFunction(function->function)];
+ else
+ return 0;
+}
+
+QString Debugger::name(VM::FunctionObject *function) const
+{
+ if (FunctionDebugInfo *i = debugInfo(function))
+ return i->name;
+
+ return QString();
+}
+
+void Debugger::aboutToCall(VM::FunctionObject *function, VM::ExecutionContext *context)
+{
+ _callStack.append(CallInfo(context, function));
+}
+
+void Debugger::justLeft(VM::ExecutionContext *context)
+{
+ int idx = callIndex(context);
+ if (idx < 0)
+ qDebug() << "Oops, leaving a function that was not registered...?";
+ else
+ _callStack.resize(idx);
+}
+
+void Debugger::enterFunction(FunctionState *state)
+{
+ _callStack[callIndex(state->context())].state = state;
+
+#ifdef DO_TRACE_INSTR
+ QString n = name(_callStack[callIndex(state->context())].function);
+ std::cerr << "*** Entering \"" << qPrintable(n) << "\" with " << state->context()->argumentCount << " args" << std::endl;
+// for (unsigned i = 0; i < state->context()->variableEnvironment->argumentCount; ++i)
+// std::cerr << " " << i << ": " << currentArg(i) << std::endl;
+#endif // DO_TRACE_INSTR
+}
+
+void Debugger::leaveFunction(FunctionState *state)
+{
+ _callStack[callIndex(state->context())].state = 0;
+}
+
+void Debugger::aboutToThrow(const VM::Value &value)
+{
+ qDebug() << "*** We are about to throw...:" << value.toString(currentState()->context())->toQString();
+}
+
+FunctionState *Debugger::currentState() const
+{
+ if (_callStack.isEmpty())
+ return 0;
+ else
+ return _callStack.last().state;
+}
+
+const char *Debugger::currentArg(unsigned idx) const
+{
+ FunctionState *state = currentState();
+ return qPrintable(state->argument(idx)->toString(state->context())->toQString());
+}
+
+const char *Debugger::currentLocal(unsigned idx) const
+{
+ FunctionState *state = currentState();
+ return qPrintable(state->local(idx)->toString(state->context())->toQString());
+}
+
+const char *Debugger::currentTemp(unsigned idx) const
+{
+ FunctionState *state = currentState();
+ return qPrintable(state->temp(idx)->toString(state->context())->toQString());
+}
+
+void Debugger::printStackTrace() const
+{
+ for (int i = _callStack.size() - 1; i >=0; --i) {
+ QString n = name(_callStack[i].function);
+ std::cerr << "\tframe #" << i << ": " << qPrintable(n) << std::endl;
+ }
+}
+
+int Debugger::callIndex(VM::ExecutionContext *context)
+{
+ for (int idx = _callStack.size() - 1; idx >= 0; --idx) {
+ if (_callStack[idx].context == context)
+ return idx;
+ }
+
+ return -1;
+}
+
+V4IR::Function *Debugger::irFunction(VM::Function *vmf) const
+{
+ return _vmToIr[vmf];
+}
+
+static void realDumpValue(VM::Value v, VM::ExecutionContext *ctx, std::string prefix)
+{
+ using namespace VM;
+ using namespace std;
+ cout << prefix << "tag: " << hex << v.tag << dec << endl << prefix << "\t-> ";
+ switch (v.type()) {
+ case Value::Undefined_Type: cout << "Undefined" << endl; return;
+ case Value::Null_Type: cout << "Null" << endl; return;
+ case Value::Boolean_Type: cout << "Boolean"; break;
+ case Value::Integer_Type: cout << "Integer"; break;
+ case Value::Object_Type: cout << "Object"; break;
+ case Value::String_Type: cout << "String"; break;
+ default: cout << "UNKNOWN" << endl; return;
+ }
+ cout << endl;
+
+ if (v.isBoolean()) {
+ cout << prefix << "\t-> " << (v.booleanValue() ? "TRUE" : "FALSE") << endl;
+ return;
+ }
+
+ if (v.isInteger()) {
+ cout << prefix << "\t-> " << v.integerValue() << endl;
+ return;
+ }
+
+ if (v.isDouble()) {
+ cout << prefix << "\t-> " << v.doubleValue() << endl;
+ return;
+ }
+
+ if (v.isString()) {
+ // maybe check something on the Managed object?
+ cout << prefix << "\t-> @" << hex << v.stringValue() << endl;
+ cout << prefix << "\t-> \"" << qPrintable(v.stringValue()->toQString()) << "\"" << endl;
+ return;
+ }
+
+ Object *o = v.objectValue();
+ if (!o)
+ return;
+
+ cout << prefix << "\t-> @" << hex << o << endl;
+ cout << prefix << "object type: " << o->internalType() << endl << prefix << "\t-> ";
+ switch (o->internalType()) {
+ case VM::Managed::Type_Invalid: cout << "Invalid"; break;
+ case VM::Managed::Type_String: cout << "String"; break;
+ case VM::Managed::Type_Object: cout << "Object"; break;
+ case VM::Managed::Type_ArrayObject: cout << "ArrayObject"; break;
+ case VM::Managed::Type_FunctionObject: cout << "FunctionObject"; break;
+ case VM::Managed::Type_BooleanObject: cout << "BooleanObject"; break;
+ case VM::Managed::Type_NumberObject: cout << "NumberObject"; break;
+ case VM::Managed::Type_StringObject: cout << "StringObject"; break;
+ case VM::Managed::Type_DateObject: cout << "DateObject"; break;
+ case VM::Managed::Type_RegExpObject: cout << "RegExpObject"; break;
+ case VM::Managed::Type_ErrorObject: cout << "ErrorObject"; break;
+ case VM::Managed::Type_ArgumentsObject: cout << "ArgumentsObject"; break;
+ case VM::Managed::Type_JSONObject: cout << "JSONObject"; break;
+ case VM::Managed::Type_MathObject: cout << "MathObject"; break;
+ case VM::Managed::Type_ForeachIteratorObject: cout << "ForeachIteratorObject"; break;
+ default: cout << "UNKNOWN" << endl; return;
+ }
+ cout << endl;
+
+ cout << prefix << "properties:" << endl;
+ ForEachIteratorObject it(ctx, o);
+ for (Value name = it.nextPropertyName(); !name.isNull(); name = it.nextPropertyName()) {
+ cout << prefix << "\t\"" << qPrintable(name.stringValue()->toQString()) << "\"" << endl;
+ PropertyAttributes attrs;
+ Property *d = o->__getOwnProperty__(name.stringValue(), &attrs);
+ Value pval = o->getValue(ctx, d, attrs);
+ cout << prefix << "\tvalue:" << endl;
+ realDumpValue(pval, ctx, prefix + "\t");
+ }
+}
+
+void dumpValue(VM::Value v, VM::ExecutionContext *ctx)
+{
+ realDumpValue(v, ctx, std::string(""));
+}
diff --git a/src/qml/qml/v4vm/debugging.h b/src/qml/qml/v4vm/debugging.h
new file mode 100644
index 0000000000..b337f41c51
--- /dev/null
+++ b/src/qml/qml/v4vm/debugging.h
@@ -0,0 +1,157 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef DEBUGGING_H
+#define DEBUGGING_H
+
+#include "qv4global.h"
+#include "qv4engine.h"
+#include "qv4context.h"
+
+#include <QHash>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+
+namespace V4IR {
+struct BasicBlock;
+struct Function;
+} // namespace IR
+
+namespace Debugging {
+
+class Debugger;
+
+struct FunctionDebugInfo { // TODO: use opaque d-pointers here
+ QString name;
+ unsigned startLine, startColumn;
+
+ FunctionDebugInfo(V4IR::Function *function):
+ startLine(0), startColumn(0)
+ {
+ if (function->name)
+ name = *function->name;
+ }
+
+ void setSourceLocation(unsigned line, unsigned column)
+ { startLine = line; startColumn = column; }
+};
+
+class FunctionState
+{
+public:
+ FunctionState(VM::ExecutionContext *context);
+ virtual ~FunctionState();
+
+ virtual VM::Value *argument(unsigned idx);
+ virtual VM::Value *local(unsigned idx);
+ virtual VM::Value *temp(unsigned idx) = 0;
+
+ VM::ExecutionContext *context() const
+ { return _context; }
+
+ Debugger *debugger() const
+ { return _context->engine->debugger; }
+
+private:
+ VM::ExecutionContext *_context;
+};
+
+struct CallInfo
+{
+ VM::ExecutionContext *context;
+ VM::FunctionObject *function;
+ FunctionState *state;
+
+ CallInfo(VM::ExecutionContext *context = 0, VM::FunctionObject *function = 0, FunctionState *state = 0)
+ : context(context)
+ , function(function)
+ , state(state)
+ {}
+};
+
+class Q_V4_EXPORT Debugger
+{
+public:
+ Debugger(VM::ExecutionEngine *_engine);
+ ~Debugger();
+
+public: // compile-time interface
+ void addFunction(V4IR::Function *function);
+ void setSourceLocation(V4IR::Function *function, unsigned line, unsigned column);
+ void mapFunction(VM::Function *vmf, V4IR::Function *irf);
+
+public: // run-time querying interface
+ FunctionDebugInfo *debugInfo(VM::FunctionObject *function) const;
+ QString name(VM::FunctionObject *function) const;
+
+public: // execution hooks
+ void aboutToCall(VM::FunctionObject *function, VM::ExecutionContext *context);
+ void justLeft(VM::ExecutionContext *context);
+ void enterFunction(FunctionState *state);
+ void leaveFunction(FunctionState *state);
+ void aboutToThrow(const VM::Value &value);
+
+public: // debugging hooks
+ FunctionState *currentState() const;
+ const char *currentArg(unsigned idx) const;
+ const char *currentLocal(unsigned idx) const;
+ const char *currentTemp(unsigned idx) const;
+ void printStackTrace() const;
+
+private:
+ int callIndex(VM::ExecutionContext *context);
+ V4IR::Function *irFunction(VM::Function *vmf) const;
+
+private: // TODO: use opaque d-pointers here
+ VM::ExecutionEngine *_engine;
+ QHash<V4IR::Function *, FunctionDebugInfo *> _functionInfo;
+ QHash<VM::Function *, V4IR::Function *> _vmToIr;
+ QVector<CallInfo> _callStack;
+};
+
+} // namespace Debugging
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // DEBUGGING_H
diff --git a/src/qml/qml/v4vm/llvm_installation.pri b/src/qml/qml/v4vm/llvm_installation.pri
new file mode 100644
index 0000000000..99e955fd2b
--- /dev/null
+++ b/src/qml/qml/v4vm/llvm_installation.pri
@@ -0,0 +1,23 @@
+LLVM_CONFIG=llvm-config
+# Pick up the qmake variable or environment variable for LLVM_INSTALL_DIR. If either was set, change the LLVM_CONFIG to use that.
+isEmpty(LLVM_INSTALL_DIR):LLVM_INSTALL_DIR=$$(LLVM_INSTALL_DIR)
+!isEmpty(LLVM_INSTALL_DIR):LLVM_CONFIG=$$LLVM_INSTALL_DIR/bin/llvm-config
+exists ($${LLVM_CONFIG}) {
+ CONFIG += llvm-libs
+ message("Found LLVM in $$LLVM_INSTALL_DIR")
+}
+
+llvm-libs {
+ win32 {
+ LLVM_INCLUDEPATH = $$LLVM_INSTALL_DIR/include
+# TODO: check if the next line is needed somehow for the llvm_runtime target.
+ LLVM_LIBS += -ladvapi32 -lshell32
+ }
+
+ unix {
+ LLVM_INCLUDEPATH = $$system($$LLVM_CONFIG --includedir)
+ LLVM_LIBDIR = $$system($$LLVM_CONFIG --libdir)
+ }
+
+ LLVM_DEFINES += __STDC_LIMIT_MACROS __STDC_CONSTANT_MACROS
+}
diff --git a/src/qml/qml/v4vm/llvm_runtime.cpp b/src/qml/qml/v4vm/llvm_runtime.cpp
new file mode 100644
index 0000000000..629ee4028c
--- /dev/null
+++ b/src/qml/qml/v4vm/llvm_runtime.cpp
@@ -0,0 +1,513 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4runtime.h"
+#include "qv4context.h"
+#include "qv4engine.h"
+#include <stdio.h>
+#include <setjmp.h>
+
+using namespace QQmlJS::VM;
+
+extern "C" {
+
+Value __qmljs_llvm_return(ExecutionContext */*ctx*/, Value *result)
+{
+ return *result;
+}
+
+Value *__qmljs_llvm_get_argument(ExecutionContext *ctx, int index)
+{
+ assert(ctx->type == ExecutionContext::Type_CallContext);
+ return &static_cast<CallContext *>(ctx)->arguments[index];
+}
+
+void __qmljs_llvm_init_undefined(Value *result)
+{
+ *result = Value::undefinedValue();
+}
+
+void __qmljs_llvm_init_null(Value *result)
+{
+ *result = Value::nullValue();
+}
+
+void __qmljs_llvm_init_boolean(Value *result, bool value)
+{
+ *result = Value::fromBoolean(value);
+}
+
+void __qmljs_llvm_init_number(Value *result, double value)
+{
+ *result = Value::fromDouble(value);
+}
+
+void __qmljs_llvm_init_string(ExecutionContext *ctx, Value *result, const char *str)
+{
+ *result = Value::fromString(ctx->engine->newString(QString::fromUtf8(str)));
+}
+
+void __qmljs_llvm_init_closure(ExecutionContext *ctx, Value *result,
+ String *name, bool hasDirectEval,
+ bool usesArgumentsObject, bool isStrict,
+ bool hasNestedFunctions,
+ String **formals, unsigned formalCount,
+ String **locals, unsigned localCount)
+{
+ Function *clos = __qmljs_register_function(ctx, name, hasDirectEval,
+ usesArgumentsObject, isStrict,
+ hasNestedFunctions,
+ formals, formalCount,
+ locals, localCount);
+ __qmljs_init_closure(ctx, result, clos);
+}
+
+bool __qmljs_llvm_to_boolean(ExecutionContext *ctx, const Value *value)
+{
+ return __qmljs_to_boolean(*value);
+}
+
+void __qmljs_llvm_bit_and(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_bit_and(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_bit_or(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_bit_or(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_bit_xor(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_bit_xor(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_add(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_add(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_sub(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_sub(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_mul(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_mul(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_div(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_div(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_mod(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_mod(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_shl(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_shl(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_shr(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_shr(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_ushr(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_ushr(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_gt(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_gt(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_lt(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_lt(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_ge(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_ge(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_le(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_le(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_eq(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_eq(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_ne(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_ne(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_se(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_se(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_sne(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_sne(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_instanceof(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_instanceof(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_in(ExecutionContext *ctx, Value *result, Value *left, Value *right)
+{
+ __qmljs_in(ctx, result, *left, *right);
+}
+
+void __qmljs_llvm_uplus(ExecutionContext *ctx, Value *result, const Value *value)
+{
+ __qmljs_uplus(result, *value);
+}
+
+void __qmljs_llvm_uminus(ExecutionContext *ctx, Value *result, const Value *value)
+{
+ __qmljs_uminus(result, *value);
+}
+
+void __qmljs_llvm_compl(ExecutionContext *ctx, Value *result, const Value *value)
+{
+ __qmljs_compl(result, *value);
+}
+
+void __qmljs_llvm_not(ExecutionContext *ctx, Value *result, const Value *value)
+{
+ __qmljs_not(result, *value);
+}
+
+void __qmljs_llvm_inplace_bit_and_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_bit_and_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_bit_or_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_bit_or_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_bit_xor_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_bit_xor_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_add_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_add_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_sub_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_sub_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_mul_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_mul_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_div_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_div_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_mod_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_mod_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_shl_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_shl_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_shr_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_shr_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_ushr_name(ExecutionContext *ctx, String *dest, Value *src)
+{
+ __qmljs_inplace_ushr_name(ctx, dest, *src);
+}
+
+void __qmljs_llvm_inplace_bit_and_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_bit_and_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_bit_or_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_bit_or_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_bit_xor_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_bit_xor_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_add_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_add_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_sub_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_sub_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_mul_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_mul_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_div_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_div_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_mod_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_mod_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_shl_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_shl_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_shr_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_shr_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_ushr_element(ExecutionContext *ctx, Value *base, Value *index, Value *value)
+{
+ __qmljs_inplace_ushr_element(ctx, *base, *index, *value);
+}
+
+void __qmljs_llvm_inplace_bit_and_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_bit_and_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_bit_or_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_bit_or_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_bit_xor_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_bit_xor_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_add_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_add_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_sub_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_sub_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_mul_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_mul_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_div_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_div_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_mod_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_mod_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_shl_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_shl_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_shr_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_shr_member(ctx, *base, member, *value);
+}
+
+void __qmljs_llvm_inplace_ushr_member(ExecutionContext *ctx, Value *value, Value *base, String *member)
+{
+ __qmljs_inplace_ushr_member(ctx, *base, member, *value);
+}
+
+String *__qmljs_llvm_identifier_from_utf8(ExecutionContext *ctx, const char *str)
+{
+ return ctx->engine->newIdentifier(QString::fromUtf8(str));
+}
+
+void __qmljs_llvm_call_activation_property(ExecutionContext *context, Value *result, String *name, Value *args, int argc)
+{
+ __qmljs_call_activation_property(context, result, name, args, argc);
+}
+
+void __qmljs_llvm_call_value(ExecutionContext *context, Value *result, const Value *thisObject, const Value *func, Value *args, int argc)
+{
+ __qmljs_call_value(context, result, thisObject, *func, args, argc);
+}
+
+void __qmljs_llvm_construct_activation_property(ExecutionContext *context, Value *result, String *name, Value *args, int argc)
+{
+ __qmljs_construct_activation_property(context, result, name, args, argc);
+}
+
+void __qmljs_llvm_construct_value(ExecutionContext *context, Value *result, const Value *func, Value *args, int argc)
+{
+ __qmljs_construct_value(context, result, *func, args, argc);
+}
+
+void __qmljs_llvm_get_activation_property(ExecutionContext *ctx, Value *result, String *name)
+{
+ __qmljs_get_activation_property(ctx, result, name);
+}
+
+void __qmljs_llvm_set_activation_property(ExecutionContext *ctx, String *name, Value *value)
+{
+ __qmljs_set_activation_property(ctx, name, *value);
+}
+
+void __qmljs_llvm_get_property(ExecutionContext *ctx, Value *result, Value *object, String *name)
+{
+ __qmljs_get_property(ctx, result, *object, name);
+}
+
+void __qmljs_llvm_call_property(ExecutionContext *context, Value *result, const Value *base, String *name, Value *args, int argc)
+{
+ __qmljs_call_property(context, result, *base, name, args, argc);
+}
+
+void __qmljs_llvm_construct_property(ExecutionContext *context, Value *result, const Value *base, String *name, Value *args, int argc)
+{
+ __qmljs_construct_property(context, result, *base, name, args, argc);
+}
+
+void __qmljs_llvm_get_element(ExecutionContext *ctx, Value *result, Value *object, Value *index)
+{
+ __qmljs_get_element(ctx, result, *object, *index);
+}
+
+void __qmljs_llvm_set_element(ExecutionContext *ctx, Value *object, Value *index, Value *value)
+{
+ __qmljs_set_element(ctx, *object, *index, *value);
+}
+
+void __qmljs_llvm_set_property(ExecutionContext *ctx, Value *object, String *name, Value *value)
+{
+ __qmljs_set_property(ctx, *object, name, *value);
+}
+
+void __qmljs_llvm_builtin_declare_var(ExecutionContext *ctx, bool deletable, String *name)
+{
+ __qmljs_builtin_declare_var(ctx, deletable, name);
+}
+
+void __qmljs_llvm_typeof(ExecutionContext *ctx, Value *result, const Value *value)
+{
+ __qmljs_builtin_typeof(ctx, result, *value);
+}
+
+void __qmljs_llvm_throw(ExecutionContext *context, Value *value)
+{
+ __qmljs_throw(context, *value);
+}
+
+void __qmljs_llvm_delete_exception_handler(ExecutionContext *context)
+{
+ // ### FIXME.
+}
+
+void __qmljs_llvm_foreach_iterator_object(ExecutionContext *context, Value *result, Value *in)
+{
+ __qmljs_foreach_iterator_object(context, result, *in);
+}
+
+void __qmljs_llvm_foreach_next_property_name(Value *result, Value *it)
+{
+ __qmljs_foreach_next_property_name(result, *it);
+}
+
+void __qmljs_llvm_get_this_object(ExecutionContext *ctx, Value *result)
+{
+ *result = ctx->thisObject;
+}
+
+void __qmljs_llvm_delete_subscript(ExecutionContext *ctx, Value *result, Value *base, Value *index)
+{
+ __qmljs_delete_subscript(ctx, result, *base, *index);
+}
+
+void __qmljs_llvm_delete_member(ExecutionContext *ctx, Value *result, Value *base, String *name)
+{
+ __qmljs_delete_member(ctx, result, *base, name);
+}
+
+void __qmljs_llvm_delete_name(ExecutionContext *ctx, Value *result, String *name)
+{
+ __qmljs_delete_name(ctx, result, name);
+}
+
+} // extern "C"
diff --git a/src/qml/qml/v4vm/moth/moth.pri b/src/qml/qml/v4vm/moth/moth.pri
new file mode 100644
index 0000000000..73bd893286
--- /dev/null
+++ b/src/qml/qml/v4vm/moth/moth.pri
@@ -0,0 +1,13 @@
+INCLUDEPATH += $$PWD
+
+HEADERS += \
+ $$PWD/qv4isel_moth_p.h \
+ $$PWD/qv4instr_moth_p.h \
+ $$PWD/qv4vme_moth_p.h
+
+SOURCES += \
+ $$PWD/qv4isel_moth.cpp \
+ $$PWD/qv4instr_moth.cpp \
+ $$PWD/qv4vme_moth.cpp
+
+#DEFINES += DO_TRACE_INSTR
diff --git a/src/qml/qml/v4vm/moth/qv4instr_moth.cpp b/src/qml/qml/v4vm/moth/qv4instr_moth.cpp
new file mode 100644
index 0000000000..a2bad39e00
--- /dev/null
+++ b/src/qml/qml/v4vm/moth/qv4instr_moth.cpp
@@ -0,0 +1,15 @@
+#include "qv4instr_moth_p.h"
+
+using namespace QQmlJS;
+using namespace QQmlJS::Moth;
+
+int Instr::size(Type type)
+{
+#define MOTH_RETURN_INSTR_SIZE(I, FMT) case I: return InstrMeta<(int)I>::Size;
+ switch (type) {
+ FOR_EACH_MOTH_INSTR(MOTH_RETURN_INSTR_SIZE)
+ default: return 0;
+ }
+#undef MOTH_RETURN_INSTR_SIZE
+}
+
diff --git a/src/qml/qml/v4vm/moth/qv4instr_moth_p.h b/src/qml/qml/v4vm/moth/qv4instr_moth_p.h
new file mode 100644
index 0000000000..b29fb13a74
--- /dev/null
+++ b/src/qml/qml/v4vm/moth/qv4instr_moth_p.h
@@ -0,0 +1,527 @@
+#ifndef QV4INSTR_MOTH_P_H
+#define QV4INSTR_MOTH_P_H
+
+#include <QtCore/qglobal.h>
+#include "qv4object.h"
+
+#define FOR_EACH_MOTH_INSTR(F) \
+ F(Ret, ret) \
+ F(LoadValue, loadValue) \
+ F(LoadClosure, loadClosure) \
+ F(MoveTemp, moveTemp) \
+ F(LoadName, loadName) \
+ F(StoreName, storeName) \
+ F(LoadElement, loadElement) \
+ F(StoreElement, storeElement) \
+ F(LoadProperty, loadProperty) \
+ F(StoreProperty, storeProperty) \
+ F(Push, push) \
+ F(EnterTry, enterTry) \
+ F(CallValue, callValue) \
+ F(CallProperty, callProperty) \
+ F(CallElement, callElement) \
+ F(CallActivationProperty, callActivationProperty) \
+ F(CallBuiltinThrow, callBuiltinThrow) \
+ F(CallBuiltinFinishTry, callBuiltinFinishTry) \
+ F(CallBuiltinPushScope, callBuiltinPushScope) \
+ F(CallBuiltinPopScope, callBuiltinPopScope) \
+ F(CallBuiltinForeachIteratorObject, callBuiltinForeachIteratorObject) \
+ F(CallBuiltinForeachNextPropertyName, callBuiltinForeachNextPropertyName) \
+ F(CallBuiltinDeleteMember, callBuiltinDeleteMember) \
+ F(CallBuiltinDeleteSubscript, callBuiltinDeleteSubscript) \
+ F(CallBuiltinDeleteName, callBuiltinDeleteName) \
+ F(CallBuiltinTypeofMember, callBuiltinTypeofMember) \
+ F(CallBuiltinTypeofSubscript, callBuiltinTypeofSubscript) \
+ F(CallBuiltinTypeofName, callBuiltinTypeofName) \
+ F(CallBuiltinTypeofValue, callBuiltinTypeofValue) \
+ F(CallBuiltinPostIncMember, callBuiltinPostIncMember) \
+ F(CallBuiltinPostIncSubscript, callBuiltinPostIncSubscript) \
+ F(CallBuiltinPostIncName, callBuiltinPostIncName) \
+ F(CallBuiltinPostIncValue, callBuiltinPostIncValue) \
+ F(CallBuiltinPostDecMember, callBuiltinPostDecMember) \
+ F(CallBuiltinPostDecSubscript, callBuiltinPostDecSubscript) \
+ F(CallBuiltinPostDecName, callBuiltinPostDecName) \
+ F(CallBuiltinPostDecValue, callBuiltinPostDecValue) \
+ F(CallBuiltinDeclareVar, callBuiltinDeclareVar) \
+ F(CallBuiltinDefineGetterSetter, callBuiltinDefineGetterSetter) \
+ F(CallBuiltinDefineProperty, callBuiltinDefineProperty) \
+ F(CallBuiltinDefineArray, callBuiltinDefineArray) \
+ F(CreateValue, createValue) \
+ F(CreateProperty, createProperty) \
+ F(CreateActivationProperty, createActivationProperty) \
+ F(Jump, jump) \
+ F(CJump, cjump) \
+ F(Unop, unop) \
+ F(Binop, binop) \
+ F(LoadThis, loadThis) \
+ F(InplaceElementOp, inplaceElementOp) \
+ F(InplaceMemberOp, inplaceMemberOp) \
+ F(InplaceNameOp, inplaceNameOp)
+
+#if defined(Q_CC_GNU) && (!defined(Q_CC_INTEL) || __INTEL_COMPILER >= 1200)
+# define MOTH_THREADED_INTERPRETER
+#endif
+
+#define MOTH_INSTR_ALIGN_MASK (Q_ALIGNOF(QQmlJS::Moth::Instr) - 1)
+
+#ifdef MOTH_THREADED_INTERPRETER
+# define MOTH_INSTR_HEADER void *code;
+#else
+# define MOTH_INSTR_HEADER quint8 instructionType;
+#endif
+
+#define MOTH_INSTR_ENUM(I, FMT) I,
+#define MOTH_INSTR_SIZE(I, FMT) ((sizeof(QQmlJS::Moth::Instr::instr_##FMT) + MOTH_INSTR_ALIGN_MASK) & ~MOTH_INSTR_ALIGN_MASK)
+
+
+namespace QQmlJS {
+namespace Moth {
+
+union Instr
+{
+ struct Param {
+ enum {
+ ValueType = 0,
+ ArgumentType = 1,
+ LocalType = 2,
+ TempType = 3,
+ ScopedLocalType = 4
+ };
+ VM::Value value;
+ unsigned type : 3;
+ unsigned scope : 29;
+ unsigned index;
+
+ bool isValue() const { return type == ValueType; }
+ bool isArgument() const { return type == ArgumentType; }
+ bool isLocal() const { return type == LocalType; }
+ bool isTemp() const { return type == TempType; }
+ bool isScopedLocal() const { return type == ScopedLocalType; }
+
+ static Param createValue(const VM::Value &v)
+ {
+ Param p;
+ p.type = ValueType;
+ p.scope = 0;
+ p.value = v;
+ return p;
+ }
+
+ static Param createArgument(unsigned idx, uint scope)
+ {
+ Param p;
+ p.type = ArgumentType;
+ p.scope = scope;
+ p.index = idx;
+ return p;
+ }
+
+ static Param createLocal(unsigned idx)
+ {
+ Param p;
+ p.type = LocalType;
+ p.scope = 0;
+ p.index = idx;
+ return p;
+ }
+
+ static Param createTemp(unsigned idx)
+ {
+ Param p;
+ p.type = TempType;
+ p.scope = 0;
+ p.index = idx;
+ return p;
+ }
+
+ static Param createScopedLocal(unsigned idx, uint scope)
+ {
+ Param p;
+ p.type = ScopedLocalType;
+ p.scope = scope;
+ p.index = idx;
+ return p;
+ }
+ };
+
+ enum Type {
+ FOR_EACH_MOTH_INSTR(MOTH_INSTR_ENUM)
+ };
+
+ struct instr_common {
+ MOTH_INSTR_HEADER
+ };
+ struct instr_ret {
+ MOTH_INSTR_HEADER
+ Param result;
+ };
+ struct instr_loadValue {
+ MOTH_INSTR_HEADER
+ Param value;
+ Param result;
+ };
+ struct instr_moveTemp {
+ MOTH_INSTR_HEADER
+ Param source;
+ Param result;
+ };
+ struct instr_loadClosure {
+ MOTH_INSTR_HEADER
+ VM::Function *value;
+ Param result;
+ };
+ struct instr_loadName {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param result;
+ };
+ struct instr_storeName {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param source;
+ };
+ struct instr_loadProperty {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param base;
+ Param result;
+ };
+ struct instr_storeProperty {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param base;
+ Param source;
+ };
+ struct instr_loadElement {
+ MOTH_INSTR_HEADER
+ Param base;
+ Param index;
+ Param result;
+ };
+ struct instr_storeElement {
+ MOTH_INSTR_HEADER
+ Param base;
+ Param index;
+ Param source;
+ };
+ struct instr_push {
+ MOTH_INSTR_HEADER
+ quint32 value;
+ };
+ struct instr_enterTry {
+ MOTH_INSTR_HEADER
+ ptrdiff_t tryOffset;
+ ptrdiff_t catchOffset;
+ VM::String *exceptionVarName;
+ Param exceptionVar;
+ };
+ struct instr_callValue {
+ MOTH_INSTR_HEADER
+ quint32 argc;
+ quint32 args;
+ Param dest;
+ Param result;
+ };
+ struct instr_callProperty {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ quint32 argc;
+ quint32 args;
+ Param base;
+ Param result;
+ };
+ struct instr_callElement {
+ MOTH_INSTR_HEADER
+ Param base;
+ Param index;
+ quint32 argc;
+ quint32 args;
+ Param result;
+ };
+ struct instr_callActivationProperty {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ quint32 argc;
+ quint32 args;
+ Param result;
+ };
+ struct instr_callBuiltinThrow {
+ MOTH_INSTR_HEADER
+ Param arg;
+ };
+ struct instr_callBuiltinFinishTry {
+ MOTH_INSTR_HEADER
+ };
+ struct instr_callBuiltinPushScope {
+ MOTH_INSTR_HEADER
+ Param arg;
+ };
+ struct instr_callBuiltinPopScope {
+ MOTH_INSTR_HEADER
+ };
+ struct instr_callBuiltinForeachIteratorObject {
+ MOTH_INSTR_HEADER
+ Param arg;
+ Param result;
+ };
+ struct instr_callBuiltinForeachNextPropertyName {
+ MOTH_INSTR_HEADER
+ Param arg;
+ Param result;
+ };
+ struct instr_callBuiltinDeleteMember {
+ MOTH_INSTR_HEADER
+ VM::String *member;
+ Param base;
+ Param result;
+ };
+ struct instr_callBuiltinDeleteSubscript {
+ MOTH_INSTR_HEADER
+ Param base;
+ Param index;
+ Param result;
+ };
+ struct instr_callBuiltinDeleteName {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param result;
+ };
+ struct instr_callBuiltinTypeofMember {
+ MOTH_INSTR_HEADER
+ VM::String *member;
+ Param base;
+ Param result;
+ };
+ struct instr_callBuiltinTypeofSubscript {
+ MOTH_INSTR_HEADER
+ Param base;
+ Param index;
+ Param result;
+ };
+ struct instr_callBuiltinTypeofName {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param result;
+ };
+ struct instr_callBuiltinTypeofValue {
+ MOTH_INSTR_HEADER
+ Param value;
+ Param result;
+ };
+ struct instr_callBuiltinPostIncMember {
+ MOTH_INSTR_HEADER
+ Param base;
+ VM::String *member;
+ Param result;
+ };
+ struct instr_callBuiltinPostIncSubscript {
+ MOTH_INSTR_HEADER
+ Param base;
+ Param index;
+ Param result;
+ };
+ struct instr_callBuiltinPostIncName {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param result;
+ };
+ struct instr_callBuiltinPostIncValue {
+ MOTH_INSTR_HEADER
+ Param value;
+ Param result;
+ };
+ struct instr_callBuiltinPostDecMember {
+ MOTH_INSTR_HEADER
+ Param base;
+ VM::String *member;
+ Param result;
+ };
+ struct instr_callBuiltinPostDecSubscript {
+ MOTH_INSTR_HEADER
+ Param base;
+ Param index;
+ Param result;
+ };
+ struct instr_callBuiltinPostDecName {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param result;
+ };
+ struct instr_callBuiltinPostDecValue {
+ MOTH_INSTR_HEADER
+ Param value;
+ Param result;
+ };
+ struct instr_callBuiltinDeclareVar {
+ MOTH_INSTR_HEADER
+ VM::String *varName;
+ bool isDeletable;
+ };
+ struct instr_callBuiltinDefineGetterSetter {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param object;
+ Param getter;
+ Param setter;
+ };
+ struct instr_callBuiltinDefineProperty {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ Param object;
+ Param value;
+ };
+ struct instr_callBuiltinDefineArray {
+ MOTH_INSTR_HEADER
+ quint32 argc;
+ quint32 args;
+ Param result;
+ };
+ struct instr_createValue {
+ MOTH_INSTR_HEADER
+ quint32 argc;
+ quint32 args;
+ Param func;
+ Param result;
+ };
+ struct instr_createProperty {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ quint32 argc;
+ quint32 args;
+ Param base;
+ Param result;
+ };
+ struct instr_createActivationProperty {
+ MOTH_INSTR_HEADER
+ VM::String *name;
+ quint32 argc;
+ quint32 args;
+ Param result;
+ };
+ struct instr_jump {
+ MOTH_INSTR_HEADER
+ ptrdiff_t offset;
+ };
+ struct instr_cjump {
+ MOTH_INSTR_HEADER
+ ptrdiff_t offset;
+ Param condition;
+ };
+ struct instr_unop {
+ MOTH_INSTR_HEADER
+ VM::UnaryOpName alu;
+ Param source;
+ Param result;
+ };
+ struct instr_binop {
+ MOTH_INSTR_HEADER
+ VM::BinOp alu;
+ Param lhs;
+ Param rhs;
+ Param result;
+ };
+ struct instr_loadThis {
+ MOTH_INSTR_HEADER
+ Param result;
+ };
+ struct instr_inplaceElementOp {
+ MOTH_INSTR_HEADER
+ VM::InplaceBinOpElement alu;
+ Param base;
+ Param index;
+ Param source;
+ };
+ struct instr_inplaceMemberOp {
+ MOTH_INSTR_HEADER
+ VM::InplaceBinOpMember alu;
+ VM::String *member;
+ Param base;
+ Param source;
+ };
+ struct instr_inplaceNameOp {
+ MOTH_INSTR_HEADER
+ VM::InplaceBinOpName alu;
+ VM::String *name;
+ Param source;
+ };
+
+ instr_common common;
+ instr_ret ret;
+ instr_loadValue loadValue;
+ instr_moveTemp moveTemp;
+ instr_loadClosure loadClosure;
+ instr_loadName loadName;
+ instr_storeName storeName;
+ instr_loadElement loadElement;
+ instr_storeElement storeElement;
+ instr_loadProperty loadProperty;
+ instr_storeProperty storeProperty;
+ instr_push push;
+ instr_enterTry enterTry;
+ instr_callValue callValue;
+ instr_callProperty callProperty;
+ instr_callElement callElement;
+ instr_callActivationProperty callActivationProperty;
+ instr_callBuiltinThrow callBuiltinThrow;
+ instr_callBuiltinFinishTry callBuiltinFinishTry;
+ instr_callBuiltinPushScope callBuiltinPushScope;
+ instr_callBuiltinPopScope callBuiltinPopScope;
+ instr_callBuiltinForeachIteratorObject callBuiltinForeachIteratorObject;
+ instr_callBuiltinForeachNextPropertyName callBuiltinForeachNextPropertyName;
+ instr_callBuiltinDeleteMember callBuiltinDeleteMember;
+ instr_callBuiltinDeleteSubscript callBuiltinDeleteSubscript;
+ instr_callBuiltinDeleteName callBuiltinDeleteName;
+ instr_callBuiltinTypeofMember callBuiltinTypeofMember;
+ instr_callBuiltinTypeofSubscript callBuiltinTypeofSubscript;
+ instr_callBuiltinTypeofName callBuiltinTypeofName;
+ instr_callBuiltinTypeofValue callBuiltinTypeofValue;
+ instr_callBuiltinPostIncMember callBuiltinPostIncMember;
+ instr_callBuiltinPostIncSubscript callBuiltinPostIncSubscript;
+ instr_callBuiltinPostIncName callBuiltinPostIncName;
+ instr_callBuiltinPostIncValue callBuiltinPostIncValue;
+ instr_callBuiltinPostDecMember callBuiltinPostDecMember;
+ instr_callBuiltinPostDecSubscript callBuiltinPostDecSubscript;
+ instr_callBuiltinPostDecName callBuiltinPostDecName;
+ instr_callBuiltinPostDecValue callBuiltinPostDecValue;
+ instr_callBuiltinDeclareVar callBuiltinDeclareVar;
+ instr_callBuiltinDefineGetterSetter callBuiltinDefineGetterSetter;
+ instr_callBuiltinDefineProperty callBuiltinDefineProperty;
+ instr_callBuiltinDefineArray callBuiltinDefineArray;
+ instr_createValue createValue;
+ instr_createProperty createProperty;
+ instr_createActivationProperty createActivationProperty;
+ instr_jump jump;
+ instr_cjump cjump;
+ instr_unop unop;
+ instr_binop binop;
+ instr_loadThis loadThis;
+ instr_inplaceElementOp inplaceElementOp;
+ instr_inplaceMemberOp inplaceMemberOp;
+ instr_inplaceNameOp inplaceNameOp;
+
+ static int size(Type type);
+};
+
+template<int N>
+struct InstrMeta {
+};
+
+#define MOTH_INSTR_META_TEMPLATE(I, FMT) \
+ template<> struct InstrMeta<(int)Instr::I> { \
+ enum { Size = MOTH_INSTR_SIZE(I, FMT) }; \
+ typedef Instr::instr_##FMT DataType; \
+ static const DataType &data(const Instr &instr) { return instr.FMT; } \
+ static void setData(Instr &instr, const DataType &v) { instr.FMT = v; } \
+ };
+FOR_EACH_MOTH_INSTR(MOTH_INSTR_META_TEMPLATE);
+#undef MOTH_INSTR_META_TEMPLATE
+
+template<int InstrType>
+class InstrData : public InstrMeta<InstrType>::DataType
+{
+};
+
+} // namespace Moth
+} // namespace QQmlJS
+
+#endif // QV4INSTR_MOTH_P_H
diff --git a/src/qml/qml/v4vm/moth/qv4isel_moth.cpp b/src/qml/qml/v4vm/moth/qv4isel_moth.cpp
new file mode 100644
index 0000000000..00ae4e1029
--- /dev/null
+++ b/src/qml/qml/v4vm/moth/qv4isel_moth.cpp
@@ -0,0 +1,812 @@
+#include "qv4isel_util_p.h"
+#include "qv4isel_moth_p.h"
+#include "qv4vme_moth_p.h"
+#include "qv4functionobject.h"
+#include "qv4regexpobject.h"
+#include "debugging.h"
+
+using namespace QQmlJS;
+using namespace QQmlJS::Moth;
+
+namespace {
+
+inline VM::BinOp aluOpFunction(V4IR::AluOp op)
+{
+ switch (op) {
+ case V4IR::OpInvalid:
+ return 0;
+ case V4IR::OpIfTrue:
+ return 0;
+ case V4IR::OpNot:
+ return 0;
+ case V4IR::OpUMinus:
+ return 0;
+ case V4IR::OpUPlus:
+ return 0;
+ case V4IR::OpCompl:
+ return 0;
+ case V4IR::OpBitAnd:
+ return VM::__qmljs_bit_and;
+ case V4IR::OpBitOr:
+ return VM::__qmljs_bit_or;
+ case V4IR::OpBitXor:
+ return VM::__qmljs_bit_xor;
+ case V4IR::OpAdd:
+ return VM::__qmljs_add;
+ case V4IR::OpSub:
+ return VM::__qmljs_sub;
+ case V4IR::OpMul:
+ return VM::__qmljs_mul;
+ case V4IR::OpDiv:
+ return VM::__qmljs_div;
+ case V4IR::OpMod:
+ return VM::__qmljs_mod;
+ case V4IR::OpLShift:
+ return VM::__qmljs_shl;
+ case V4IR::OpRShift:
+ return VM::__qmljs_shr;
+ case V4IR::OpURShift:
+ return VM::__qmljs_ushr;
+ case V4IR::OpGt:
+ return VM::__qmljs_gt;
+ case V4IR::OpLt:
+ return VM::__qmljs_lt;
+ case V4IR::OpGe:
+ return VM::__qmljs_ge;
+ case V4IR::OpLe:
+ return VM::__qmljs_le;
+ case V4IR::OpEqual:
+ return VM::__qmljs_eq;
+ case V4IR::OpNotEqual:
+ return VM::__qmljs_ne;
+ case V4IR::OpStrictEqual:
+ return VM::__qmljs_se;
+ case V4IR::OpStrictNotEqual:
+ return VM::__qmljs_sne;
+ case V4IR::OpInstanceof:
+ return VM::__qmljs_instanceof;
+ case V4IR::OpIn:
+ return VM::__qmljs_in;
+ case V4IR::OpAnd:
+ return 0;
+ case V4IR::OpOr:
+ return 0;
+ default:
+ assert(!"Unknown AluOp");
+ return 0;
+ }
+};
+
+} // anonymous namespace
+
+InstructionSelection::InstructionSelection(VM::ExecutionEngine *engine, V4IR::Module *module)
+ : EvalInstructionSelection(engine, module)
+ , _function(0)
+ , _vmFunction(0)
+ , _block(0)
+ , _codeStart(0)
+ , _codeNext(0)
+ , _codeEnd(0)
+{
+}
+
+InstructionSelection::~InstructionSelection()
+{
+}
+
+void InstructionSelection::run(VM::Function *vmFunction, V4IR::Function *function)
+{
+ V4IR::BasicBlock *block;
+
+ QHash<V4IR::BasicBlock *, QVector<ptrdiff_t> > patches;
+ QHash<V4IR::BasicBlock *, ptrdiff_t> addrs;
+
+ int codeSize = 4096;
+ uchar *codeStart = new uchar[codeSize];
+ uchar *codeNext = codeStart;
+ uchar *codeEnd = codeStart + codeSize;
+
+ qSwap(_function, function);
+ qSwap(_vmFunction, vmFunction);
+ qSwap(block, _block);
+ qSwap(patches, _patches);
+ qSwap(addrs, _addrs);
+ qSwap(codeStart, _codeStart);
+ qSwap(codeNext, _codeNext);
+ qSwap(codeEnd, _codeEnd);
+
+ int locals = frameSize();
+ assert(locals >= 0);
+
+ Instruction::Push push;
+ push.value = quint32(locals);
+ addInstruction(push);
+
+ foreach (_block, _function->basicBlocks) {
+ _addrs.insert(_block, _codeNext - _codeStart);
+
+ foreach (V4IR::Stmt *s, _block->statements)
+ s->accept(this);
+ }
+
+ patchJumpAddresses();
+
+ _vmFunction->code = VME::exec;
+ _vmFunction->codeData = squeezeCode();
+
+ qSwap(_function, function);
+ qSwap(_vmFunction, vmFunction);
+ qSwap(block, _block);
+ qSwap(patches, _patches);
+ qSwap(addrs, _addrs);
+ qSwap(codeStart, _codeStart);
+ qSwap(codeNext, _codeNext);
+ qSwap(codeEnd, _codeEnd);
+
+ delete[] codeStart;
+}
+
+void InstructionSelection::callValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ Instruction::CallValue call;
+ prepareCallArgs(args, call.argc, call.args);
+ call.dest = getParam(value);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ // call the property on the loaded base
+ Instruction::CallProperty call;
+ call.base = getParam(base);
+ call.name = identifier(name);
+ prepareCallArgs(args, call.argc, call.args);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ // call the property on the loaded base
+ Instruction::CallElement call;
+ call.base = getParam(base);
+ call.index = getParam(index);
+ prepareCallArgs(args, call.argc, call.args);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::constructActivationProperty(V4IR::Name *func,
+ V4IR::ExprList *args,
+ V4IR::Temp *result)
+{
+ Instruction::CreateActivationProperty create;
+ create.name = identifier(*func->id);
+ prepareCallArgs(args, create.argc, create.args);
+ create.result = getResultParam(result);
+ addInstruction(create);
+}
+
+void InstructionSelection::constructProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ Instruction::CreateProperty create;
+ create.base = getParam(base);
+ create.name = identifier(name);
+ prepareCallArgs(args, create.argc, create.args);
+ create.result = getResultParam(result);
+ addInstruction(create);
+}
+
+void InstructionSelection::constructValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ Instruction::CreateValue create;
+ create.func = getParam(value);
+ prepareCallArgs(args, create.argc, create.args);
+ create.result = getResultParam(result);
+ addInstruction(create);
+}
+
+void InstructionSelection::loadThisObject(V4IR::Temp *temp)
+{
+ Instruction::LoadThis load;
+ load.result = getResultParam(temp);
+ addInstruction(load);
+}
+
+void InstructionSelection::loadConst(V4IR::Const *sourceConst, V4IR::Temp *targetTemp)
+{
+ assert(sourceConst);
+
+ Instruction::LoadValue load;
+ load.value = getParam(sourceConst);
+ load.result = getResultParam(targetTemp);
+ addInstruction(load);
+}
+
+void InstructionSelection::loadString(const QString &str, V4IR::Temp *targetTemp)
+{
+ Instruction::LoadValue load;
+ load.value = Instr::Param::createValue(VM::Value::fromString(identifier(str)));
+ load.result = getResultParam(targetTemp);
+ addInstruction(load);
+}
+
+void InstructionSelection::loadRegexp(V4IR::RegExp *sourceRegexp, V4IR::Temp *targetTemp)
+{
+ VM::Value v = VM::Value::fromObject(engine()->newRegExpObject(
+ *sourceRegexp->value,
+ sourceRegexp->flags));
+ _vmFunction->generatedValues.append(v);
+
+ Instruction::LoadValue load;
+ load.value = Instr::Param::createValue(v);
+ load.result = getResultParam(targetTemp);
+ addInstruction(load);
+}
+
+void InstructionSelection::getActivationProperty(const V4IR::Name *name, V4IR::Temp *temp)
+{
+ Instruction::LoadName load;
+ load.name = identifier(*name->id);
+ load.result = getResultParam(temp);
+ addInstruction(load);
+}
+
+void InstructionSelection::setActivationProperty(V4IR::Temp *source, const QString &targetName)
+{
+ Instruction::StoreName store;
+ store.source = getParam(source);
+ store.name = identifier(targetName);
+ addInstruction(store);
+}
+
+void InstructionSelection::initClosure(V4IR::Closure *closure, V4IR::Temp *target)
+{
+ VM::Function *vmFunc = vmFunction(closure->value);
+ assert(vmFunc);
+ Instruction::LoadClosure load;
+ load.value = vmFunc;
+ load.result = getResultParam(target);
+ addInstruction(load);
+}
+
+void InstructionSelection::getProperty(V4IR::Temp *base, const QString &name, V4IR::Temp *target)
+{
+ Instruction::LoadProperty load;
+ load.base = getParam(base);
+ load.name = identifier(name);
+ load.result = getResultParam(target);
+ addInstruction(load);
+}
+
+void InstructionSelection::setProperty(V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName)
+{
+ Instruction::StoreProperty store;
+ store.base = getParam(targetBase);
+ store.name = identifier(targetName);
+ store.source = getParam(source);
+ addInstruction(store);
+}
+
+void InstructionSelection::getElement(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *target)
+{
+ Instruction::LoadElement load;
+ load.base = getParam(base);
+ load.index = getParam(index);
+ load.result = getResultParam(target);
+ addInstruction(load);
+}
+
+void InstructionSelection::setElement(V4IR::Temp *source, V4IR::Temp *targetBase, V4IR::Temp *targetIndex)
+{
+ Instruction::StoreElement store;
+ store.base = getParam(targetBase);
+ store.index = getParam(targetIndex);
+ store.source = getParam(source);
+ addInstruction(store);
+}
+
+void InstructionSelection::copyValue(V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp)
+{
+ Instruction::MoveTemp move;
+ move.source = getParam(sourceTemp);
+ move.result = getResultParam(targetTemp);
+ addInstruction(move);
+}
+
+void InstructionSelection::unop(V4IR::AluOp oper, V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp)
+{
+ VM::UnaryOpName op = 0;
+ switch (oper) {
+ case V4IR::OpIfTrue: assert(!"unreachable"); break;
+ case V4IR::OpNot: op = VM::__qmljs_not; break;
+ case V4IR::OpUMinus: op = VM::__qmljs_uminus; break;
+ case V4IR::OpUPlus: op = VM::__qmljs_uplus; break;
+ case V4IR::OpCompl: op = VM::__qmljs_compl; break;
+ case V4IR::OpIncrement: op = VM::__qmljs_increment; break;
+ case V4IR::OpDecrement: op = VM::__qmljs_decrement; break;
+ default: assert(!"unreachable"); break;
+ } // switch
+
+ if (op) {
+ Instruction::Unop unop;
+ unop.alu = op;
+ unop.source = getParam(sourceTemp);
+ unop.result = getResultParam(targetTemp);
+ addInstruction(unop);
+ } else {
+ qWarning(" UNOP1");
+ }
+}
+
+void InstructionSelection::binop(V4IR::AluOp oper, V4IR::Temp *leftSource, V4IR::Temp *rightSource, V4IR::Temp *target)
+{
+ Instruction::Binop binop;
+ binop.alu = aluOpFunction(oper);
+ binop.lhs = getParam(leftSource);
+ binop.rhs = getParam(rightSource);
+ binop.result = getResultParam(target);
+ addInstruction(binop);
+}
+
+void InstructionSelection::inplaceNameOp(V4IR::AluOp oper, V4IR::Temp *rightSource, const QString &targetName)
+{
+ VM::InplaceBinOpName op = 0;
+ switch (oper) {
+ case V4IR::OpBitAnd: op = VM::__qmljs_inplace_bit_and_name; break;
+ case V4IR::OpBitOr: op = VM::__qmljs_inplace_bit_or_name; break;
+ case V4IR::OpBitXor: op = VM::__qmljs_inplace_bit_xor_name; break;
+ case V4IR::OpAdd: op = VM::__qmljs_inplace_add_name; break;
+ case V4IR::OpSub: op = VM::__qmljs_inplace_sub_name; break;
+ case V4IR::OpMul: op = VM::__qmljs_inplace_mul_name; break;
+ case V4IR::OpDiv: op = VM::__qmljs_inplace_div_name; break;
+ case V4IR::OpMod: op = VM::__qmljs_inplace_mod_name; break;
+ case V4IR::OpLShift: op = VM::__qmljs_inplace_shl_name; break;
+ case V4IR::OpRShift: op = VM::__qmljs_inplace_shr_name; break;
+ case V4IR::OpURShift: op = VM::__qmljs_inplace_ushr_name; break;
+ default: break;
+ }
+
+ if (op) {
+ Instruction::InplaceNameOp ieo;
+ ieo.alu = op;
+ ieo.name = identifier(targetName);
+ ieo.source = getParam(rightSource);
+ addInstruction(ieo);
+ }
+}
+
+void InstructionSelection::inplaceElementOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBaseTemp, V4IR::Temp *targetIndexTemp)
+{
+ VM::InplaceBinOpElement op = 0;
+ switch (oper) {
+ case V4IR::OpBitAnd: op = VM::__qmljs_inplace_bit_and_element; break;
+ case V4IR::OpBitOr: op = VM::__qmljs_inplace_bit_or_element; break;
+ case V4IR::OpBitXor: op = VM::__qmljs_inplace_bit_xor_element; break;
+ case V4IR::OpAdd: op = VM::__qmljs_inplace_add_element; break;
+ case V4IR::OpSub: op = VM::__qmljs_inplace_sub_element; break;
+ case V4IR::OpMul: op = VM::__qmljs_inplace_mul_element; break;
+ case V4IR::OpDiv: op = VM::__qmljs_inplace_div_element; break;
+ case V4IR::OpMod: op = VM::__qmljs_inplace_mod_element; break;
+ case V4IR::OpLShift: op = VM::__qmljs_inplace_shl_element; break;
+ case V4IR::OpRShift: op = VM::__qmljs_inplace_shr_element; break;
+ case V4IR::OpURShift: op = VM::__qmljs_inplace_ushr_element; break;
+ default: break;
+ }
+
+ Instruction::InplaceElementOp ieo;
+ ieo.alu = op;
+ ieo.base = getParam(targetBaseTemp);
+ ieo.index = getParam(targetIndexTemp);
+ ieo.source = getParam(source);
+ addInstruction(ieo);
+}
+
+void InstructionSelection::inplaceMemberOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName)
+{
+ VM::InplaceBinOpMember op = 0;
+ switch (oper) {
+ case V4IR::OpBitAnd: op = VM::__qmljs_inplace_bit_and_member; break;
+ case V4IR::OpBitOr: op = VM::__qmljs_inplace_bit_or_member; break;
+ case V4IR::OpBitXor: op = VM::__qmljs_inplace_bit_xor_member; break;
+ case V4IR::OpAdd: op = VM::__qmljs_inplace_add_member; break;
+ case V4IR::OpSub: op = VM::__qmljs_inplace_sub_member; break;
+ case V4IR::OpMul: op = VM::__qmljs_inplace_mul_member; break;
+ case V4IR::OpDiv: op = VM::__qmljs_inplace_div_member; break;
+ case V4IR::OpMod: op = VM::__qmljs_inplace_mod_member; break;
+ case V4IR::OpLShift: op = VM::__qmljs_inplace_shl_member; break;
+ case V4IR::OpRShift: op = VM::__qmljs_inplace_shr_member; break;
+ case V4IR::OpURShift: op = VM::__qmljs_inplace_ushr_member; break;
+ default: break;
+ }
+
+ Instruction::InplaceMemberOp imo;
+ imo.alu = op;
+ imo.base = getParam(targetBase);
+ imo.member = identifier(targetName);
+ imo.source = getParam(source);
+ addInstruction(imo);
+}
+
+void InstructionSelection::prepareCallArgs(V4IR::ExprList *e, quint32 &argc, quint32 &args)
+{
+ bool singleArgIsTemp = false;
+ if (e && e->next == 0 && e->expr->asTemp()) {
+ // ok, only 1 argument in the call...
+ const int idx = e->expr->asTemp()->index;
+ // We can only pass a reference into the stack, which holds temps that
+ // are not arguments (idx >= 0) nor locals (idx >= localCound).
+ singleArgIsTemp = idx >= _function->locals.size() && e->expr->asTemp()->scope == 0;
+ }
+
+ if (singleArgIsTemp) {
+ // We pass single arguments as references to the stack, but only if it's not a local or an argument.
+ argc = 1;
+ args = e->expr->asTemp()->index - _function->locals.size();
+ } else if (e) {
+ // We need to move all the temps into the function arg array
+ int argLocation = outgoingArgumentTempStart();
+ assert(argLocation >= 0);
+ argc = 0;
+ args = argLocation;
+ while (e) {
+ Instruction::MoveTemp move;
+ move.source = getParam(e->expr);
+ move.result = Instr::Param::createTemp(argLocation);
+ addInstruction(move);
+ ++argLocation;
+ ++argc;
+ e = e->next;
+ }
+ } else {
+ argc = 0;
+ args = 0;
+ }
+}
+
+void InstructionSelection::visitJump(V4IR::Jump *s)
+{
+ Instruction::Jump jump;
+ jump.offset = 0;
+ ptrdiff_t loc = addInstruction(jump) + (((const char *)&jump.offset) - ((const char *)&jump));
+
+ _patches[s->target].append(loc);
+}
+
+void InstructionSelection::visitCJump(V4IR::CJump *s)
+{
+ Instr::Param condition;
+ if (V4IR::Temp *t = s->cond->asTemp()) {
+ condition = getResultParam(t);
+ } else if (V4IR::Binop *b = s->cond->asBinop()) {
+ condition = getResultParam(0);
+ Instruction::Binop binop;
+ binop.alu = aluOpFunction(b->op);
+ binop.lhs = getParam(b->left);
+ binop.rhs = getParam(b->right);
+ binop.result = condition;
+ addInstruction(binop);
+ } else {
+ Q_UNIMPLEMENTED();
+ }
+
+ Instruction::CJump jump;
+ jump.offset = 0;
+ jump.condition = condition;
+ ptrdiff_t trueLoc = addInstruction(jump) + (((const char *)&jump.offset) - ((const char *)&jump));
+ _patches[s->iftrue].append(trueLoc);
+
+ if (_block->index + 1 != s->iffalse->index) {
+ Instruction::Jump jump;
+ jump.offset = 0;
+ ptrdiff_t falseLoc = addInstruction(jump) + (((const char *)&jump.offset) - ((const char *)&jump));
+ _patches[s->iffalse].append(falseLoc);
+ }
+}
+
+void InstructionSelection::visitRet(V4IR::Ret *s)
+{
+ Instruction::Ret ret;
+ ret.result = getParam(s->expr);
+ addInstruction(ret);
+}
+
+void InstructionSelection::visitTry(V4IR::Try *t)
+{
+ Instruction::EnterTry enterTry;
+ enterTry.tryOffset = 0;
+ enterTry.catchOffset = 0;
+ enterTry.exceptionVarName = identifier(t->exceptionVarName);
+ enterTry.exceptionVar = getParam(t->exceptionVar);
+ ptrdiff_t enterTryLoc = addInstruction(enterTry);
+
+ ptrdiff_t tryLoc = enterTryLoc + (((const char *)&enterTry.tryOffset) - ((const char *)&enterTry));
+ _patches[t->tryBlock].append(tryLoc);
+
+ ptrdiff_t catchLoc = enterTryLoc + (((const char *)&enterTry.catchOffset) - ((const char *)&enterTry));
+ _patches[t->catchBlock].append(catchLoc);
+}
+
+void InstructionSelection::callBuiltinInvalid(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ Instruction::CallActivationProperty call;
+ call.name = identifier(*func->id);
+ prepareCallArgs(args, call.argc, call.args);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinTypeofMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinTypeofMember call;
+ call.base = getParam(base);
+ call.member = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinTypeofSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinTypeofSubscript call;
+ call.base = getParam(base);
+ call.index = getParam(index);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinTypeofName(const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinTypeofName call;
+ call.name = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinTypeofValue(V4IR::Temp *value, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinTypeofValue call;
+ call.value = getParam(value);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDeleteMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinDeleteMember call;
+ call.base = getParam(base);
+ call.member = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDeleteSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinDeleteSubscript call;
+ call.base = getParam(base);
+ call.index = getParam(index);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDeleteName(const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinDeleteName call;
+ call.name = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDeleteValue(V4IR::Temp *result)
+{
+ Instruction::LoadValue load;
+ load.value = Instr::Param::createValue(VM::Value::fromBoolean(false));
+ load.result = getResultParam(result);
+ addInstruction(load);
+}
+
+void InstructionSelection::callBuiltinPostDecrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostDecMember call;
+ call.base = getParam(base);
+ call.member = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPostDecrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostDecSubscript call;
+ call.base = getParam(base);
+ call.index = getParam(index);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPostDecrementName(const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostDecName call;
+ call.name = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPostDecrementValue(V4IR::Temp *value, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostDecValue call;
+ call.value = getParam(value);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPostIncrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostIncMember call;
+ call.base = getParam(base);
+ call.member = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPostIncrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostIncSubscript call;
+ call.base = getParam(base);
+ call.index = getParam(index);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPostIncrementName(const QString &name, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostIncName call;
+ call.name = identifier(name);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPostIncrementValue(V4IR::Temp *value, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinPostIncValue call;
+ call.value = getParam(value);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinThrow(V4IR::Temp *arg)
+{
+ Instruction::CallBuiltinThrow call;
+ call.arg = getParam(arg);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinFinishTry()
+{
+ Instruction::CallBuiltinFinishTry call;
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinForeachIteratorObject(V4IR::Temp *arg, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinForeachIteratorObject call;
+ call.arg = getParam(arg);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinForeachNextPropertyname(V4IR::Temp *arg, V4IR::Temp *result)
+{
+ Instruction::CallBuiltinForeachNextPropertyName call;
+ call.arg = getParam(arg);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPushWithScope(V4IR::Temp *arg)
+{
+ Instruction::CallBuiltinPushScope call;
+ call.arg = getParam(arg);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinPopScope()
+{
+ Instruction::CallBuiltinPopScope call;
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDeclareVar(bool deletable, const QString &name)
+{
+ Instruction::CallBuiltinDeclareVar call;
+ call.isDeletable = deletable;
+ call.varName = identifier(name);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDefineGetterSetter(V4IR::Temp *object, const QString &name, V4IR::Temp *getter, V4IR::Temp *setter)
+{
+ Instruction::CallBuiltinDefineGetterSetter call;
+ call.object = getParam(object);
+ call.name = identifier(name);
+ call.getter = getParam(getter);
+ call.setter = getParam(setter);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDefineProperty(V4IR::Temp *object, const QString &name, V4IR::Temp *value)
+{
+ Instruction::CallBuiltinDefineProperty call;
+ call.object = getParam(object);
+ call.name = identifier(name);
+ call.value = getParam(value);
+ addInstruction(call);
+}
+
+void InstructionSelection::callBuiltinDefineArray(V4IR::Temp *result, V4IR::ExprList *args)
+{
+ Instruction::CallBuiltinDefineArray call;
+ prepareCallArgs(args, call.argc, call.args);
+ call.result = getResultParam(result);
+ addInstruction(call);
+}
+
+ptrdiff_t InstructionSelection::addInstructionHelper(Instr::Type type, Instr &instr)
+{
+#ifdef MOTH_THREADED_INTERPRETER
+ instr.common.code = VME::instructionJumpTable()[static_cast<int>(type)];
+#else
+ instr.common.instructionType = type;
+#endif
+
+ int instructionSize = Instr::size(type);
+ if (_codeEnd - _codeNext < instructionSize) {
+ int currSize = _codeEnd - _codeStart;
+ uchar *newCode = new uchar[currSize * 2];
+ ::memset(newCode + currSize, 0, currSize);
+ ::memcpy(newCode, _codeStart, currSize);
+ _codeNext = _codeNext - _codeStart + newCode;
+ delete[] _codeStart;
+ _codeStart = newCode;
+ _codeEnd = _codeStart + currSize * 2;
+ }
+
+ ::memcpy(_codeNext, reinterpret_cast<const char *>(&instr), instructionSize);
+ ptrdiff_t ptrOffset = _codeNext - _codeStart;
+ _codeNext += instructionSize;
+
+ return ptrOffset;
+}
+
+void InstructionSelection::patchJumpAddresses()
+{
+ typedef QHash<V4IR::BasicBlock *, QVector<ptrdiff_t> >::ConstIterator PatchIt;
+ for (PatchIt i = _patches.begin(), ei = _patches.end(); i != ei; ++i) {
+ Q_ASSERT(_addrs.contains(i.key()));
+ ptrdiff_t target = _addrs.value(i.key());
+
+ const QVector<ptrdiff_t> &patchList = i.value();
+ for (int ii = 0, eii = patchList.count(); ii < eii; ++ii) {
+ ptrdiff_t patch = patchList.at(ii);
+
+ *((ptrdiff_t *)(_codeStart + patch)) = target - patch;
+ }
+ }
+
+ _patches.clear();
+ _addrs.clear();
+}
+
+uchar *InstructionSelection::squeezeCode() const
+{
+ int codeSize = _codeNext - _codeStart;
+ uchar *squeezed = new uchar[codeSize];
+ ::memcpy(squeezed, _codeStart, codeSize);
+ return squeezed;
+}
+
+VM::String *InstructionSelection::identifier(const QString &s)
+{
+ VM::String *str = engine()->newIdentifier(s);
+ _vmFunction->identifiers.append(str);
+ return str;
+}
diff --git a/src/qml/qml/v4vm/moth/qv4isel_moth_p.h b/src/qml/qml/v4vm/moth/qv4isel_moth_p.h
new file mode 100644
index 0000000000..5e057139e2
--- /dev/null
+++ b/src/qml/qml/v4vm/moth/qv4isel_moth_p.h
@@ -0,0 +1,169 @@
+#ifndef QV4ISEL_MOTH_P_H
+#define QV4ISEL_MOTH_P_H
+
+#include "qv4global.h"
+#include "qv4isel_p.h"
+#include "qv4jsir_p.h"
+#include "qv4object.h"
+#include "qv4instr_moth_p.h"
+
+namespace QQmlJS {
+namespace Moth {
+
+class Q_V4_EXPORT InstructionSelection:
+ public V4IR::InstructionSelection,
+ public EvalInstructionSelection
+{
+public:
+ InstructionSelection(VM::ExecutionEngine *engine, V4IR::Module *module);
+ ~InstructionSelection();
+
+ virtual void run(VM::Function *vmFunction, V4IR::Function *function);
+
+protected:
+ virtual void visitJump(V4IR::Jump *);
+ virtual void visitCJump(V4IR::CJump *);
+ virtual void visitRet(V4IR::Ret *);
+ virtual void visitTry(V4IR::Try *);
+
+ virtual void callBuiltinInvalid(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void callBuiltinTypeofMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinTypeofSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinTypeofName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinTypeofValue(V4IR::Temp *value, V4IR::Temp *result);
+ virtual void callBuiltinDeleteMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinDeleteSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinDeleteName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinDeleteValue(V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementValue(V4IR::Temp *value, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementValue(V4IR::Temp *value, V4IR::Temp *result);
+ virtual void callBuiltinThrow(V4IR::Temp *arg);
+ virtual void callBuiltinFinishTry();
+ virtual void callBuiltinForeachIteratorObject(V4IR::Temp *arg, V4IR::Temp *result);
+ virtual void callBuiltinForeachNextPropertyname(V4IR::Temp *arg, V4IR::Temp *result);
+ virtual void callBuiltinPushWithScope(V4IR::Temp *arg);
+ virtual void callBuiltinPopScope();
+ virtual void callBuiltinDeclareVar(bool deletable, const QString &name);
+ virtual void callBuiltinDefineGetterSetter(V4IR::Temp *object, const QString &name, V4IR::Temp *getter, V4IR::Temp *setter);
+ virtual void callBuiltinDefineProperty(V4IR::Temp *object, const QString &name, V4IR::Temp *value);
+ virtual void callBuiltinDefineArray(V4IR::Temp *result, V4IR::ExprList *args);
+ virtual void callValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void callProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void callSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void constructActivationProperty(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void constructProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void constructValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void loadThisObject(V4IR::Temp *temp);
+ virtual void loadConst(V4IR::Const *sourceConst, V4IR::Temp *targetTemp);
+ virtual void loadString(const QString &str, V4IR::Temp *targetTemp);
+ virtual void loadRegexp(V4IR::RegExp *sourceRegexp, V4IR::Temp *targetTemp);
+ virtual void getActivationProperty(const V4IR::Name *name, V4IR::Temp *temp);
+ virtual void setActivationProperty(V4IR::Temp *source, const QString &targetName);
+ virtual void initClosure(V4IR::Closure *closure, V4IR::Temp *target);
+ virtual void getProperty(V4IR::Temp *base, const QString &name, V4IR::Temp *target);
+ virtual void setProperty(V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName);
+ virtual void getElement(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *target);
+ virtual void setElement(V4IR::Temp *source, V4IR::Temp *targetBase, V4IR::Temp *targetIndex);
+ virtual void copyValue(V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp);
+ virtual void unop(V4IR::AluOp oper, V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp);
+ virtual void binop(V4IR::AluOp oper, V4IR::Temp *leftSource, V4IR::Temp *rightSource, V4IR::Temp *target);
+ virtual void inplaceNameOp(V4IR::AluOp oper, V4IR::Temp *rightSource, const QString &targetName);
+ virtual void inplaceElementOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBaseTemp, V4IR::Temp *targetIndexTemp);
+ virtual void inplaceMemberOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName);
+
+private:
+ struct Instruction {
+#define MOTH_INSTR_DATA_TYPEDEF(I, FMT) typedef InstrData<Instr::I> I;
+ FOR_EACH_MOTH_INSTR(MOTH_INSTR_DATA_TYPEDEF)
+#undef MOTH_INSTR_DATA_TYPEDEF
+ private:
+ Instruction();
+ };
+
+ Instr::Param getParam(V4IR::Expr *e)
+ {
+ typedef Instr::Param Param;
+ assert(e);
+
+ if (V4IR::Const *c = e->asConst()) {
+ return Param::createValue(convertToValue(c));
+ } else if (V4IR::Temp *t = e->asTemp()) {
+ const int index = t->index;
+ if (index < 0) {
+ return Param::createArgument(-index - 1, t->scope);
+ } else if (!t->scope) {
+ const int localCount = _function->locals.size();
+ if (index < localCount)
+ return Param::createLocal(index);
+ else
+ return Param::createTemp(index - localCount);
+ } else {
+ return Param::createScopedLocal(t->index, t->scope);
+ }
+ } else {
+ Q_UNIMPLEMENTED();
+ return Param();
+ }
+ }
+
+ Instr::Param getResultParam(V4IR::Temp *result)
+ {
+ if (result)
+ return getParam(result);
+ else
+ return Instr::Param::createTemp(scratchTempIndex());
+ }
+
+ void simpleMove(V4IR::Move *);
+ void prepareCallArgs(V4IR::ExprList *, quint32 &, quint32 &);
+
+ int outgoingArgumentTempStart() const { return _function->tempCount - _function->locals.size(); }
+ int scratchTempIndex() const { return outgoingArgumentTempStart() + _function->maxNumberOfArguments; }
+ int frameSize() const { return scratchTempIndex() + 1; }
+
+ template <int Instr>
+ inline ptrdiff_t addInstruction(const InstrData<Instr> &data);
+ ptrdiff_t addInstructionHelper(Instr::Type type, Instr &instr);
+ void patchJumpAddresses();
+ uchar *squeezeCode() const;
+
+ VM::String *identifier(const QString &s);
+
+ V4IR::Function *_function;
+ VM::Function *_vmFunction;
+ V4IR::BasicBlock *_block;
+
+ QHash<V4IR::BasicBlock *, QVector<ptrdiff_t> > _patches;
+ QHash<V4IR::BasicBlock *, ptrdiff_t> _addrs;
+
+ uchar *_codeStart;
+ uchar *_codeNext;
+ uchar *_codeEnd;
+};
+
+class Q_V4_EXPORT ISelFactory: public EvalISelFactory
+{
+public:
+ virtual ~ISelFactory() {}
+ virtual EvalInstructionSelection *create(VM::ExecutionEngine *engine, V4IR::Module *module)
+ { return new InstructionSelection(engine, module); }
+};
+
+template<int InstrT>
+ptrdiff_t InstructionSelection::addInstruction(const InstrData<InstrT> &data)
+{
+ Instr genericInstr;
+ InstrMeta<InstrT>::setData(genericInstr, data);
+ return addInstructionHelper(static_cast<Instr::Type>(InstrT), genericInstr);
+}
+
+} // namespace Moth
+} // namespace QQmlJS
+
+#endif // QV4ISEL_MOTH_P_H
diff --git a/src/qml/qml/v4vm/moth/qv4vme_moth.cpp b/src/qml/qml/v4vm/moth/qv4vme_moth.cpp
new file mode 100644
index 0000000000..a3c34bf340
--- /dev/null
+++ b/src/qml/qml/v4vm/moth/qv4vme_moth.cpp
@@ -0,0 +1,532 @@
+#include "qv4vme_moth_p.h"
+#include "qv4instr_moth_p.h"
+#include "qv4value.h"
+#include "debugging.h"
+
+#include <iostream>
+
+#include "qv4alloca_p.h"
+
+#ifdef DO_TRACE_INSTR
+# define TRACE_INSTR(I) fprintf(stderr, "executing a %s\n", #I);
+# define TRACE(n, str, ...) { char buf[4096]; snprintf(buf, 4096, str, __VA_ARGS__); fprintf(stderr, " %s : %s\n", #n, buf); }
+#else
+# define TRACE_INSTR(I)
+# define TRACE(n, str, ...)
+#endif // DO_TRACE_INSTR
+
+using namespace QQmlJS;
+using namespace QQmlJS::Moth;
+
+class FunctionState: public Debugging::FunctionState
+{
+public:
+ FunctionState(QQmlJS::VM::ExecutionContext *context, const uchar **code)
+ : Debugging::FunctionState(context)
+ , stack(0)
+ , stackSize(0)
+ , code(code)
+ {}
+
+ virtual VM::Value *temp(unsigned idx) { return stack + idx; }
+
+ void setStack(VM::Value *stack, unsigned stackSize)
+ { this->stack = stack; this->stackSize = stackSize; }
+
+private:
+ VM::Value *stack;
+ unsigned stackSize;
+ const uchar **code;
+};
+
+#define MOTH_BEGIN_INSTR_COMMON(I) { \
+ const InstrMeta<(int)Instr::I>::DataType &instr = InstrMeta<(int)Instr::I>::data(*genericInstr); \
+ code += InstrMeta<(int)Instr::I>::Size; \
+ Q_UNUSED(instr); \
+ TRACE_INSTR(I)
+
+#ifdef MOTH_THREADED_INTERPRETER
+
+# define MOTH_BEGIN_INSTR(I) op_##I: \
+ MOTH_BEGIN_INSTR_COMMON(I)
+
+# define MOTH_NEXT_INSTR(I) { \
+ genericInstr = reinterpret_cast<const Instr *>(code); \
+ goto *genericInstr->common.code; \
+ }
+
+# define MOTH_END_INSTR(I) } \
+ genericInstr = reinterpret_cast<const Instr *>(code); \
+ goto *genericInstr->common.code; \
+
+#else
+
+# define MOTH_BEGIN_INSTR(I) \
+ case Instr::I: \
+ MOTH_BEGIN_INSTR_COMMON(I)
+
+# define MOTH_NEXT_INSTR(I) { \
+ break; \
+ }
+
+# define MOTH_END_INSTR(I) } \
+ break;
+
+#endif
+
+#ifdef WITH_STATS
+namespace {
+struct VMStats {
+ quint64 paramIsValue;
+ quint64 paramIsArg;
+ quint64 paramIsLocal;
+ quint64 paramIsTemp;
+ quint64 paramIsScopedLocal;
+
+ VMStats()
+ : paramIsValue(0)
+ , paramIsArg(0)
+ , paramIsLocal(0)
+ , paramIsTemp(0)
+ , paramIsScopedLocal(0)
+ {}
+
+ ~VMStats()
+ { show(); }
+
+ void show() {
+ fprintf(stderr, "VM stats:\n");
+ fprintf(stderr, " value: %lu\n", paramIsValue);
+ fprintf(stderr, " arg: %lu\n", paramIsArg);
+ fprintf(stderr, " local: %lu\n", paramIsLocal);
+ fprintf(stderr, " temp: %lu\n", paramIsTemp);
+ fprintf(stderr, " scoped local: %lu\n", paramIsScopedLocal);
+ }
+};
+static VMStats vmStats;
+#define VMSTATS(what) ++vmStats.what
+}
+#else // !WITH_STATS
+#define VMSTATS(what) {}
+#endif // WITH_STATS
+
+static inline VM::Value *getValueRef(QQmlJS::VM::ExecutionContext *context,
+ VM::Value* stack,
+ const Instr::Param &param
+#if !defined(QT_NO_DEBUG)
+ , unsigned stackSize
+#endif
+ )
+{
+#ifdef DO_TRACE_INSTR
+ if (param.isValue()) {
+ fprintf(stderr, " value %s\n", param.value.toString(context)->toQString().toUtf8().constData());
+ } else if (param.isArgument()) {
+ fprintf(stderr, " argument %d@%d\n", param.index, param.scope);
+ } else if (param.isLocal()) {
+ fprintf(stderr, " local %d\n", param.index);
+ } else if (param.isTemp()) {
+ fprintf(stderr, " temp %d\n", param.index);
+ } else if (param.isScopedLocal()) {
+ fprintf(stderr, " temp %d@%d\n", param.index, param.scope);
+ } else {
+ Q_ASSERT(!"INVALID");
+ }
+#endif // DO_TRACE_INSTR
+
+ if (param.isValue()) {
+ VMSTATS(paramIsValue);
+ return const_cast<VM::Value *>(&param.value);
+ } else if (param.isArgument()) {
+ VMSTATS(paramIsArg);
+ VM::ExecutionContext *c = context;
+ uint scope = param.scope;
+ while (scope--)
+ c = c->outer;
+ VM::CallContext *cc = static_cast<VM::CallContext *>(c);
+ const unsigned arg = param.index;
+ Q_ASSERT(arg >= 0);
+ Q_ASSERT((unsigned) arg < cc->argumentCount);
+ Q_ASSERT(cc->arguments);
+ return cc->arguments + arg;
+ } else if (param.isLocal()) {
+ VMSTATS(paramIsLocal);
+ const unsigned index = param.index;
+ VM::CallContext *c = static_cast<VM::CallContext *>(context);
+ Q_ASSERT(index >= 0);
+ Q_ASSERT(index < context->variableCount());
+ Q_ASSERT(c->locals);
+ return c->locals + index;
+ } else if (param.isTemp()) {
+ VMSTATS(paramIsTemp);
+ Q_ASSERT(param.index < stackSize);
+ return stack + param.index;
+ } else if (param.isScopedLocal()) {
+ VMSTATS(paramIsScopedLocal);
+ VM::ExecutionContext *c = context;
+ uint scope = param.scope;
+ while (scope--)
+ c = c->outer;
+ const unsigned index = param.index;
+ VM::CallContext *cc = static_cast<VM::CallContext *>(c);
+ Q_ASSERT(index >= 0);
+ Q_ASSERT(index < cc->variableCount());
+ Q_ASSERT(cc->locals);
+ return cc->locals + index;
+ } else {
+ Q_UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+#if defined(QT_NO_DEBUG)
+# define VALUE(param) *(VALUEPTR(param))
+
+// The non-temp case might need some tweaking for QML: there it would probably be a value instead of a local.
+# define VALUEPTR(param) \
+ param.isTemp() ? stack + param.index \
+ : (param.isLocal() ? static_cast<VM::CallContext *>(context)->locals + param.index \
+ : getValueRef(context, stack, param))
+#else
+# define VALUE(param) *getValueRef(context, stack, param, stackSize)
+# define VALUEPTR(param) getValueRef(context, stack, param, stackSize)
+#endif
+
+VM::Value VME::run(QQmlJS::VM::ExecutionContext *context, const uchar *&code,
+ VM::Value *stack, unsigned stackSize
+#ifdef MOTH_THREADED_INTERPRETER
+ , void ***storeJumpTable
+#endif
+ )
+{
+#ifdef DO_TRACE_INSTR
+ qDebug("Starting VME with context=%p and code=%p", context, code);
+#endif // DO_TRACE_INSTR
+
+#ifdef MOTH_THREADED_INTERPRETER
+ if (storeJumpTable) {
+#define MOTH_INSTR_ADDR(I, FMT) &&op_##I,
+ static void *jumpTable[] = {
+ FOR_EACH_MOTH_INSTR(MOTH_INSTR_ADDR)
+ };
+#undef MOTH_INSTR_ADDR
+ *storeJumpTable = jumpTable;
+ return VM::Value::undefinedValue();
+ }
+#endif
+
+ FunctionState state(context, &code);
+
+#ifdef MOTH_THREADED_INTERPRETER
+ const Instr *genericInstr = reinterpret_cast<const Instr *>(code);
+ goto *genericInstr->common.code;
+#else
+ for (;;) {
+ const Instr *genericInstr = reinterpret_cast<const Instr *>(code);
+ switch (genericInstr->common.instructionType) {
+#endif
+
+ MOTH_BEGIN_INSTR(MoveTemp)
+ VALUE(instr.result) = VALUE(instr.source);
+ MOTH_END_INSTR(MoveTemp)
+
+ MOTH_BEGIN_INSTR(LoadValue)
+// TRACE(value, "%s", instr.value.toString(context)->toQString().toUtf8().constData());
+ VALUE(instr.result) = VALUE(instr.value);
+ MOTH_END_INSTR(LoadValue)
+
+ MOTH_BEGIN_INSTR(LoadClosure)
+ __qmljs_init_closure(context, VALUEPTR(instr.result), instr.value);
+ MOTH_END_INSTR(LoadClosure)
+
+ MOTH_BEGIN_INSTR(LoadName)
+ TRACE(inline, "property name = %s", instr.name->toQString().toUtf8().constData());
+ __qmljs_get_activation_property(context, VALUEPTR(instr.result), instr.name);
+ MOTH_END_INSTR(LoadName)
+
+ MOTH_BEGIN_INSTR(StoreName)
+ TRACE(inline, "property name = %s", instr.name->toQString().toUtf8().constData());
+ __qmljs_set_activation_property(context, instr.name, VALUE(instr.source));
+ MOTH_END_INSTR(StoreName)
+
+ MOTH_BEGIN_INSTR(LoadElement)
+ __qmljs_get_element(context, VALUEPTR(instr.result), VALUE(instr.base), VALUE(instr.index));
+ MOTH_END_INSTR(LoadElement)
+
+ MOTH_BEGIN_INSTR(StoreElement)
+ __qmljs_set_element(context, VALUE(instr.base), VALUE(instr.index), VALUE(instr.source));
+ MOTH_END_INSTR(StoreElement)
+
+ MOTH_BEGIN_INSTR(LoadProperty)
+ __qmljs_get_property(context, VALUEPTR(instr.result), VALUE(instr.base), instr.name);
+ MOTH_END_INSTR(LoadProperty)
+
+ MOTH_BEGIN_INSTR(StoreProperty)
+ __qmljs_set_property(context, VALUE(instr.base), instr.name, VALUE(instr.source));
+ MOTH_END_INSTR(StoreProperty)
+
+ MOTH_BEGIN_INSTR(Push)
+ TRACE(inline, "stack size: %u", instr.value);
+ stackSize = instr.value;
+ stack = static_cast<VM::Value *>(alloca(stackSize * sizeof(VM::Value)));
+ state.setStack(stack, stackSize);
+ MOTH_END_INSTR(Push)
+
+ MOTH_BEGIN_INSTR(CallValue)
+#ifdef DO_TRACE_INSTR
+ if (Debugging::Debugger *debugger = context->engine->debugger) {
+ if (VM::FunctionObject *o = (VALUE(instr.dest)).asFunctionObject()) {
+ if (Debugging::FunctionDebugInfo *info = debugger->debugInfo(o)) {
+ QString n = debugger->name(o);
+ std::cerr << "*** Call to \"" << (n.isNull() ? "<no name>" : qPrintable(n)) << "\" defined @" << info->startLine << ":" << info->startColumn << std::endl;
+ }
+ }
+ }
+#endif // DO_TRACE_INSTR
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_call_value(context, VALUEPTR(instr.result), /*thisObject*/0, VALUE(instr.dest), args, instr.argc);
+ MOTH_END_INSTR(CallValue)
+
+ MOTH_BEGIN_INSTR(CallProperty)
+ TRACE(property name, "%s, args=%u, argc=%u, this=%s", qPrintable(instr.name->toQString()), instr.args, instr.argc, (VALUE(instr.base)).toString(context)->toQString().toUtf8().constData());
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_call_property(context, VALUEPTR(instr.result), VALUE(instr.base), instr.name, args, instr.argc);
+ MOTH_END_INSTR(CallProperty)
+
+ MOTH_BEGIN_INSTR(CallElement)
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_call_element(context, VALUEPTR(instr.result), VALUE(instr.base), VALUE(instr.index), args, instr.argc);
+ MOTH_END_INSTR(CallElement)
+
+ MOTH_BEGIN_INSTR(CallActivationProperty)
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_call_activation_property(context, VALUEPTR(instr.result), instr.name, args, instr.argc);
+ MOTH_END_INSTR(CallActivationProperty)
+
+ MOTH_BEGIN_INSTR(CallBuiltinThrow)
+ __qmljs_builtin_throw(context, VALUE(instr.arg));
+ MOTH_END_INSTR(CallBuiltinThrow)
+
+ MOTH_BEGIN_INSTR(EnterTry)
+ VALUE(instr.exceptionVar) = VM::Value::undefinedValue();
+ try {
+ const uchar *tryCode = ((uchar *)&instr.tryOffset) + instr.tryOffset;
+ run(context, tryCode, stack, stackSize);
+ code = tryCode;
+ } catch (VM::Exception &ex) {
+ ex.accept(context);
+ VALUE(instr.exceptionVar) = ex.value();
+ try {
+ VM::ExecutionContext *catchContext = __qmljs_builtin_push_catch_scope(instr.exceptionVarName, ex.value(), context);
+ const uchar *catchCode = ((uchar *)&instr.catchOffset) + instr.catchOffset;
+ run(catchContext, catchCode, stack, stackSize);
+ code = catchCode;
+ context = __qmljs_builtin_pop_scope(catchContext);
+ } catch (VM::Exception &ex) {
+ ex.accept(context);
+ VALUE(instr.exceptionVar) = ex.value();
+ const uchar *catchCode = ((uchar *)&instr.catchOffset) + instr.catchOffset;
+ run(context, catchCode, stack, stackSize);
+ code = catchCode;
+ }
+ }
+ MOTH_END_INSTR(EnterTry)
+
+ MOTH_BEGIN_INSTR(CallBuiltinFinishTry)
+ return VM::Value();
+ MOTH_END_INSTR(CallBuiltinFinishTry)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPushScope)
+ context = __qmljs_builtin_push_with_scope(VALUE(instr.arg), context);
+ MOTH_END_INSTR(CallBuiltinPushScope)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPopScope)
+ context = __qmljs_builtin_pop_scope(context);
+ MOTH_END_INSTR(CallBuiltinPopScope)
+
+ MOTH_BEGIN_INSTR(CallBuiltinForeachIteratorObject)
+ __qmljs_foreach_iterator_object(context, VALUEPTR(instr.result), VALUE(instr.arg));
+ MOTH_END_INSTR(CallBuiltinForeachIteratorObject)
+
+ MOTH_BEGIN_INSTR(CallBuiltinForeachNextPropertyName)
+ __qmljs_foreach_next_property_name(VALUEPTR(instr.result), VALUE(instr.arg));
+ MOTH_END_INSTR(CallBuiltinForeachNextPropertyName)
+
+ MOTH_BEGIN_INSTR(CallBuiltinDeleteMember)
+ __qmljs_delete_member(context, VALUEPTR(instr.result), VALUE(instr.base), instr.member);
+ MOTH_END_INSTR(CallBuiltinDeleteMember)
+
+ MOTH_BEGIN_INSTR(CallBuiltinDeleteSubscript)
+ __qmljs_delete_subscript(context, VALUEPTR(instr.result), VALUE(instr.base), VALUE(instr.index));
+ MOTH_END_INSTR(CallBuiltinDeleteSubscript)
+
+ MOTH_BEGIN_INSTR(CallBuiltinDeleteName)
+ __qmljs_delete_name(context, VALUEPTR(instr.result), instr.name);
+ MOTH_END_INSTR(CallBuiltinDeleteName)
+
+ MOTH_BEGIN_INSTR(CallBuiltinTypeofMember)
+ __qmljs_builtin_typeof_member(context, VALUEPTR(instr.result), VALUE(instr.base), instr.member);
+ MOTH_END_INSTR(CallBuiltinTypeofMember)
+
+ MOTH_BEGIN_INSTR(CallBuiltinTypeofSubscript)
+ __qmljs_builtin_typeof_element(context, VALUEPTR(instr.result), VALUE(instr.base), VALUE(instr.index));
+ MOTH_END_INSTR(CallBuiltinTypeofSubscript)
+
+ MOTH_BEGIN_INSTR(CallBuiltinTypeofName)
+ __qmljs_builtin_typeof_name(context, VALUEPTR(instr.result), instr.name);
+ MOTH_END_INSTR(CallBuiltinTypeofName)
+
+ MOTH_BEGIN_INSTR(CallBuiltinTypeofValue)
+ __qmljs_builtin_typeof(context, VALUEPTR(instr.result), VALUE(instr.value));
+ MOTH_END_INSTR(CallBuiltinTypeofValue)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostIncMember)
+ __qmljs_builtin_post_increment_member(context, VALUEPTR(instr.result), VALUE(instr.base), instr.member);
+ MOTH_END_INSTR(CallBuiltinTypeofMember)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostIncSubscript)
+ __qmljs_builtin_post_increment_element(context, VALUEPTR(instr.result), VALUE(instr.base), VALUEPTR(instr.index));
+ MOTH_END_INSTR(CallBuiltinTypeofSubscript)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostIncName)
+ __qmljs_builtin_post_increment_name(context, VALUEPTR(instr.result), instr.name);
+ MOTH_END_INSTR(CallBuiltinTypeofName)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostIncValue)
+ __qmljs_builtin_post_increment(VALUEPTR(instr.result), VALUEPTR(instr.value));
+ MOTH_END_INSTR(CallBuiltinTypeofValue)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostDecMember)
+ __qmljs_builtin_post_decrement_member(context, VALUEPTR(instr.result), VALUE(instr.base), instr.member);
+ MOTH_END_INSTR(CallBuiltinTypeofMember)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostDecSubscript)
+ __qmljs_builtin_post_decrement_element(context, VALUEPTR(instr.result), VALUE(instr.base), VALUE(instr.index));
+ MOTH_END_INSTR(CallBuiltinTypeofSubscript)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostDecName)
+ __qmljs_builtin_post_decrement_name(context, VALUEPTR(instr.result), instr.name);
+ MOTH_END_INSTR(CallBuiltinTypeofName)
+
+ MOTH_BEGIN_INSTR(CallBuiltinPostDecValue)
+ __qmljs_builtin_post_decrement(VALUEPTR(instr.result), VALUEPTR(instr.value));
+ MOTH_END_INSTR(CallBuiltinTypeofValue)
+
+ MOTH_BEGIN_INSTR(CallBuiltinDeclareVar)
+ __qmljs_builtin_declare_var(context, instr.isDeletable, instr.varName);
+ MOTH_END_INSTR(CallBuiltinDeclareVar)
+
+ MOTH_BEGIN_INSTR(CallBuiltinDefineGetterSetter)
+ __qmljs_builtin_define_getter_setter(context, VALUE(instr.object), instr.name, VALUEPTR(instr.getter), VALUEPTR(instr.setter));
+ MOTH_END_INSTR(CallBuiltinDefineGetterSetter)
+
+ MOTH_BEGIN_INSTR(CallBuiltinDefineProperty)
+ __qmljs_builtin_define_property(context, VALUE(instr.object), instr.name, VALUEPTR(instr.value));
+ MOTH_END_INSTR(CallBuiltinDefineProperty)
+
+ MOTH_BEGIN_INSTR(CallBuiltinDefineArray)
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_builtin_define_array(context, VALUEPTR(instr.result), args, instr.argc);
+ MOTH_END_INSTR(CallBuiltinDefineArray)
+
+ MOTH_BEGIN_INSTR(CreateValue)
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_construct_value(context, VALUEPTR(instr.result), VALUE(instr.func), args, instr.argc);
+ MOTH_END_INSTR(CreateValue)
+
+ MOTH_BEGIN_INSTR(CreateProperty)
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_construct_property(context, VALUEPTR(instr.result), VALUE(instr.base), instr.name, args, instr.argc);
+ MOTH_END_INSTR(CreateProperty)
+
+ MOTH_BEGIN_INSTR(CreateActivationProperty)
+ TRACE(inline, "property name = %s, args = %d, argc = %d", instr.name->toQString().toUtf8().constData(), instr.args, instr.argc);
+ Q_ASSERT(instr.args + instr.argc <= stackSize);
+ VM::Value *args = stack + instr.args;
+ __qmljs_construct_activation_property(context, VALUEPTR(instr.result), instr.name, args, instr.argc);
+ MOTH_END_INSTR(CreateActivationProperty)
+
+ MOTH_BEGIN_INSTR(Jump)
+ code = ((uchar *)&instr.offset) + instr.offset;
+ MOTH_END_INSTR(Jump)
+
+ MOTH_BEGIN_INSTR(CJump)
+ uint cond = __qmljs_to_boolean(VALUE(instr.condition));
+ TRACE(condition, "%s", cond ? "TRUE" : "FALSE");
+ if (cond)
+ code = ((uchar *)&instr.offset) + instr.offset;
+ MOTH_END_INSTR(CJump)
+
+ MOTH_BEGIN_INSTR(Unop)
+ instr.alu(VALUEPTR(instr.result), VALUE(instr.source));
+ MOTH_END_INSTR(Unop)
+
+ MOTH_BEGIN_INSTR(Binop)
+ instr.alu(context, VALUEPTR(instr.result), VALUE(instr.lhs), VALUE(instr.rhs));
+ MOTH_END_INSTR(Binop)
+
+ MOTH_BEGIN_INSTR(Ret)
+ VM::Value &result = VALUE(instr.result);
+// TRACE(Ret, "returning value %s", result.toString(context)->toQString().toUtf8().constData());
+ return result;
+ MOTH_END_INSTR(Ret)
+
+ MOTH_BEGIN_INSTR(LoadThis)
+ VALUE(instr.result) = context->thisObject;
+ MOTH_END_INSTR(LoadThis)
+
+ MOTH_BEGIN_INSTR(InplaceElementOp)
+ instr.alu(context,
+ VALUE(instr.base),
+ VALUE(instr.index),
+ VALUE(instr.source));
+ MOTH_END_INSTR(InplaceElementOp)
+
+ MOTH_BEGIN_INSTR(InplaceMemberOp)
+ instr.alu(context,
+ VALUE(instr.base),
+ instr.member,
+ VALUE(instr.source));
+ MOTH_END_INSTR(InplaceMemberOp)
+
+ MOTH_BEGIN_INSTR(InplaceNameOp)
+ TRACE(name, "%s", instr.name->toQString().toUtf8().constData());
+ instr.alu(context, instr.name, VALUE(instr.source));
+ MOTH_END_INSTR(InplaceNameOp)
+
+#ifdef MOTH_THREADED_INTERPRETER
+ // nothing to do
+#else
+ default:
+ qFatal("QQmlJS::Moth::VME: Internal error - unknown instruction %d", genericInstr->common.instructionType);
+ break;
+ }
+ }
+#endif
+
+}
+
+#ifdef MOTH_THREADED_INTERPRETER
+void **VME::instructionJumpTable()
+{
+ static void **jumpTable = 0;
+ if (!jumpTable) {
+ const uchar *code = 0;
+ VME().run(0, code, 0, 0, &jumpTable);
+ }
+ return jumpTable;
+}
+#endif
+
+VM::Value VME::exec(VM::ExecutionContext *ctxt, const uchar *code)
+{
+ VME vme;
+ return vme.run(ctxt, code);
+}
diff --git a/src/qml/qml/v4vm/moth/qv4vme_moth_p.h b/src/qml/qml/v4vm/moth/qv4vme_moth_p.h
new file mode 100644
index 0000000000..9eea23901a
--- /dev/null
+++ b/src/qml/qml/v4vm/moth/qv4vme_moth_p.h
@@ -0,0 +1,35 @@
+#ifndef QV4VME_MOTH_P_H
+#define QV4VME_MOTH_P_H
+
+#include "qv4runtime.h"
+#include "qv4instr_moth_p.h"
+
+namespace QQmlJS {
+namespace VM {
+ struct Value;
+}
+
+namespace Moth {
+
+class VME
+{
+public:
+ static VM::Value exec(VM::ExecutionContext *, const uchar *);
+
+#ifdef MOTH_THREADED_INTERPRETER
+ static void **instructionJumpTable();
+#endif
+
+private:
+ VM::Value run(QQmlJS::VM::ExecutionContext *, const uchar *&code,
+ VM::Value *stack = 0, unsigned stackSize = 0
+#ifdef MOTH_THREADED_INTERPRETER
+ , void ***storeJumpTable = 0
+#endif
+ );
+};
+
+} // namespace Moth
+} // namespace QQmlJS
+
+#endif // QV4VME_MOTH_P_H
diff --git a/src/qml/qml/v4vm/qcalculatehash_p.h b/src/qml/qml/v4vm/qcalculatehash_p.h
new file mode 100644
index 0000000000..36c5a6807a
--- /dev/null
+++ b/src/qml/qml/v4vm/qcalculatehash_p.h
@@ -0,0 +1,73 @@
+/****************************************************************************
+**
+** Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the QtV8 module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef CALCULATEHASH_P_H
+#define CALCULATEHASH_P_H
+
+//
+// W A R N I N G
+// -------------
+//
+// This file is not part of the Qt API. It exists purely as an
+// implementation detail. This header file may change from version to
+// version without notice, or even be removed.
+//
+// We mean it.
+//
+
+#include <QtCore/qglobal.h>
+#include <QtCore/qstring.h>
+#include "qv4v8.h"
+QT_BEGIN_NAMESPACE
+
+inline uint32_t calculateHash(const quint8* chars, int length)
+{
+ return v8::String::ComputeHash((char *)chars, length);
+}
+
+inline uint32_t calculateHash(const quint16* chars, int length)
+{
+ return v8::String::ComputeHash((uint16_t *)chars, length);
+}
+
+QT_END_NAMESPACE
+
+#endif // CALCULATEHASH_P_H
diff --git a/src/qml/qml/v4vm/qv4_llvm_p.h b/src/qml/qml/v4vm/qv4_llvm_p.h
new file mode 100644
index 0000000000..189fe8586f
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4_llvm_p.h
@@ -0,0 +1,65 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4_LLVM_P_H
+#define QV4_LLVM_P_H
+
+#include "qv4global.h"
+#include "qv4jsir_p.h"
+
+#include <QtCore/QString>
+
+namespace QQmlJS {
+
+// Note: keep this enum in sync with the command-line option!
+enum LLVMOutputType {
+ LLVMOutputJit = -1,
+ LLVMOutputIR = 0, // .ll
+ LLVMOutputBitcode = 1, // .bc
+ LLVMOutputAssembler = 2, // .s
+ LLVMOutputObject = 3 // .o
+};
+
+Q_V4_EXPORT int compileWithLLVM(IR::Module *module, const QString &fileName, LLVMOutputType outputType, int (*)(void *));
+
+} // QQmlJS
+
+#endif // QV4_LLVM_P_H
diff --git a/src/qml/qml/v4vm/qv4alloca_p.h b/src/qml/qml/v4vm/qv4alloca_p.h
new file mode 100644
index 0000000000..1e9388c48c
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4alloca_p.h
@@ -0,0 +1,54 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4_ALLOCA_H
+#define QV4_ALLOCA_H
+
+#include <qglobal.h>
+
+#if defined(Q_OS_WIN)
+#include <malloc.h>
+#define alloca _alloca
+#else
+#include <alloca.h>
+#endif
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4argumentsobject.cpp b/src/qml/qml/v4vm/qv4argumentsobject.cpp
new file mode 100644
index 0000000000..21c72be460
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4argumentsobject.cpp
@@ -0,0 +1,176 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include <qv4argumentsobject.h>
+
+namespace QQmlJS {
+namespace VM {
+
+
+static Value throwTypeError(SimpleCallContext *ctx)
+{
+ ctx->throwTypeError();
+ return Value::undefinedValue();
+}
+
+DEFINE_MANAGED_VTABLE(ArgumentsObject);
+
+ArgumentsObject::ArgumentsObject(CallContext *context, int formalParameterCount, int actualParameterCount)
+ : Object(context->engine), context(context)
+{
+ vtbl = &static_vtbl;
+ type = Type_ArgumentsObject;
+
+ defineDefaultProperty(context->engine->id_length, Value::fromInt32(actualParameterCount));
+ if (context->strictMode) {
+ for (uint i = 0; i < context->argumentCount; ++i)
+ Object::put(context, QString::number(i), context->arguments[i]);
+ FunctionObject *thrower = context->engine->newBuiltinFunction(context, 0, throwTypeError);
+ Property pd = Property::fromAccessor(thrower, thrower);
+ __defineOwnProperty__(context, QStringLiteral("callee"), pd, Attr_Accessor|Attr_NotConfigurable|Attr_NotEnumerable);
+ __defineOwnProperty__(context, QStringLiteral("caller"), pd, Attr_Accessor|Attr_NotConfigurable|Attr_NotEnumerable);
+ } else {
+ uint numAccessors = qMin(formalParameterCount, actualParameterCount);
+ context->engine->requireArgumentsAccessors(numAccessors);
+ for (uint i = 0; i < (uint)numAccessors; ++i) {
+ mappedArguments.append(context->argument(i));
+ __defineOwnProperty__(context, i, context->engine->argumentsAccessors.at(i), Attr_Accessor);
+ }
+ for (uint i = numAccessors; i < qMin((uint)actualParameterCount, context->argumentCount); ++i) {
+ Property pd = Property::fromValue(context->argument(i));
+ __defineOwnProperty__(context, i, pd, Attr_Data);
+ }
+ defineDefaultProperty(context, QStringLiteral("callee"), Value::fromObject(context->function));
+ isNonStrictArgumentsObject = true;
+ }
+}
+
+void ArgumentsObject::destroy(Managed *that)
+{
+ static_cast<ArgumentsObject *>(that)->~ArgumentsObject();
+}
+
+bool ArgumentsObject::defineOwnProperty(ExecutionContext *ctx, uint index, const Property &desc, PropertyAttributes attrs)
+{
+ uint pidx = propertyIndexFromArrayIndex(index);
+ Property *pd = arrayData + pidx;
+ Property map;
+ PropertyAttributes mapAttrs;
+ bool isMapped = false;
+ if (pd && index < (uint)mappedArguments.size())
+ isMapped = arrayAttributes && arrayAttributes[pidx].isAccessor() && pd->getter() == context->engine->argumentsAccessors.at(index).getter();
+
+ if (isMapped) {
+ map = *pd;
+ mapAttrs = arrayAttributes[pidx];
+ arrayAttributes[pidx] = Attr_Data;
+ pd->value = mappedArguments.at(index);
+ }
+
+ isNonStrictArgumentsObject = false;
+ bool strict = ctx->strictMode;
+ ctx->strictMode = false;
+ bool result = Object::__defineOwnProperty__(ctx, index, desc, attrs);
+ ctx->strictMode = strict;
+ isNonStrictArgumentsObject = true;
+
+ if (isMapped && attrs.isData()) {
+ if (!attrs.isGeneric()) {
+ Value arg = desc.value;
+ map.setter()->call(ctx, Value::fromObject(this), &arg, 1);
+ }
+ if (attrs.isWritable()) {
+ *pd = map;
+ arrayAttributes[pidx] = mapAttrs;
+ }
+ }
+
+ if (ctx->strictMode && !result)
+ ctx->throwTypeError();
+ return result;
+}
+
+DEFINE_MANAGED_VTABLE(ArgumentsGetterFunction);
+
+Value ArgumentsGetterFunction::call(Managed *getter, ExecutionContext *ctx, const Value &thisObject, Value *, int)
+{
+ ArgumentsGetterFunction *g = static_cast<ArgumentsGetterFunction *>(getter);
+ Object *that = thisObject.asObject();
+ if (!that)
+ ctx->throwTypeError();
+ ArgumentsObject *o = that->asArgumentsObject();
+ if (!o)
+ ctx->throwTypeError();
+
+ assert(g->index < o->context->argumentCount);
+ return o->context->argument(g->index);
+}
+
+DEFINE_MANAGED_VTABLE(ArgumentsSetterFunction);
+
+Value ArgumentsSetterFunction::call(Managed *setter, ExecutionContext *ctx, const Value &thisObject, Value *args, int argc)
+{
+ ArgumentsSetterFunction *s = static_cast<ArgumentsSetterFunction *>(setter);
+ Object *that = thisObject.asObject();
+ if (!that)
+ ctx->throwTypeError();
+ ArgumentsObject *o = that->asArgumentsObject();
+ if (!o)
+ ctx->throwTypeError();
+
+ assert(s->index < o->context->argumentCount);
+ o->context->arguments[s->index] = argc ? args[0] : Value::undefinedValue();
+ return Value::undefinedValue();
+}
+
+void ArgumentsObject::markObjects(Managed *that)
+{
+ ArgumentsObject *o = static_cast<ArgumentsObject *>(that);
+ o->context->mark();
+ for (int i = 0; i < o->mappedArguments.size(); ++i) {
+ Managed *m = o->mappedArguments.at(i).asManaged();
+ if (m)
+ m->mark();
+ }
+ Object::markObjects(that);
+}
+
+}
+}
diff --git a/src/qml/qml/v4vm/qv4argumentsobject.h b/src/qml/qml/v4vm/qv4argumentsobject.h
new file mode 100644
index 0000000000..6727d8759b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4argumentsobject.h
@@ -0,0 +1,99 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4ARGUMENTSOBJECTS_H
+#define QV4ARGUMENTSOBJECTS_H
+
+#include <qv4object.h>
+#include <qv4functionobject.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct ArgumentsGetterFunction: FunctionObject
+{
+ uint index;
+
+ ArgumentsGetterFunction(ExecutionContext *scope, uint index)
+ : FunctionObject(scope), index(index) { vtbl = &static_vtbl; }
+
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct ArgumentsSetterFunction: FunctionObject
+{
+ uint index;
+
+ ArgumentsSetterFunction(ExecutionContext *scope, uint index)
+ : FunctionObject(scope), index(index) { vtbl = &static_vtbl; }
+
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+
+struct ArgumentsObject: Object {
+ CallContext *context;
+ QVector<Value> mappedArguments;
+ ArgumentsObject(CallContext *context, int formalParameterCount, int actualParameterCount);
+ ~ArgumentsObject() {}
+
+ bool defineOwnProperty(ExecutionContext *ctx, uint index, const Property &desc, PropertyAttributes attrs);
+
+ static void markObjects(Managed *that);
+protected:
+ static const ManagedVTable static_vtbl;
+ static void destroy(Managed *);
+};
+
+}
+}
+
+QT_END_NAMESPACE
+
+#endif
+
diff --git a/src/qml/qml/v4vm/qv4arrayobject.cpp b/src/qml/qml/v4vm/qv4arrayobject.cpp
new file mode 100644
index 0000000000..90746d008c
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4arrayobject.cpp
@@ -0,0 +1,859 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4arrayobject.h"
+#include "qv4sparsearray.h"
+
+using namespace QQmlJS::VM;
+
+DEFINE_MANAGED_VTABLE(ArrayCtor);
+
+ArrayCtor::ArrayCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+Value ArrayCtor::construct(Managed *, ExecutionContext *ctx, Value *argv, int argc)
+{
+ ArrayObject *a = ctx->engine->newArrayObject(ctx);
+ uint len;
+ if (argc == 1 && argv[0].isNumber()) {
+ bool ok;
+ len = argv[0].asArrayLength(&ok);
+
+ if (!ok)
+ ctx->throwRangeError(argv[0]);
+
+ if (len < 0x1000)
+ a->arrayReserve(len);
+ } else {
+ len = argc;
+ a->arrayReserve(len);
+ for (unsigned int i = 0; i < len; ++i)
+ a->arrayData[i].value = argv[i];
+ a->arrayDataLen = len;
+ }
+ a->setArrayLengthUnchecked(len);
+
+ return Value::fromObject(a);
+}
+
+Value ArrayCtor::call(Managed *that, ExecutionContext *ctx, const Value &thisObject, Value *argv, int argc)
+{
+ return construct(that, ctx, argv, argc);
+}
+
+void ArrayPrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(1));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("isArray"), method_isArray, 1);
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleString"), method_toLocaleString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("concat"), method_concat, 1);
+ defineDefaultProperty(ctx, QStringLiteral("join"), method_join, 1);
+ defineDefaultProperty(ctx, QStringLiteral("pop"), method_pop, 0);
+ defineDefaultProperty(ctx, QStringLiteral("push"), method_push, 1);
+ defineDefaultProperty(ctx, QStringLiteral("reverse"), method_reverse, 0);
+ defineDefaultProperty(ctx, QStringLiteral("shift"), method_shift, 0);
+ defineDefaultProperty(ctx, QStringLiteral("slice"), method_slice, 2);
+ defineDefaultProperty(ctx, QStringLiteral("sort"), method_sort, 1);
+ defineDefaultProperty(ctx, QStringLiteral("splice"), method_splice, 2);
+ defineDefaultProperty(ctx, QStringLiteral("unshift"), method_unshift, 1);
+ defineDefaultProperty(ctx, QStringLiteral("indexOf"), method_indexOf, 1);
+ defineDefaultProperty(ctx, QStringLiteral("lastIndexOf"), method_lastIndexOf, 1);
+ defineDefaultProperty(ctx, QStringLiteral("every"), method_every, 1);
+ defineDefaultProperty(ctx, QStringLiteral("some"), method_some, 1);
+ defineDefaultProperty(ctx, QStringLiteral("forEach"), method_forEach, 1);
+ defineDefaultProperty(ctx, QStringLiteral("map"), method_map, 1);
+ defineDefaultProperty(ctx, QStringLiteral("filter"), method_filter, 1);
+ defineDefaultProperty(ctx, QStringLiteral("reduce"), method_reduce, 1);
+ defineDefaultProperty(ctx, QStringLiteral("reduceRight"), method_reduceRight, 1);
+}
+
+uint ArrayPrototype::getLength(ExecutionContext *ctx, Object *o)
+{
+ if (o->isArrayObject())
+ return o->arrayLength();
+ return o->get(ctx, ctx->engine->id_length).toUInt32();
+}
+
+Value ArrayPrototype::method_isArray(SimpleCallContext *ctx)
+{
+ Value arg = ctx->argument(0);
+ bool isArray = arg.asArrayObject();
+ return Value::fromBoolean(isArray);
+}
+
+Value ArrayPrototype::method_toString(SimpleCallContext *ctx)
+{
+ return method_join(ctx);
+}
+
+Value ArrayPrototype::method_toLocaleString(SimpleCallContext *ctx)
+{
+ return method_toString(ctx);
+}
+
+Value ArrayPrototype::method_concat(SimpleCallContext *ctx)
+{
+ ArrayObject *result = ctx->engine->newArrayObject(ctx);
+
+ if (ArrayObject *instance = ctx->thisObject.asArrayObject()) {
+ result->copyArrayData(instance);
+ } else {
+ QString v = ctx->thisObject.toString(ctx)->toQString();
+ result->arraySet(0, Value::fromString(ctx, v));
+ }
+
+ for (uint i = 0; i < ctx->argumentCount; ++i) {
+ Value arg = ctx->argument(i);
+
+ if (ArrayObject *elt = arg.asArrayObject())
+ result->arrayConcat(elt);
+
+ else
+ result->arraySet(getLength(ctx, result), arg);
+ }
+
+ return Value::fromObject(result);
+}
+
+Value ArrayPrototype::method_join(SimpleCallContext *ctx)
+{
+ Value arg = ctx->argument(0);
+
+ QString r4;
+ if (arg.isUndefined())
+ r4 = QStringLiteral(",");
+ else
+ r4 = arg.toString(ctx)->toQString();
+
+ Value self = ctx->thisObject;
+ const Value length = self.property(ctx, ctx->engine->id_length);
+ const quint32 r2 = Value::toUInt32(length.isUndefined() ? 0 : length.toNumber());
+
+ static QSet<Object *> visitedArrayElements;
+
+ if (! r2 || visitedArrayElements.contains(self.objectValue()))
+ return Value::fromString(ctx, QString());
+
+ // avoid infinite recursion
+ visitedArrayElements.insert(self.objectValue());
+
+ QString R;
+
+ // ### FIXME
+ if (ArrayObject *a = self.asArrayObject()) {
+ for (uint i = 0; i < a->arrayLength(); ++i) {
+ if (i)
+ R += r4;
+
+ Value e = a->getIndexed(ctx, i);
+ if (! (e.isUndefined() || e.isNull()))
+ R += e.toString(ctx)->toQString();
+ }
+ } else {
+ //
+ // crazy!
+ //
+ Value r6 = self.property(ctx, ctx->engine->newString(QStringLiteral("0")));
+ if (!(r6.isUndefined() || r6.isNull()))
+ R = r6.toString(ctx)->toQString();
+
+ for (quint32 k = 1; k < r2; ++k) {
+ R += r4;
+
+ String *name = Value::fromDouble(k).toString(ctx);
+ Value r12 = self.property(ctx, name);
+
+ if (! (r12.isUndefined() || r12.isNull()))
+ R += r12.toString(ctx)->toQString();
+ }
+ }
+
+ visitedArrayElements.remove(self.objectValue());
+ return Value::fromString(ctx, R);
+}
+
+Value ArrayPrototype::method_pop(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint len = getLength(ctx, instance);
+
+ if (!len) {
+ if (!instance->isArrayObject())
+ instance->put(ctx, ctx->engine->id_length, Value::fromInt32(0));
+ return Value::undefinedValue();
+ }
+
+ Value result = instance->getIndexed(ctx, len - 1);
+
+ instance->deleteIndexedProperty(ctx, len - 1);
+ if (instance->isArrayObject())
+ instance->setArrayLengthUnchecked(len - 1);
+ else
+ instance->put(ctx, ctx->engine->id_length, Value::fromDouble(len - 1));
+ return result;
+}
+
+Value ArrayPrototype::method_push(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint len = getLength(ctx, instance);
+
+ if (len + ctx->argumentCount < len) {
+ // ughh...
+ double l = len;
+ for (double i = 0; i < ctx->argumentCount; ++i) {
+ Value idx = Value::fromDouble(l + i);
+ instance->put(ctx, idx.toString(ctx), ctx->argument(i));
+ }
+ double newLen = l + ctx->argumentCount;
+ if (!instance->isArrayObject())
+ instance->put(ctx, ctx->engine->id_length, Value::fromDouble(newLen));
+ else
+ ctx->throwRangeError(Value::fromString(ctx, QStringLiteral("Array.prototype.push: Overflow")));
+ return Value::fromDouble(newLen);
+ }
+
+ bool protoHasArray = false;
+ Object *p = instance;
+ while ((p = p->prototype))
+ if (p->arrayDataLen)
+ protoHasArray = true;
+
+ if (!protoHasArray && instance->arrayDataLen <= len) {
+ for (uint i = 0; i < ctx->argumentCount; ++i) {
+ Value v = ctx->argument(i);
+
+ if (!instance->sparseArray) {
+ if (len >= instance->arrayAlloc)
+ instance->arrayReserve(len + 1);
+ instance->arrayData[len].value = v;
+ if (instance->arrayAttributes)
+ instance->arrayAttributes[len] = Attr_Data;
+ instance->arrayDataLen = len + 1;
+ } else {
+ uint i = instance->allocArrayValue(v);
+ instance->sparseArray->push_back(i, len);
+ }
+ ++len;
+ }
+ } else {
+ for (uint i = 0; i < ctx->argumentCount; ++i)
+ instance->putIndexed(ctx, len + i, ctx->argument(i));
+ len += ctx->argumentCount;
+ }
+ if (instance->isArrayObject())
+ instance->setArrayLengthUnchecked(len);
+ else
+ instance->put(ctx, ctx->engine->id_length, Value::fromDouble(len));
+
+ if (len < INT_MAX)
+ return Value::fromInt32(len);
+ return Value::fromDouble((double)len);
+
+}
+
+Value ArrayPrototype::method_reverse(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint length = getLength(ctx, instance);
+
+ int lo = 0, hi = length - 1;
+
+ for (; lo < hi; ++lo, --hi) {
+ bool loExists, hiExists;
+ Value lval = instance->getIndexed(ctx, lo, &loExists);
+ Value hval = instance->getIndexed(ctx, hi, &hiExists);
+ if (hiExists)
+ instance->putIndexed(ctx, lo, hval);
+ else
+ instance->deleteIndexedProperty(ctx, lo);
+ if (loExists)
+ instance->putIndexed(ctx, hi, lval);
+ else
+ instance->deleteIndexedProperty(ctx, hi);
+ }
+ return Value::fromObject(instance);
+}
+
+Value ArrayPrototype::method_shift(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint len = getLength(ctx, instance);
+
+ if (!len) {
+ if (!instance->isArrayObject())
+ instance->put(ctx, ctx->engine->id_length, Value::fromInt32(0));
+ return Value::undefinedValue();
+ }
+
+ Property *front = 0;
+ uint pidx = instance->propertyIndexFromArrayIndex(0);
+ if (pidx < UINT_MAX && (!instance->arrayAttributes || !instance->arrayAttributes[0].isGeneric()))
+ front = instance->arrayData + pidx;
+
+ Value result = front ? instance->getValue(ctx, front, instance->arrayAttributes ? instance->arrayAttributes[pidx] : Attr_Data) : Value::undefinedValue();
+
+ bool protoHasArray = false;
+ Object *p = instance;
+ while ((p = p->prototype))
+ if (p->arrayDataLen)
+ protoHasArray = true;
+
+ if (!protoHasArray && instance->arrayDataLen <= len) {
+ if (!instance->sparseArray) {
+ if (instance->arrayDataLen) {
+ ++instance->arrayOffset;
+ ++instance->arrayData;
+ --instance->arrayDataLen;
+ --instance->arrayAlloc;
+ if (instance->arrayAttributes)
+ ++instance->arrayAttributes;
+ }
+ } else {
+ uint idx = instance->sparseArray->pop_front();
+ instance->freeArrayValue(idx);
+ }
+ } else {
+ // do it the slow way
+ for (uint k = 1; k < len; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (exists)
+ instance->putIndexed(ctx, k - 1, v);
+ else
+ instance->deleteIndexedProperty(ctx, k - 1);
+ }
+ instance->deleteIndexedProperty(ctx, len - 1);
+ }
+
+ if (instance->isArrayObject())
+ instance->setArrayLengthUnchecked(len - 1);
+ else
+ instance->put(ctx, ctx->engine->id_length, Value::fromDouble(len - 1));
+ return result;
+}
+
+Value ArrayPrototype::method_slice(SimpleCallContext *ctx)
+{
+ Object *o = ctx->thisObject.toObject(ctx);
+
+ ArrayObject *result = ctx->engine->newArrayObject(ctx);
+ uint len = o->get(ctx, ctx->engine->id_length).toUInt32();
+ double s = ctx->argument(0).toInteger();
+ uint start;
+ if (s < 0)
+ start = (uint)qMax(len + s, 0.);
+ else if (s > len)
+ start = len;
+ else
+ start = (uint) s;
+ uint end = len;
+ if (!ctx->argument(1).isUndefined()) {
+ double e = ctx->argument(1).toInteger();
+ if (e < 0)
+ end = (uint)qMax(len + e, 0.);
+ else if (e > len)
+ end = len;
+ else
+ end = (uint) e;
+ }
+
+ uint n = 0;
+ for (uint i = start; i < end; ++i) {
+ bool exists;
+ Value v = o->getIndexed(ctx, i, &exists);
+ if (exists) {
+ result->arraySet(n, v);
+ }
+ ++n;
+ }
+ return Value::fromObject(result);
+}
+
+Value ArrayPrototype::method_sort(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ Value comparefn = ctx->argument(0);
+ instance->arraySort(ctx, instance, comparefn, len);
+ return ctx->thisObject;
+}
+
+Value ArrayPrototype::method_splice(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint len = getLength(ctx, instance);
+
+ ArrayObject *newArray = ctx->engine->newArrayObject(ctx);
+
+ double rs = ctx->argument(0).toInteger();
+ uint start;
+ if (rs < 0)
+ start = (uint) qMax(0., len + rs);
+ else
+ start = (uint) qMin(rs, (double)len);
+
+ uint deleteCount = (uint)qMin(qMax(ctx->argument(1).toInteger(), 0.), (double)(len - start));
+
+ newArray->arrayReserve(deleteCount);
+ Property *pd = newArray->arrayData;
+ for (uint i = 0; i < deleteCount; ++i) {
+ pd->value = instance->getIndexed(ctx, start + i);
+ ++pd;
+ }
+ newArray->arrayDataLen = deleteCount;
+ newArray->setArrayLengthUnchecked(deleteCount);
+
+ uint itemCount = ctx->argumentCount < 2 ? 0 : ctx->argumentCount - 2;
+
+ if (itemCount < deleteCount) {
+ for (uint k = start; k < len - deleteCount; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k + deleteCount, &exists);
+ if (exists)
+ instance->arraySet(k + itemCount, v);
+ else
+ instance->deleteIndexedProperty(ctx, k + itemCount);
+ }
+ for (uint k = len; k > len - deleteCount + itemCount; --k)
+ instance->deleteIndexedProperty(ctx, k - 1);
+ } else if (itemCount > deleteCount) {
+ uint k = len - deleteCount;
+ while (k > start) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k + deleteCount - 1, &exists);
+ if (exists)
+ instance->arraySet(k + itemCount - 1, v);
+ else
+ instance->deleteIndexedProperty(ctx, k + itemCount - 1);
+ --k;
+ }
+ }
+
+ for (uint i = 0; i < itemCount; ++i)
+ instance->arraySet(start + i, ctx->argument(i + 2));
+
+ ctx->strictMode = true;
+ instance->put(ctx, ctx->engine->id_length, Value::fromDouble(len - deleteCount + itemCount));
+
+ return Value::fromObject(newArray);
+}
+
+Value ArrayPrototype::method_unshift(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint len = getLength(ctx, instance);
+
+ bool protoHasArray = false;
+ Object *p = instance;
+ while ((p = p->prototype))
+ if (p->arrayDataLen)
+ protoHasArray = true;
+
+ if (!protoHasArray && instance->arrayDataLen <= len) {
+ for (int i = ctx->argumentCount - 1; i >= 0; --i) {
+ Value v = ctx->argument(i);
+
+ if (!instance->sparseArray) {
+ if (!instance->arrayOffset)
+ instance->getArrayHeadRoom();
+
+ --instance->arrayOffset;
+ --instance->arrayData;
+ ++instance->arrayDataLen;
+ if (instance->arrayAttributes) {
+ --instance->arrayAttributes;
+ *instance->arrayAttributes = Attr_Data;
+ }
+ instance->arrayData->value = v;
+ } else {
+ uint idx = instance->allocArrayValue(v);
+ instance->sparseArray->push_front(idx);
+ }
+ }
+ } else {
+ for (uint k = len; k > 0; --k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k - 1, &exists);
+ if (exists)
+ instance->putIndexed(ctx, k + ctx->argumentCount - 1, v);
+ else
+ instance->deleteIndexedProperty(ctx, k + ctx->argumentCount - 1);
+ }
+ for (uint i = 0; i < ctx->argumentCount; ++i)
+ instance->putIndexed(ctx, i, ctx->argument(i));
+ }
+
+ uint newLen = len + ctx->argumentCount;
+ if (instance->isArrayObject())
+ instance->setArrayLengthUnchecked(newLen);
+ else
+ instance->put(ctx, ctx->engine->id_length, Value::fromDouble(newLen));
+
+ if (newLen < INT_MAX)
+ return Value::fromInt32(newLen);
+ return Value::fromDouble((double)newLen);
+}
+
+Value ArrayPrototype::method_indexOf(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint len = getLength(ctx, instance);
+ if (!len)
+ return Value::fromInt32(-1);
+
+ Value searchValue;
+ uint fromIndex = 0;
+
+ if (ctx->argumentCount >= 1)
+ searchValue = ctx->argument(0);
+ else
+ searchValue = Value::undefinedValue();
+
+ if (ctx->argumentCount >= 2) {
+ double f = ctx->argument(1).toInteger();
+ if (f >= len)
+ return Value::fromInt32(-1);
+ if (f < 0)
+ f = qMax(len + f, 0.);
+ fromIndex = (uint) f;
+ }
+
+ if (instance->isStringObject()) {
+ for (uint k = fromIndex; k < len; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (exists && __qmljs_strict_equal(v, searchValue))
+ return Value::fromDouble(k);
+ }
+ return Value::fromInt32(-1);
+ }
+
+ return instance->arrayIndexOf(searchValue, fromIndex, len, ctx, instance);
+}
+
+Value ArrayPrototype::method_lastIndexOf(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+ uint len = getLength(ctx, instance);
+ if (!len)
+ return Value::fromInt32(-1);
+
+ Value searchValue;
+ uint fromIndex = len;
+
+ if (ctx->argumentCount >= 1)
+ searchValue = ctx->argument(0);
+ else
+ searchValue = Value::undefinedValue();
+
+ if (ctx->argumentCount >= 2) {
+ double f = ctx->argument(1).toInteger();
+ if (f > 0)
+ f = qMin(f, (double)(len - 1));
+ else if (f < 0) {
+ f = len + f;
+ if (f < 0)
+ return Value::fromInt32(-1);
+ }
+ fromIndex = (uint) f + 1;
+ }
+
+ for (uint k = fromIndex; k > 0;) {
+ --k;
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (exists && __qmljs_strict_equal(v, searchValue))
+ return Value::fromDouble(k);
+ }
+ return Value::fromInt32(-1);
+}
+
+Value ArrayPrototype::method_every(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ FunctionObject *callback = ctx->argument(0).asFunctionObject();
+ if (!callback)
+ ctx->throwTypeError();
+
+ Value thisArg = ctx->argument(1);
+
+ bool ok = true;
+ for (uint k = 0; ok && k < len; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (!exists)
+ continue;
+
+ Value args[3];
+ args[0] = v;
+ args[1] = Value::fromDouble(k);
+ args[2] = ctx->thisObject;
+ Value r = callback->call(ctx, thisArg, args, 3);
+ ok = r.toBoolean();
+ }
+ return Value::fromBoolean(ok);
+}
+
+Value ArrayPrototype::method_some(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ FunctionObject *callback = ctx->argument(0).asFunctionObject();
+ if (!callback)
+ ctx->throwTypeError();
+
+ Value thisArg = ctx->argument(1);
+
+ for (uint k = 0; k < len; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (!exists)
+ continue;
+
+ Value args[3];
+ args[0] = v;
+ args[1] = Value::fromDouble(k);
+ args[2] = ctx->thisObject;
+ Value r = callback->call(ctx, thisArg, args, 3);
+ if (r.toBoolean())
+ return Value::fromBoolean(true);
+ }
+ return Value::fromBoolean(false);
+}
+
+Value ArrayPrototype::method_forEach(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ FunctionObject *callback = ctx->argument(0).asFunctionObject();
+ if (!callback)
+ ctx->throwTypeError();
+
+ Value thisArg = ctx->argument(1);
+
+ for (uint k = 0; k < len; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (!exists)
+ continue;
+
+ Value args[3];
+ args[0] = v;
+ args[1] = Value::fromDouble(k);
+ args[2] = ctx->thisObject;
+ callback->call(ctx, thisArg, args, 3);
+ }
+ return Value::undefinedValue();
+}
+
+Value ArrayPrototype::method_map(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ FunctionObject *callback = ctx->argument(0).asFunctionObject();
+ if (!callback)
+ ctx->throwTypeError();
+
+ Value thisArg = ctx->argument(1);
+
+ ArrayObject *a = ctx->engine->newArrayObject(ctx);
+ a->arrayReserve(len);
+ a->setArrayLengthUnchecked(len);
+
+ for (uint k = 0; k < len; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (!exists)
+ continue;
+
+ Value args[3];
+ args[0] = v;
+ args[1] = Value::fromDouble(k);
+ args[2] = ctx->thisObject;
+ Value mapped = callback->call(ctx, thisArg, args, 3);
+ a->arraySet(k, mapped);
+ }
+ return Value::fromObject(a);
+}
+
+Value ArrayPrototype::method_filter(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ FunctionObject *callback = ctx->argument(0).asFunctionObject();
+ if (!callback)
+ ctx->throwTypeError();
+
+ Value thisArg = ctx->argument(1);
+
+ ArrayObject *a = ctx->engine->newArrayObject(ctx);
+ a->arrayReserve(len);
+
+ uint to = 0;
+ for (uint k = 0; k < len; ++k) {
+ bool exists;
+ Value v = instance->getIndexed(ctx, k, &exists);
+ if (!exists)
+ continue;
+
+ Value args[3];
+ args[0] = v;
+ args[1] = Value::fromDouble(k);
+ args[2] = ctx->thisObject;
+ Value selected = callback->call(ctx, thisArg, args, 3);
+ if (selected.toBoolean()) {
+ a->arraySet(to, v);
+ ++to;
+ }
+ }
+ return Value::fromObject(a);
+}
+
+Value ArrayPrototype::method_reduce(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ FunctionObject *callback = ctx->argument(0).asFunctionObject();
+ if (!callback)
+ ctx->throwTypeError();
+
+ uint k = 0;
+ Value acc;
+ if (ctx->argumentCount > 1) {
+ acc = ctx->argument(1);
+ } else {
+ bool kPresent = false;
+ while (k < len && !kPresent) {
+ Value v = instance->getIndexed(ctx, k, &kPresent);
+ if (kPresent)
+ acc = v;
+ ++k;
+ }
+ if (!kPresent)
+ ctx->throwTypeError();
+ }
+
+ while (k < len) {
+ bool kPresent;
+ Value v = instance->getIndexed(ctx, k, &kPresent);
+ if (kPresent) {
+ Value args[4];
+ args[0] = acc;
+ args[1] = v;
+ args[2] = Value::fromDouble(k);
+ args[3] = ctx->thisObject;
+ acc = callback->call(ctx, Value::undefinedValue(), args, 4);
+ }
+ ++k;
+ }
+ return acc;
+}
+
+Value ArrayPrototype::method_reduceRight(SimpleCallContext *ctx)
+{
+ Object *instance = ctx->thisObject.toObject(ctx);
+
+ uint len = getLength(ctx, instance);
+
+ FunctionObject *callback = ctx->argument(0).asFunctionObject();
+ if (!callback)
+ ctx->throwTypeError();
+
+ if (len == 0) {
+ if (ctx->argumentCount == 1)
+ ctx->throwTypeError();
+ return ctx->argument(1);
+ }
+
+ uint k = len;
+ Value acc;
+ if (ctx->argumentCount > 1) {
+ acc = ctx->argument(1);
+ } else {
+ bool kPresent = false;
+ while (k > 0 && !kPresent) {
+ Value v = instance->getIndexed(ctx, k - 1, &kPresent);
+ if (kPresent)
+ acc = v;
+ --k;
+ }
+ if (!kPresent)
+ ctx->throwTypeError();
+ }
+
+ while (k > 0) {
+ bool kPresent;
+ Value v = instance->getIndexed(ctx, k - 1, &kPresent);
+ if (kPresent) {
+ Value args[4];
+ args[0] = acc;
+ args[1] = v;
+ args[2] = Value::fromDouble(k - 1);
+ args[3] = ctx->thisObject;
+ acc = callback->call(ctx, Value::undefinedValue(), args, 4);
+ }
+ --k;
+ }
+ return acc;
+}
+
diff --git a/src/qml/qml/v4vm/qv4arrayobject.h b/src/qml/qml/v4vm/qv4arrayobject.h
new file mode 100644
index 0000000000..86d14eb587
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4arrayobject.h
@@ -0,0 +1,103 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4ARRAYOBJECT_H
+#define QV4ARRAYOBJECT_H
+
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <QtCore/qnumeric.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+
+struct ArrayCtor: FunctionObject
+{
+ ArrayCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct ArrayPrototype: ArrayObject
+{
+ ArrayPrototype(ExecutionContext *context) : ArrayObject(context) {}
+
+ void init(ExecutionContext *ctx, const Value &ctor);
+
+ static uint getLength(ExecutionContext *ctx, Object *o);
+
+ static Value method_isArray(SimpleCallContext *ctx);
+ static Value method_toString(SimpleCallContext *ctx);
+ static Value method_toLocaleString(SimpleCallContext *ctx);
+ static Value method_concat(SimpleCallContext *ctx);
+ static Value method_join(SimpleCallContext *ctx);
+ static Value method_pop(SimpleCallContext *ctx);
+ static Value method_push(SimpleCallContext *ctx);
+ static Value method_reverse(SimpleCallContext *ctx);
+ static Value method_shift(SimpleCallContext *ctx);
+ static Value method_slice(SimpleCallContext *ctx);
+ static Value method_sort(SimpleCallContext *ctx);
+ static Value method_splice(SimpleCallContext *ctx);
+ static Value method_unshift(SimpleCallContext *ctx);
+ static Value method_indexOf(SimpleCallContext *ctx);
+ static Value method_lastIndexOf(SimpleCallContext *ctx);
+ static Value method_every(SimpleCallContext *ctx);
+ static Value method_some(SimpleCallContext *ctx);
+ static Value method_forEach(SimpleCallContext *ctx);
+ static Value method_map(SimpleCallContext *ctx);
+ static Value method_filter(SimpleCallContext *ctx);
+ static Value method_reduce(SimpleCallContext *ctx);
+ static Value method_reduceRight(SimpleCallContext *ctx);
+};
+
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4ECMAOBJECTS_P_H
diff --git a/src/qml/qml/v4vm/qv4booleanobject.cpp b/src/qml/qml/v4vm/qv4booleanobject.cpp
new file mode 100644
index 0000000000..e449e5c7db
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4booleanobject.cpp
@@ -0,0 +1,97 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4booleanobject.h"
+
+using namespace QQmlJS::VM;
+
+DEFINE_MANAGED_VTABLE(BooleanCtor);
+
+BooleanCtor::BooleanCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+Value BooleanCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ bool n = argc ? args[0].toBoolean() : false;
+ return Value::fromObject(ctx->engine->newBooleanObject(Value::fromBoolean(n)));
+}
+
+Value BooleanCtor::call(Managed *, ExecutionContext *parentCtx, const Value &thisObject, Value *argv, int argc)
+{
+ bool value = argc ? argv[0].toBoolean() : 0;
+ return Value::fromBoolean(value);
+}
+
+void BooleanPrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(1));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString);
+ defineDefaultProperty(ctx, QStringLiteral("valueOf"), method_valueOf);
+}
+
+Value BooleanPrototype::method_toString(SimpleCallContext *ctx)
+{
+ bool result;
+ if (ctx->thisObject.isBoolean()) {
+ result = ctx->thisObject.booleanValue();
+ } else {
+ BooleanObject *thisObject = ctx->thisObject.asBooleanObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+ result = thisObject->value.booleanValue();
+ }
+
+ return Value::fromString(ctx, QLatin1String(result ? "true" : "false"));
+}
+
+Value BooleanPrototype::method_valueOf(SimpleCallContext *ctx)
+{
+ BooleanObject *thisObject = ctx->thisObject.asBooleanObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+
+ return thisObject->value;
+}
diff --git a/src/qml/qml/v4vm/qv4booleanobject.h b/src/qml/qml/v4vm/qv4booleanobject.h
new file mode 100644
index 0000000000..05b542590a
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4booleanobject.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4BOOLEANOBJECT_H
+#define QV4BOOLEANOBJECT_H
+
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <QtCore/qnumeric.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct BooleanCtor: FunctionObject
+{
+ BooleanCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct BooleanPrototype: BooleanObject
+{
+ BooleanPrototype(ExecutionEngine *engine): BooleanObject(engine, Value::fromBoolean(false)) {}
+ void init(ExecutionContext *ctx, const Value &ctor);
+
+ static Value method_toString(SimpleCallContext *ctx);
+ static Value method_valueOf(SimpleCallContext *ctx);
+};
+
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4BOOLEANOBJECT_H
diff --git a/src/qml/qml/v4vm/qv4codegen.cpp b/src/qml/qml/v4vm/qv4codegen.cpp
new file mode 100644
index 0000000000..c91458282f
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4codegen.cpp
@@ -0,0 +1,3256 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4codegen_p.h"
+#include "qv4util.h"
+#include "debugging.h"
+
+#include <QtCore/QCoreApplication>
+#include <QtCore/QStringList>
+#include <QtCore/QSet>
+#include <QtCore/QBuffer>
+#include <QtCore/QBitArray>
+#include <QtCore/QStack>
+#include <private/qqmljsast_p.h>
+#include <qv4runtime.h>
+#include <qv4context.h>
+#include <cmath>
+#include <iostream>
+#include <cassert>
+
+#ifdef CONST
+#undef CONST
+#endif
+
+using namespace QQmlJS;
+using namespace AST;
+
+namespace {
+QTextStream qout(stdout, QIODevice::WriteOnly);
+
+void dfs(V4IR::BasicBlock *block,
+ QSet<V4IR::BasicBlock *> *V,
+ QVector<V4IR::BasicBlock *> *blocks)
+{
+ if (! V->contains(block)) {
+ V->insert(block);
+
+ foreach (V4IR::BasicBlock *succ, block->out)
+ dfs(succ, V, blocks);
+
+ blocks->append(block);
+ }
+}
+
+struct ComputeUseDef: V4IR::StmtVisitor, V4IR::ExprVisitor
+{
+ V4IR::Function *_function;
+ V4IR::Stmt *_stmt;
+
+ ComputeUseDef(V4IR::Function *function)
+ : _function(function)
+ , _stmt(0) {}
+
+ void operator()(V4IR::Stmt *s) {
+ assert(! s->d);
+ s->d = new V4IR::Stmt::Data;
+ qSwap(_stmt, s);
+ _stmt->accept(this);
+ qSwap(_stmt, s);
+ }
+
+ virtual void visitConst(V4IR::Const *) {}
+ virtual void visitString(V4IR::String *) {}
+ virtual void visitRegExp(V4IR::RegExp *) {}
+ virtual void visitName(V4IR::Name *) {}
+ virtual void visitClosure(V4IR::Closure *) {}
+ virtual void visitUnop(V4IR::Unop *e) { e->expr->accept(this); }
+ virtual void visitBinop(V4IR::Binop *e) { e->left->accept(this); e->right->accept(this); }
+ virtual void visitSubscript(V4IR::Subscript *e) { e->base->accept(this); e->index->accept(this); }
+ virtual void visitMember(V4IR::Member *e) { e->base->accept(this); }
+ virtual void visitExp(V4IR::Exp *s) { s->expr->accept(this); }
+ virtual void visitEnter(V4IR::Enter *) {}
+ virtual void visitLeave(V4IR::Leave *) {}
+ virtual void visitJump(V4IR::Jump *) {}
+ virtual void visitCJump(V4IR::CJump *s) { s->cond->accept(this); }
+ virtual void visitRet(V4IR::Ret *s) { s->expr->accept(this); }
+ virtual void visitTry(V4IR::Try *t) {
+ if (! _stmt->d->defs.contains(t->exceptionVar->index))
+ _stmt->d->defs.append(t->exceptionVar->index);
+ }
+
+ virtual void visitTemp(V4IR::Temp *e) {
+ if (e->index < 0 || e->scope != 0)
+ return;
+
+ if (! _stmt->d->uses.contains(e->index))
+ _stmt->d->uses.append(e->index);
+ }
+
+ virtual void visitCall(V4IR::Call *e) {
+ e->base->accept(this);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr->accept(this);
+ }
+
+ virtual void visitNew(V4IR::New *e) {
+ e->base->accept(this);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr->accept(this);
+ }
+
+ virtual void visitMove(V4IR::Move *s) {
+ if (V4IR::Temp *t = s->target->asTemp()) {
+ if (t->index >= 0 && t->scope == 0) // only collect unscoped locals and temps
+ if (! _stmt->d->defs.contains(t->index))
+ _stmt->d->defs.append(t->index);
+ } else {
+ // source was not a temp, but maybe a sub-expression has a temp
+ // (e.g. base expressions for subscripts/member-access),
+ // so visit it.
+ s->target->accept(this);
+ }
+ // whatever the target expr was, always visit the source expr to collect
+ // temps there.
+ s->source->accept(this);
+ }
+};
+
+void liveness(V4IR::Function *function)
+{
+ QSet<V4IR::BasicBlock *> V;
+ QVector<V4IR::BasicBlock *> blocks;
+
+ ComputeUseDef computeUseDef(function);
+ foreach (V4IR::BasicBlock *block, function->basicBlocks) {
+ foreach (V4IR::Stmt *s, block->statements)
+ computeUseDef(s);
+ }
+
+ dfs(function->basicBlocks.at(0), &V, &blocks);
+
+ bool changed;
+ do {
+ changed = false;
+
+ foreach (V4IR::BasicBlock *block, blocks) {
+ const QBitArray previousLiveIn = block->liveIn;
+ const QBitArray previousLiveOut = block->liveOut;
+ QBitArray live(function->tempCount);
+ foreach (V4IR::BasicBlock *succ, block->out)
+ live |= succ->liveIn;
+ block->liveOut = live;
+ for (int i = block->statements.size() - 1; i != -1; --i) {
+ V4IR::Stmt *s = block->statements.at(i);
+ s->d->liveOut = live;
+ foreach (unsigned d, s->d->defs)
+ live.clearBit(d);
+ foreach (unsigned u, s->d->uses)
+ live.setBit(u);
+ s->d->liveIn = live;
+ }
+ block->liveIn = live;
+ if (! changed) {
+ if (previousLiveIn != block->liveIn || previousLiveOut != block->liveOut)
+ changed = true;
+ }
+ }
+ } while (changed);
+}
+
+static inline bool isDeadAssignment(V4IR::Stmt *stmt, int localCount)
+{
+ V4IR::Move *move = stmt->asMove();
+ if (!move || move->op != V4IR::OpInvalid)
+ return false;
+ V4IR::Temp *target = move->target->asTemp();
+ if (!target)
+ return false;
+ if (target->scope || target->index < localCount)
+ return false;
+
+ if (V4IR::Name *n = move->source->asName()) {
+ if (*n->id != QStringLiteral("this"))
+ return false;
+ } else if (!move->source->asConst() && !move->source->asTemp()) {
+ return false;
+ }
+
+ return !stmt->d->liveOut.at(target->index);
+}
+
+void removeDeadAssignments(V4IR::Function *function)
+{
+ const int localCount = function->locals.size();
+ foreach (V4IR::BasicBlock *bb, function->basicBlocks) {
+ QVector<V4IR::Stmt *> &statements = bb->statements;
+ for (int i = 0; i < statements.size(); ) {
+// qout<<"removeDeadAssignments: considering ";statements.at(i)->dump(qout);qout<<"\n";qout.flush();
+ if (isDeadAssignment(statements.at(i), localCount))
+ statements.remove(i);
+ else
+ ++i;
+ }
+ }
+}
+
+class ConstantPropagation: public V4IR::StmtVisitor, public V4IR::ExprVisitor
+{
+ struct Value {
+ enum Type {
+ InvalidType = 0,
+ UndefinedType,
+ NullType,
+ BoolType,
+ NumberType,
+ ThisType,
+ StringType
+ } type;
+
+ union {
+ double numberValue;
+ V4IR::String *stringValue;
+ };
+
+ Value()
+ : type(InvalidType), stringValue(0)
+ {}
+
+ explicit Value(V4IR::String *str)
+ : type(StringType), stringValue(str)
+ {}
+
+ explicit Value(Type t)
+ : type(t), stringValue(0)
+ {}
+
+ Value(Type t, double val)
+ : type(t), numberValue(val)
+ {}
+
+ bool isValid() const
+ { return type != InvalidType; }
+
+ bool operator<(const Value &other) const
+ {
+ if (type < other.type)
+ return true;
+ if (type == Value::NumberType && other.type == Value::NumberType) {
+ if (numberValue == 0 && other.numberValue == 0)
+ return isNegative(numberValue) && !isNegative(other.numberValue);
+ else
+ return numberValue < other.numberValue;
+ }
+ if (type == Value::BoolType && other.type == Value::BoolType)
+ return numberValue < other.numberValue;
+ if (type == Value::StringType && other.type == Value::StringType)
+ return *stringValue->value < *other.stringValue->value;
+ return false;
+ }
+
+ bool operator==(const Value &other) const
+ {
+ if (type != other.type)
+ return false;
+ if (type == Value::NumberType && other.type == Value::NumberType) {
+ if (numberValue == 0 && other.numberValue == 0)
+ return isNegative(numberValue) == isNegative(other.numberValue);
+ else
+ return numberValue == other.numberValue;
+ }
+ if (type == Value::BoolType && other.type == Value::BoolType)
+ return numberValue == other.numberValue;
+ if (type == Value::StringType && other.type == Value::StringType)
+ return *stringValue->value == *other.stringValue->value;
+ return false;
+ }
+ };
+
+public:
+ void run(V4IR::Function *function)
+ {
+ if (function->hasTry)
+ return;
+ localCount = function->locals.size();
+ if (function->hasWith) {
+ thisTemp = -1;
+ } else {
+ V4IR::BasicBlock *entryBlock = function->basicBlocks.at(0);
+ thisTemp = entryBlock->newTemp();
+ V4IR::Move *fetchThis = function->New<V4IR::Move>();
+ fetchThis->init(entryBlock->TEMP(thisTemp),
+ entryBlock->NAME(QStringLiteral("this"), 0, 0),
+ V4IR::OpInvalid);
+ entryBlock->statements.prepend(fetchThis);
+ }
+
+ foreach (V4IR::BasicBlock *block, function->basicBlocks) {
+// qDebug()<<"--- Starting with BB"<<block->index;
+ reset();
+ QVector<V4IR::Stmt *> &statements = block->statements;
+ foreach (V4IR::Stmt *stmt, statements) {
+// qout<<"*** ";stmt->dump(qout);qout<<"\n";qout.flush();
+ stmt->accept(this);
+ }
+ }
+ }
+
+protected:
+ virtual void visitConst(V4IR::Const *) {}
+ virtual void visitString(V4IR::String *) {}
+ virtual void visitRegExp(V4IR::RegExp *) {}
+ virtual void visitName(V4IR::Name *) {}
+ virtual void visitClosure(V4IR::Closure *) {}
+ virtual void visitUnop(V4IR::Unop *e) { e->expr->accept(this); }
+ virtual void visitBinop(V4IR::Binop *e) { e->left->accept(this); e->right->accept(this); }
+ virtual void visitSubscript(V4IR::Subscript *e) { e->base->accept(this); e->index->accept(this); }
+ virtual void visitMember(V4IR::Member *e) { e->base->accept(this); }
+ virtual void visitExp(V4IR::Exp *s) { s->expr->accept(this); }
+ virtual void visitEnter(V4IR::Enter *) {}
+ virtual void visitLeave(V4IR::Leave *) {}
+ virtual void visitJump(V4IR::Jump *) {}
+ virtual void visitCJump(V4IR::CJump *s) { s->cond->accept(this); }
+ virtual void visitRet(V4IR::Ret *s) { s->expr->accept(this); }
+ virtual void visitTry(V4IR::Try *) {}
+
+ virtual void visitCall(V4IR::Call *e) {
+ e->base->accept(this);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr->accept(this);
+ }
+
+ virtual void visitNew(V4IR::New *e) {
+ e->base->accept(this);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr->accept(this);
+ }
+
+ virtual void visitTemp(V4IR::Temp *e) {
+ if (e->scope)
+ return;
+
+ const int replacement = tempReplacement.value(e->index, -1);
+ if (replacement != -1) {
+// qDebug() << "+++ Replacing" << e->index << "with" << replacement;
+ e->index = replacement;
+ }
+ }
+
+ virtual void visitMove(V4IR::Move *s) {
+ V4IR::Temp *targetTemp = s->target->asTemp();
+ if (targetTemp && targetTemp->index >= localCount && !targetTemp->scope) {
+ if (s->op == V4IR::OpInvalid) {
+ if (V4IR::Name *n = s->source->asName()) {
+ if (thisTemp != -1) {
+ if (*n->id == QStringLiteral("this")) {
+ check(targetTemp->index, Value(Value::ThisType));
+ return;
+ }
+ }
+ } else if (V4IR::Const *c = s->source->asConst()) {
+ Value value;
+ switch (c->type) {
+ case V4IR::UndefinedType: value.type = Value::UndefinedType; break;
+ case V4IR::NullType: value.type = Value::NullType; break;
+ case V4IR::BoolType: value.type = Value::BoolType; value.numberValue = c->value == 0 ? 0 : 1; break;
+ case V4IR::NumberType: value.type = Value::NumberType; value.numberValue = c->value; break;
+ default: Q_ASSERT("unknown const type"); return;
+ }
+ check(targetTemp->index, value);
+ return;
+ } else if (V4IR::String *str = s->source->asString()) {
+ check(targetTemp->index, Value(str));
+ return;
+ }
+ }
+ invalidate(targetTemp->index, Value());
+ } else {
+ s->target->accept(this);
+ }
+
+ s->source->accept(this);
+ }
+
+ void invalidate(int &targetTempIndex, const Value &value)
+ {
+ QMap<int, Value>::iterator it = valueForTemp.find(targetTempIndex);
+ if (it != valueForTemp.end()) {
+ if (it.value() == value)
+ return;
+ tempForValue.remove(it.value());
+ valueForTemp.erase(it);
+ }
+
+ QMap<int, int>::iterator it2 = tempReplacement.find(targetTempIndex);
+ if (it2 != tempReplacement.end()) {
+ tempReplacement.erase(it2);
+ }
+ }
+
+ void check(int &targetTempIndex, const Value &value)
+ {
+ Q_ASSERT(value.isValid());
+
+ invalidate(targetTempIndex, value);
+
+ int replacementTemp = tempForValue.value(value, -1);
+ if (replacementTemp == -1) {
+// qDebug() << "+++ inserting temp" << targetTempIndex;
+ tempForValue.insert(value, targetTempIndex);
+ valueForTemp.insert(targetTempIndex, value);
+ } else {
+// qDebug() << "+++ temp" << targetTempIndex << "can be replaced with" << replacementTemp;
+ tempReplacement.insert(targetTempIndex, replacementTemp);
+ }
+ }
+
+ void reset()
+ {
+ tempForValue.clear();
+ tempReplacement.clear();
+ if (thisTemp != -1)
+ tempForValue.insert(Value(Value::ThisType), thisTemp);
+ }
+
+private:
+ QMap<Value, int> tempForValue;
+ QMap<int, Value> valueForTemp;
+ QMap<int, int> tempReplacement;
+
+ int localCount;
+ int thisTemp;
+};
+
+#undef DEBUG_TEMP_COMPRESSION
+#ifdef DEBUG_TEMP_COMPRESSION
+# define DBTC(x) x
+#else // !DEBUG_TEMP_COMPRESSION
+# define DBTC(x)
+#endif // DEBUG_TEMP_COMPRESSION
+class CompressTemps: public V4IR::StmtVisitor, V4IR::ExprVisitor
+{
+public:
+ void run(V4IR::Function *function)
+ {
+ _nextFree = 0;
+ _active.reserve(function->tempCount);
+ _localCount = function->locals.size();
+
+ DBTC(qDebug() << "starting on function" << (*function->name) << "with" << (function->tempCount - _localCount) << "temps.";)
+
+ QVector<int> pinned;
+ foreach (V4IR::BasicBlock *block, function->basicBlocks) {
+ if (V4IR::Stmt *last = block->terminator()) {
+ const QBitArray &liveOut = last->d->liveOut;
+ for (int i = _localCount, ei = liveOut.size(); i < ei; ++i) {
+ if (liveOut.at(i) && !pinned.contains(i)) {
+ pinned.append(i);
+ add(i - _localCount, _nextFree);
+ }
+ }
+ }
+ }
+ _pinnedCount = _nextFree;
+
+ int maxUsed = _nextFree;
+
+ foreach (V4IR::BasicBlock *block, function->basicBlocks) {
+ DBTC(qDebug("L%d:", block->index));
+
+ for (int i = 0, ei = block->statements.size(); i < ei; ++i ) {
+ _currentStatement = block->statements[i];
+ if (i == 0)
+ expireOld();
+
+ DBTC(_currentStatement->dump(qout);qout<<endl<<flush;)
+
+ if (_currentStatement->d)
+ _currentStatement->accept(this);
+ }
+ maxUsed = std::max(maxUsed, _nextFree);
+ }
+ DBTC(qDebug() << "function" << (*function->name) << "uses" << maxUsed << "temps.";)
+ function->tempCount = maxUsed + _localCount;
+ }
+
+protected:
+ virtual void visitConst(V4IR::Const *) {}
+ virtual void visitString(V4IR::String *) {}
+ virtual void visitRegExp(V4IR::RegExp *) {}
+ virtual void visitName(V4IR::Name *) {}
+ virtual void visitClosure(V4IR::Closure *) {}
+ virtual void visitUnop(V4IR::Unop *e) { e->expr->accept(this); }
+ virtual void visitBinop(V4IR::Binop *e) { e->left->accept(this); e->right->accept(this); }
+ virtual void visitSubscript(V4IR::Subscript *e) { e->base->accept(this); e->index->accept(this); }
+ virtual void visitMember(V4IR::Member *e) { e->base->accept(this); }
+ virtual void visitExp(V4IR::Exp *s) { s->expr->accept(this); }
+ virtual void visitEnter(V4IR::Enter *) {}
+ virtual void visitLeave(V4IR::Leave *) {}
+ virtual void visitJump(V4IR::Jump *) {}
+ virtual void visitCJump(V4IR::CJump *s) { s->cond->accept(this); }
+ virtual void visitRet(V4IR::Ret *s) { s->expr->accept(this); }
+ virtual void visitTry(V4IR::Try *t) { visitTemp(t->exceptionVar); }
+
+ virtual void visitTemp(V4IR::Temp *e) {
+ if (e->scope) // scoped local
+ return;
+ if (e->index < _localCount) // local or argument
+ return;
+
+ e->index = remap(e->index - _localCount) + _localCount;
+ }
+
+ virtual void visitCall(V4IR::Call *e) {
+ e->base->accept(this);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr->accept(this);
+ }
+
+ virtual void visitNew(V4IR::New *e) {
+ e->base->accept(this);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr->accept(this);
+ }
+
+ virtual void visitMove(V4IR::Move *s) {
+ s->target->accept(this);
+ s->source->accept(this);
+ }
+
+private:
+ int remap(int tempIndex) {
+ for (ActiveTemps::const_iterator i = _active.begin(), ei = _active.end(); i < ei; ++i) {
+ if (i->first == tempIndex) {
+ DBTC(qDebug() << " lookup" << (tempIndex + _localCount) << "->" << (i->second + _localCount);)
+ return i->second;
+ }
+ }
+
+ int firstFree = expireOld();
+ add(tempIndex, firstFree);
+ return firstFree;
+ }
+
+ void add(int tempIndex, int firstFree) {
+ if (_nextFree <= firstFree)
+ _nextFree = firstFree + 1;
+ _active.prepend(qMakePair(tempIndex, firstFree));
+ DBTC(qDebug() << " add" << (tempIndex + _localCount) << "->" << (firstFree+ _localCount);)
+ }
+
+ int expireOld() {
+ Q_ASSERT(_currentStatement->d);
+
+ const QBitArray &liveIn = _currentStatement->d->liveIn;
+ QBitArray inUse(_nextFree);
+ int i = 0;
+ while (i < _active.size()) {
+ const QPair<int, int> &p = _active[i];
+
+ if (p.second < _pinnedCount) {
+ inUse.setBit(p.second);
+ ++i;
+ continue;
+ }
+
+ if (liveIn[p.first + _localCount]) {
+ inUse[p.second] = true;
+ ++i;
+ } else {
+ DBTC(qDebug() << " remove" << (p.first + _localCount) << "->" << (p.second + _localCount);)
+ _active.remove(i);
+ }
+ }
+ for (int i = 0, ei = inUse.size(); i < ei; ++i)
+ if (!inUse[i])
+ return i;
+ return _nextFree;
+ }
+
+private:
+ typedef QVector<QPair<int, int> > ActiveTemps;
+ ActiveTemps _active;
+ V4IR::Stmt *_currentStatement;
+ int _localCount;
+ int _nextFree;
+ int _pinnedCount;
+};
+#undef DBTC
+
+} // end of anonymous namespace
+
+class Codegen::ScanFunctions: Visitor
+{
+ typedef TemporaryAssignment<bool> TemporaryBoolAssignment;
+public:
+ ScanFunctions(Codegen *cg, const QString &sourceCode)
+ : _cg(cg)
+ , _sourceCode(sourceCode)
+ , _env(0)
+ , _inFuncBody(false)
+ , _allowFuncDecls(true)
+ {
+ }
+
+ void operator()(Node *node)
+ {
+ if (node)
+ node->accept(this);
+ }
+
+ inline void enterEnvironment(Node *node)
+ {
+ Environment *e = _cg->newEnvironment(node, _env);
+ if (!e->isStrict)
+ e->isStrict = _cg->_strictMode;
+ _envStack.append(e);
+ _env = e;
+ }
+
+ inline void leaveEnvironment()
+ {
+ _envStack.pop();
+ _env = _envStack.isEmpty() ? 0 : _envStack.top();
+ }
+
+protected:
+ using Visitor::visit;
+ using Visitor::endVisit;
+
+ void checkDirectivePrologue(SourceElements *ast)
+ {
+ for (SourceElements *it = ast; it; it = it->next) {
+ if (StatementSourceElement *stmt = cast<StatementSourceElement *>(it->element)) {
+ if (ExpressionStatement *expr = cast<ExpressionStatement *>(stmt->statement)) {
+ if (StringLiteral *strLit = cast<StringLiteral *>(expr->expression)) {
+ // Use the source code, because the StringLiteral's
+ // value might have escape sequences in it, which is not
+ // allowed.
+ if (strLit->literalToken.length < 2)
+ continue;
+ QStringRef str = _sourceCode.midRef(strLit->literalToken.offset + 1, strLit->literalToken.length - 2);
+ if (str == QStringLiteral("use strict")) {
+ _env->isStrict = true;
+ } else {
+ // TODO: give a warning.
+ }
+ continue;
+ }
+ }
+ }
+
+ break;
+ }
+ }
+
+ void checkName(const QStringRef &name, const SourceLocation &loc)
+ {
+ if (_env->isStrict) {
+ if (name == QLatin1String("implements")
+ || name == QLatin1String("interface")
+ || name == QLatin1String("let")
+ || name == QLatin1String("package")
+ || name == QLatin1String("private")
+ || name == QLatin1String("protected")
+ || name == QLatin1String("public")
+ || name == QLatin1String("static")
+ || name == QLatin1String("yield")) {
+ _cg->throwSyntaxError(loc, QCoreApplication::translate("qv4codegen", "Unexpected strict mode reserved word"));
+ }
+ }
+ }
+ void checkForArguments(AST::FormalParameterList *parameters)
+ {
+ while (parameters) {
+ if (parameters->name == QStringLiteral("arguments"))
+ _env->usesArgumentsObject = Environment::ArgumentsObjectNotUsed;
+ parameters = parameters->next;
+ }
+ }
+
+ virtual bool visit(Program *ast)
+ {
+ enterEnvironment(ast);
+ checkDirectivePrologue(ast->elements);
+ return true;
+ }
+
+ virtual void endVisit(Program *)
+ {
+ leaveEnvironment();
+ }
+
+ virtual bool visit(CallExpression *ast)
+ {
+ if (! _env->hasDirectEval) {
+ if (IdentifierExpression *id = cast<IdentifierExpression *>(ast->base)) {
+ if (id->name == QStringLiteral("eval")) {
+ if (_env->usesArgumentsObject == Environment::ArgumentsObjectUnknown)
+ _env->usesArgumentsObject = Environment::ArgumentsObjectUsed;
+ _env->hasDirectEval = true;
+ }
+ }
+ }
+ int argc = 0;
+ for (ArgumentList *it = ast->arguments; it; it = it->next)
+ ++argc;
+ _env->maxNumberOfArguments = qMax(_env->maxNumberOfArguments, argc);
+ return true;
+ }
+
+ virtual bool visit(NewMemberExpression *ast)
+ {
+ int argc = 0;
+ for (ArgumentList *it = ast->arguments; it; it = it->next)
+ ++argc;
+ _env->maxNumberOfArguments = qMax(_env->maxNumberOfArguments, argc);
+ return true;
+ }
+
+ virtual bool visit(ArrayLiteral *ast)
+ {
+ int index = 0;
+ for (ElementList *it = ast->elements; it; it = it->next) {
+ for (Elision *elision = it->elision; elision; elision = elision->next)
+ ++index;
+ ++index;
+ }
+ if (ast->elision) {
+ for (Elision *elision = ast->elision->next; elision; elision = elision->next)
+ ++index;
+ }
+ _env->maxNumberOfArguments = qMax(_env->maxNumberOfArguments, index);
+ return true;
+ }
+
+ virtual bool visit(VariableDeclaration *ast)
+ {
+ if (_env->isStrict && (ast->name == QLatin1String("eval") || ast->name == QLatin1String("arguments")))
+ _cg->throwSyntaxError(ast->identifierToken, QCoreApplication::translate("qv4codegen", "Variable name may not be eval or arguments in strict mode"));
+ checkName(ast->name, ast->identifierToken);
+ if (ast->name == QLatin1String("arguments"))
+ _env->usesArgumentsObject = Environment::ArgumentsObjectNotUsed;
+ _env->enter(ast->name.toString(), ast->expression ? Environment::VariableDefinition : Environment::VariableDeclaration);
+ return true;
+ }
+
+ virtual bool visit(IdentifierExpression *ast)
+ {
+ checkName(ast->name, ast->identifierToken);
+ if (_env->usesArgumentsObject == Environment::ArgumentsObjectUnknown && ast->name == QLatin1String("arguments"))
+ _env->usesArgumentsObject = Environment::ArgumentsObjectUsed;
+ return true;
+ }
+
+ virtual bool visit(ExpressionStatement *ast)
+ {
+ if (FunctionExpression* expr = AST::cast<AST::FunctionExpression*>(ast->expression)) {
+ if (!_allowFuncDecls)
+ _cg->throwSyntaxError(expr->functionToken, QCoreApplication::translate("qv4codegen", "conditional function or closure declaration"));
+
+ enterFunction(expr, /*enterName*/ true);
+ Node::accept(expr->formals, this);
+ Node::accept(expr->body, this);
+ leaveEnvironment();
+ return false;
+ } else {
+ SourceLocation firstToken = ast->firstSourceLocation();
+ if (_sourceCode.midRef(firstToken.offset, firstToken.length) == QStringLiteral("function")) {
+ _cg->throwSyntaxError(firstToken, QCoreApplication::translate("qv4codegen", "unexpected token"));
+ }
+ }
+ return true;
+ }
+
+ virtual bool visit(FunctionExpression *ast)
+ {
+ enterFunction(ast, /*enterName*/ false);
+ return true;
+ }
+
+ void enterFunction(FunctionExpression *ast, bool enterName, bool isExpression = true)
+ {
+ if (_env->isStrict && (ast->name == QLatin1String("eval") || ast->name == QLatin1String("arguments")))
+ _cg->throwSyntaxError(ast->identifierToken, QCoreApplication::translate("qv4codegen", "Function name may not be eval or arguments in strict mode"));
+ enterFunction(ast, ast->name.toString(), ast->formals, ast->body, enterName ? ast : 0, isExpression);
+ }
+
+ virtual void endVisit(FunctionExpression *)
+ {
+ leaveEnvironment();
+ }
+
+    // Function expressions are always allowed inside object literal property
+    // values, so force _allowFuncDecls on while scanning the properties.
+    virtual bool visit(ObjectLiteral *ast)
+    {
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, true);
+        Node::accept(ast->properties, this);
+        return false;
+    }
+
+    // A getter/setter body is an anonymous function: enter it with an empty
+    // name and no FunctionExpression node.
+    virtual bool visit(PropertyGetterSetter *ast)
+    {
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, true);
+        enterFunction(ast, QString(), ast->formals, ast->functionBody, /*FunctionExpression*/0, /*isExpression*/false);
+        return true;
+    }
+
+    virtual void endVisit(PropertyGetterSetter *)
+    {
+        leaveEnvironment();
+    }
+
+    virtual bool visit(FunctionDeclaration *ast)
+    {
+        enterFunction(ast, /*enterName*/ true, /*isExpression */false);
+        return true;
+    }
+
+    virtual void endVisit(FunctionDeclaration *)
+    {
+        leaveEnvironment();
+    }
+
+    // While scanning the elements of a function body, _inFuncBody records
+    // that statements are directly inside a function (not nested in a
+    // conditional construct).
+    virtual bool visit(FunctionBody *ast)
+    {
+        TemporaryBoolAssignment inFuncBody(_inFuncBody, true);
+        Node::accept(ast->elements, this);
+        return false;
+    }
+
+    // 'with' is forbidden in strict mode.
+    virtual bool visit(WithStatement *ast)
+    {
+        if (_env->isStrict) {
+            _cg->throwSyntaxError(ast->withToken, QCoreApplication::translate("qv4codegen", "'with' statement is not allowed in strict mode"));
+            return false;
+        }
+
+        return true;
+    }
+
+    // Function declarations inside the branches of an 'if' only count as
+    // conditional declarations when we are NOT directly inside a function
+    // body (see visit(ExpressionStatement) for the error they trigger).
+    virtual bool visit(IfStatement *ast) {
+        Node::accept(ast->expression, this);
+
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, !_inFuncBody);
+        Node::accept(ast->ok, this);
+        Node::accept(ast->ko, this);
+
+        return false;
+    }
+
+    // Loop visitors: the loop body is scanned with _allowFuncDecls adjusted,
+    // the controlling expressions with the current setting.
+    // NOTE(review): 'while'/'if' use !_inFuncBody while the do/for family
+    // uses !_env->isStrict as the new value — presumably intentional, but
+    // worth confirming against the intended conditional-declaration rules.
+    virtual bool visit(WhileStatement *ast) {
+        Node::accept(ast->expression, this);
+
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, !_inFuncBody);
+        Node::accept(ast->statement, this);
+
+        return false;
+    }
+
+    virtual bool visit(DoWhileStatement *ast) {
+        {
+            // Scope the override so the condition below is scanned with the
+            // original _allowFuncDecls value.
+            TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, !_env->isStrict);
+            Node::accept(ast->statement, this);
+        }
+        Node::accept(ast->expression, this);
+        return false;
+    }
+
+    virtual bool visit(ForStatement *ast) {
+        Node::accept(ast->initialiser, this);
+        Node::accept(ast->condition, this);
+        Node::accept(ast->expression, this);
+
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, !_env->isStrict);
+        Node::accept(ast->statement, this);
+
+        return false;
+    }
+
+    virtual bool visit(LocalForStatement *ast) {
+        Node::accept(ast->declarations, this);
+        Node::accept(ast->condition, this);
+        Node::accept(ast->expression, this);
+
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, !_env->isStrict);
+        Node::accept(ast->statement, this);
+
+        return false;
+    }
+
+    virtual bool visit(ForEachStatement *ast) {
+        Node::accept(ast->initialiser, this);
+        Node::accept(ast->expression, this);
+
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, !_env->isStrict);
+        Node::accept(ast->statement, this);
+
+        return false;
+    }
+
+    virtual bool visit(LocalForEachStatement *ast) {
+        Node::accept(ast->declaration, this);
+        Node::accept(ast->expression, this);
+
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, !_env->isStrict);
+        Node::accept(ast->statement, this);
+
+        return false;
+    }
+
+    // Blocks keep the current _allowFuncDecls, except in strict mode where
+    // function declarations in blocks are disallowed.
+    virtual bool visit(Block *ast) {
+        TemporaryBoolAssignment allowFuncDecls(_allowFuncDecls, _env->isStrict ? false : _allowFuncDecls);
+        Node::accept(ast->statements, this);
+        return false;
+    }
+
+private:
+    // Core of function entry during the scan: registers the function in the
+    // enclosing environment, switches to the function's own environment, and
+    // performs the strict-mode checks on the formal parameter list.
+    void enterFunction(Node *ast, const QString &name, FormalParameterList *formals, FunctionBody *body, FunctionExpression *expr, bool isExpression)
+    {
+        // Remember the enclosing scope's strictness: parameter restrictions
+        // apply if either the outer scope or this function is strict.
+        bool wasStrict = false;
+        if (_env) {
+            _env->hasNestedFunctions = true;
+            _env->enter(name, Environment::FunctionDefinition, expr);
+            // A function named "arguments" shadows the arguments object in
+            // the enclosing scope.
+            if (name == QLatin1String("arguments"))
+                _env->usesArgumentsObject = Environment::ArgumentsObjectNotUsed;
+            wasStrict = _env->isStrict;
+        }
+
+        enterEnvironment(ast);
+        checkForArguments(formals);
+
+        _env->isNamedFunctionExpression = isExpression && !name.isEmpty();
+        _env->formals = formals;
+
+        // The directive prologue ("use strict" etc.) may set _env->isStrict.
+        if (body)
+            checkDirectivePrologue(body->elements);
+
+        if (wasStrict || _env->isStrict) {
+            // Strict mode: no duplicate parameter names, and neither "eval"
+            // nor "arguments" may be used as a parameter name.
+            QStringList args;
+            for (FormalParameterList *it = formals; it; it = it->next) {
+                QString arg = it->name.toString();
+                if (args.contains(arg))
+                    _cg->throwSyntaxError(it->identifierToken, QCoreApplication::translate("qv4codegen", "Duplicate parameter name '%1' is not allowed in strict mode").arg(arg));
+                if (arg == QLatin1String("eval") || arg == QLatin1String("arguments"))
+                    _cg->throwSyntaxError(it->identifierToken, QCoreApplication::translate("qv4codegen", "'%1' cannot be used as parameter name in strict mode").arg(arg));
+                args += arg;
+            }
+        }
+    }
+
+private: // fields:
+    Codegen *_cg;                      // owning code generator (error reporting)
+    const QString _sourceCode;         // raw source, used for token text lookups
+    Environment *_env;                 // current scanning environment
+    QStack<Environment *> _envStack;   // enclosing environments
+
+    bool _inFuncBody;                  // directly inside a function body?
+    bool _allowFuncDecls;              // function declarations legal here?
+};
+
+// Constructor for use with a live execution context: the debugger is taken
+// from the context's engine, errors are thrown into the context.
+Codegen::Codegen(VM::ExecutionContext *context, bool strict)
+    : _module(0)
+    , _function(0)
+    , _block(0)
+    , _exitBlock(0)
+    , _throwBlock(0)
+    , _returnAddress(0)
+    , _mode(GlobalCode)
+    , _env(0)
+    , _loop(0)
+    , _labelledStatement(0)
+    , _scopeAndFinally(0)
+    , _context(context)
+    , _strictMode(strict)
+    , _debugger(context->engine->debugger)
+    , _errorHandler(0)
+{
+}
+
+// Constructor for use without an execution context: errors are reported
+// through the given ErrorHandler instead, and no debugger is attached.
+Codegen::Codegen(ErrorHandler *errorHandler, bool strictMode)
+    : _module(0)
+    , _function(0)
+    , _block(0)
+    , _exitBlock(0)
+    , _throwBlock(0)
+    , _returnAddress(0)
+    , _mode(GlobalCode)
+    , _env(0)
+    , _loop(0)
+    , _labelledStatement(0)
+    , _scopeAndFinally(0)
+    , _context(0)
+    , _strictMode(strictMode)
+    , _debugger(0)
+    , _errorHandler(errorHandler)
+{
+}
+
+// Compiles a whole Program: runs the ScanFunctions pass, generates IR for
+// the synthetic "%entry" function, linearizes all produced functions and
+// returns the global code.  The environment map built by the scan is
+// destroyed before returning.
+V4IR::Function *Codegen::operator()(const QString &fileName,
+                                    const QString &sourceCode,
+                                    Program *node,
+                                    V4IR::Module *module,
+                                    Mode mode,
+                                    const QStringList &inheritedLocals)
+{
+    assert(node);
+
+    _fileName = fileName;
+    _module = module;
+    _env = 0;
+
+    ScanFunctions scan(this, sourceCode);
+    scan(node);
+
+    V4IR::Function *globalCode = defineFunction(QStringLiteral("%entry"), node, 0,
+                                                node->elements, mode, inheritedLocals);
+    if (_debugger) {
+        // NOTE(review): node->elements is dereferenced without a null check
+        // here — presumably an empty program cannot reach this point; confirm.
+        if (node->elements->element) {
+            SourceLocation loc = node->elements->element->firstSourceLocation();
+            _debugger->setSourceLocation(globalCode, loc.startLine, loc.startColumn);
+        }
+    }
+
+    foreach (V4IR::Function *function, _module->functions) {
+        linearize(function);
+    }
+
+    qDeleteAll(_envMap);
+    _envMap.clear();
+
+    return globalCode;
+}
+
+// Compiles a single FunctionExpression (e.g. a QML binding): the scan pass
+// is run with a faked global environment wrapped around the function.
+V4IR::Function *Codegen::operator()(const QString &fileName,
+                                    const QString &sourceCode,
+                                    AST::FunctionExpression *ast,
+                                    V4IR::Module *module)
+{
+    _fileName = fileName;
+    _module = module;
+    _env = 0;
+
+    ScanFunctions scan(this, sourceCode);
+    // fake a global environment
+    scan.enterEnvironment(0);
+    scan(ast);
+    scan.leaveEnvironment();
+
+    V4IR::Function *function = defineFunction(ast->name.toString(), ast, ast->formals, ast->body ? ast->body->elements : 0);
+    if (_debugger)
+        _debugger->setSourceLocation(function, ast->functionToken.startLine, ast->functionToken.startColumn);
+
+    // Linearize everything the module now contains (the loop variable
+    // intentionally shadows 'function' above).
+    foreach (V4IR::Function *function, _module->functions) {
+        linearize(function);
+    }
+
+    qDeleteAll(_envMap);
+    _envMap.clear();
+
+    return function;
+}
+
+
+// Switches _env to the environment the scan pass recorded for this node.
+void Codegen::enterEnvironment(Node *node)
+{
+    _env = _envMap.value(node);
+    assert(_env);
+}
+
+void Codegen::leaveEnvironment()
+{
+    assert(_env);
+    _env = _env->parent;
+}
+
+// Pushes a Loop record carrying the break/continue targets; it consumes the
+// pending labelled statement so 'break label' can find its loop.
+void Codegen::enterLoop(Statement *node, V4IR::BasicBlock *breakBlock, V4IR::BasicBlock *continueBlock)
+{
+    _loop = new Loop(node, breakBlock, continueBlock, _loop);
+    _loop->labelledStatement = _labelledStatement; // consume the enclosing labelled statement
+    _loop->scopeAndFinally = _scopeAndFinally;
+    _labelledStatement = 0;
+}
+
+void Codegen::leaveLoop()
+{
+    Loop *current = _loop;
+    _loop = _loop->parent;
+    delete current;
+}
+
+// Builds a MEMBER access, materializing the base into a temp first unless
+// it already is one.
+V4IR::Expr *Codegen::member(V4IR::Expr *base, const QString *name)
+{
+    if (base->asTemp() /*|| base->asName()*/)
+        return _block->MEMBER(base->asTemp(), name);
+    else {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), base);
+        return _block->MEMBER(_block->TEMP(t), name);
+    }
+}
+
+// Builds a SUBSCRIPT access; both base and index are forced into temps.
+V4IR::Expr *Codegen::subscript(V4IR::Expr *base, V4IR::Expr *index)
+{
+    if (! base->asTemp()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), base);
+        base = _block->TEMP(t);
+    }
+
+    if (! index->asTemp()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), index);
+        index = _block->TEMP(t);
+    }
+
+    assert(base->asTemp() && index->asTemp());
+    return _block->SUBSCRIPT(base->asTemp(), index->asTemp());
+}
+
+// Normalizes a call argument: anything that is not already a temp is
+// copied into one.  Null is passed through.
+V4IR::Expr *Codegen::argument(V4IR::Expr *expr)
+{
+    if (expr && ! expr->asTemp()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), expr);
+        expr = _block->TEMP(t);
+    }
+    return expr;
+}
+
+// keeps references alive, converts other expressions to temps
+V4IR::Expr *Codegen::reference(V4IR::Expr *expr)
+{
+    if (expr && !expr->asTemp() && !expr->asName() && !expr->asMember() && !expr->asSubscript()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), expr);
+        expr = _block->TEMP(t);
+    }
+    return expr;
+}
+
+// Builds a unary operation.  Numeric constants are folded at compile time;
+// anything else is materialized into a temp and emitted as a UNOP.
+V4IR::Expr *Codegen::unop(V4IR::AluOp op, V4IR::Expr *expr)
+{
+    if (V4IR::Const *c = expr->asConst()) {
+        if (c->type == V4IR::NumberType) {
+            switch (op) {
+            case V4IR::OpNot:
+                return _block->CONST(V4IR::BoolType, !c->value);
+            case V4IR::OpUMinus:
+                return _block->CONST(V4IR::NumberType, -c->value);
+            case V4IR::OpUPlus:
+                return expr;
+            case V4IR::OpCompl:
+                // ~ operates on the ToInt32 of the value, per JS semantics.
+                return _block->CONST(V4IR::NumberType, ~VM::Value::toInt32(c->value));
+            case V4IR::OpIncrement:
+                return _block->CONST(V4IR::NumberType, c->value + 1);
+            case V4IR::OpDecrement:
+                return _block->CONST(V4IR::NumberType, c->value - 1);
+            default:
+                break;
+            }
+        }
+    }
+    if (! expr->asTemp()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), expr);
+        expr = _block->TEMP(t);
+    }
+    assert(expr->asTemp());
+    return _block->UNOP(op, expr->asTemp());
+}
+
+// Builds a binary operation.  Two numeric constants are folded at compile
+// time (shifts mask the count to 5 bits, as in JS; strict (in)equality
+// folds like plain (in)equality, which is valid since both operands are
+// known to be numbers here).  String + string folds to a concatenated
+// string constant.  Everything else is forced into temps and emitted as a
+// BINOP.
+V4IR::Expr *Codegen::binop(V4IR::AluOp op, V4IR::Expr *left, V4IR::Expr *right)
+{
+    if (V4IR::Const *c1 = left->asConst()) {
+        if (V4IR::Const *c2 = right->asConst()) {
+            if (c1->type == V4IR::NumberType && c2->type == V4IR::NumberType) {
+                switch (op) {
+                case V4IR::OpAdd: return _block->CONST(V4IR::NumberType, c1->value + c2->value);
+                case V4IR::OpAnd: return _block->CONST(V4IR::BoolType, c1->value ? c2->value : 0);
+                case V4IR::OpBitAnd: return _block->CONST(V4IR::NumberType, int(c1->value) & int(c2->value));
+                case V4IR::OpBitOr: return _block->CONST(V4IR::NumberType, int(c1->value) | int(c2->value));
+                case V4IR::OpBitXor: return _block->CONST(V4IR::NumberType, int(c1->value) ^ int(c2->value));
+                case V4IR::OpDiv: return _block->CONST(V4IR::NumberType, c1->value / c2->value);
+                case V4IR::OpEqual: return _block->CONST(V4IR::BoolType, c1->value == c2->value);
+                case V4IR::OpNotEqual: return _block->CONST(V4IR::BoolType, c1->value != c2->value);
+                case V4IR::OpStrictEqual: return _block->CONST(V4IR::BoolType, c1->value == c2->value);
+                case V4IR::OpStrictNotEqual: return _block->CONST(V4IR::BoolType, c1->value != c2->value);
+                case V4IR::OpGe: return _block->CONST(V4IR::BoolType, c1->value >= c2->value);
+                case V4IR::OpGt: return _block->CONST(V4IR::BoolType, c1->value > c2->value);
+                case V4IR::OpLe: return _block->CONST(V4IR::BoolType, c1->value <= c2->value);
+                case V4IR::OpLt: return _block->CONST(V4IR::BoolType, c1->value < c2->value);
+                case V4IR::OpLShift: return _block->CONST(V4IR::NumberType, VM::Value::toInt32(c1->value) << (VM::Value::toUInt32(c2->value) & 0x1f));
+                case V4IR::OpMod: return _block->CONST(V4IR::NumberType, ::fmod(c1->value, c2->value));
+                case V4IR::OpMul: return _block->CONST(V4IR::NumberType, c1->value * c2->value);
+                case V4IR::OpOr: return _block->CONST(V4IR::NumberType, c1->value ? c1->value : c2->value);
+                case V4IR::OpRShift: return _block->CONST(V4IR::NumberType, VM::Value::toInt32(c1->value) >> (VM::Value::toUInt32(c2->value) & 0x1f));
+                case V4IR::OpSub: return _block->CONST(V4IR::NumberType, c1->value - c2->value);
+                case V4IR::OpURShift: return _block->CONST(V4IR::NumberType,VM::Value::toUInt32(c1->value) >> (VM::Value::toUInt32(c2->value) & 0x1f));
+
+                // instanceof/in need runtime objects; no folding possible.
+                case V4IR::OpInstanceof:
+                case V4IR::OpIn:
+                    break;
+
+                case V4IR::OpIfTrue: // unary ops
+                case V4IR::OpNot:
+                case V4IR::OpUMinus:
+                case V4IR::OpUPlus:
+                case V4IR::OpCompl:
+                case V4IR::OpIncrement:
+                case V4IR::OpDecrement:
+                case V4IR::OpInvalid:
+                    break;
+                }
+            }
+        }
+    } else if (op == V4IR::OpAdd) {
+        if (V4IR::String *s1 = left->asString()) {
+            if (V4IR::String *s2 = right->asString()) {
+                return _block->STRING(_function->newString(*s1->value + *s2->value));
+            }
+        }
+    }
+
+    if (!left->asTemp()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), left);
+        left = _block->TEMP(t);
+    }
+
+    if (!right->asTemp()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), right);
+        right = _block->TEMP(t);
+    }
+
+    assert(left->asTemp());
+    assert(right->asTemp());
+
+    return _block->BINOP(op, left, right);
+}
+
+// Emits a CALL; the base is kept as a reference (name/member/subscript)
+// so the callee's 'this' binding can be resolved at runtime.
+V4IR::Expr *Codegen::call(V4IR::Expr *base, V4IR::ExprList *args)
+{
+    base = reference(base);
+    return _block->CALL(base, args);
+}
+
+// Emits a MOVE (optionally a compound op like +=).  The source is first
+// normalized into a temp when the backend cannot consume it directly:
+// non-temp/non-const sources for compound or non-temp targets, and const
+// sources for compound or non-temp targets.
+void Codegen::move(V4IR::Expr *target, V4IR::Expr *source, V4IR::AluOp op)
+{
+    assert(target->isLValue());
+
+    if (!source->asTemp() && !source->asConst() && (op != V4IR::OpInvalid || ! target->asTemp())) {
+        unsigned t = _block->newTemp();
+        _block->MOVE(_block->TEMP(t), source);
+        source = _block->TEMP(t);
+    }
+    if (source->asConst() && (!target->asTemp() || op != V4IR::OpInvalid)) {
+        unsigned t = _block->newTemp();
+        _block->MOVE(_block->TEMP(t), source);
+        source = _block->TEMP(t);
+    }
+
+    _block->MOVE(target, source, op);
+}
+
+// Emits a conditional jump; the condition must be a temp or a binop, so
+// anything else is materialized into a temp first.
+void Codegen::cjump(V4IR::Expr *cond, V4IR::BasicBlock *iftrue, V4IR::BasicBlock *iffalse)
+{
+    if (! (cond->asTemp() || cond->asBinop())) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), cond);
+        cond = _block->TEMP(t);
+    }
+    _block->CJUMP(cond, iftrue, iffalse);
+}
+
+// Null-safe AST dispatch.
+void Codegen::accept(Node *node)
+{
+    if (node)
+        node->accept(this);
+}
+
+void Codegen::statement(Statement *ast)
+{
+    accept(ast);
+}
+
+// Generates an expression for effect only (nx context).  If the visitor
+// nevertheless produced a value (ex), calls are wrapped in EXP, temps are
+// dropped, and everything else is flushed into a throw-away temp.
+void Codegen::statement(ExpressionNode *ast)
+{
+    if (! ast) {
+        return;
+    } else {
+        Result r(nx);
+        qSwap(_expr, r);
+        accept(ast);
+        qSwap(_expr, r);
+        if (r.format == ex) {
+            if (r->asCall()) {
+                _block->EXP(*r); // the nest nx representation for calls is EXP(CALL(c..))
+            } else if (r->asTemp()) {
+                // there is nothing to do
+            } else {
+                unsigned t = _block->newTemp();
+                move(_block->TEMP(t), *r);
+            }
+        }
+    }
+}
+
+// Generates an expression in condition context (cx): the visitor either
+// branches itself or leaves a value that is turned into a CJUMP here.
+void Codegen::condition(ExpressionNode *ast, V4IR::BasicBlock *iftrue, V4IR::BasicBlock *iffalse)
+{
+    if (ast) {
+        Result r(iftrue, iffalse);
+        qSwap(_expr, r);
+        accept(ast);
+        qSwap(_expr, r);
+        if (r.format == ex) {
+            cjump(*r, r.iftrue, r.iffalse);
+        }
+    }
+}
+
+// Generates an expression in value context (ex) and returns the Result.
+Codegen::Result Codegen::expression(ExpressionNode *ast)
+{
+    Result r;
+    if (ast) {
+        qSwap(_expr, r);
+        accept(ast);
+        qSwap(_expr, r);
+    }
+    return r;
+}
+
+// The helpers below all follow the same pattern: swap in a fresh result
+// slot, dispatch the node, swap the filled-in result back out.
+
+QString Codegen::propertyName(PropertyName *ast)
+{
+    QString p;
+    if (ast) {
+        qSwap(_property, p);
+        accept(ast);
+        qSwap(_property, p);
+    }
+    return p;
+}
+
+Codegen::Result Codegen::sourceElement(SourceElement *ast)
+{
+    Result r(nx);
+    if (ast) {
+        qSwap(_expr, r);
+        accept(ast);
+        qSwap(_expr, r);
+    }
+    return r;
+}
+
+Codegen::UiMember Codegen::uiObjectMember(UiObjectMember *ast)
+{
+    UiMember m;
+    if (ast) {
+        qSwap(_uiMember, m);
+        accept(ast);
+        qSwap(_uiMember, m);
+    }
+    return m;
+}
+
+void Codegen::functionBody(FunctionBody *ast)
+{
+    if (ast)
+        sourceElements(ast->elements);
+}
+
+void Codegen::program(Program *ast)
+{
+    if (ast) {
+        sourceElements(ast->elements);
+    }
+}
+
+void Codegen::sourceElements(SourceElements *ast)
+{
+    for (SourceElements *it = ast; it; it = it->next) {
+        sourceElement(it->element);
+    }
+}
+
+// Generates the initializer of a 'var' declaration (the slot itself was
+// allocated during the scan pass; a declaration without an initializer
+// emits nothing).  Global code and with/catch scopes assign by NAME,
+// function-local declarations assign directly into their member temp.
+void Codegen::variableDeclaration(VariableDeclaration *ast)
+{
+    V4IR::Expr *initializer = 0;
+    if (!ast->expression)
+        return;
+    Result expr = expression(ast->expression);
+    assert(expr.code);
+    initializer = *expr;
+
+    if (! _env->parent || _function->insideWithOrCatch) {
+        // it's global code.
+        move(_block->NAME(ast->name.toString(), ast->identifierToken.startLine, ast->identifierToken.startColumn), initializer);
+    } else {
+        const int index = _env->findMember(ast->name.toString());
+        assert(index != -1);
+        move(_block->TEMP(index), initializer);
+    }
+}
+
+void Codegen::variableDeclarationList(VariableDeclarationList *ast)
+{
+    for (VariableDeclarationList *it = ast; it; it = it->next) {
+        variableDeclaration(it->declaration);
+    }
+}
+
+
+// The following node types are never dispatched through the generic
+// visitor: they are consumed by dedicated helpers above (argument lists,
+// case clauses, formals, declarations, UI nodes, ...).  Reaching any of
+// these visit() overloads indicates a codegen bug.
+bool Codegen::visit(ArgumentList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(CaseBlock *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(CaseClause *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(CaseClauses *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(Catch *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(DefaultClause *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(ElementList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(Elision *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(Finally *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(FormalParameterList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(FunctionBody *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(Program *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(PropertyAssignmentList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(PropertyNameAndValue *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(PropertyGetterSetter *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(SourceElements *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(StatementList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiArrayMemberList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiImport *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiImportList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiObjectInitializer *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiObjectMemberList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiParameterList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiProgram *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(UiQualifiedId *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(VariableDeclaration *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+bool Codegen::visit(VariableDeclarationList *)
+{
+    assert(!"unreachable");
+    return false;
+}
+
+// Comma expression: the left side is evaluated for effect only, the right
+// side in the current context.
+bool Codegen::visit(Expression *ast)
+{
+    statement(ast->left);
+    accept(ast->right);
+    return false;
+}
+
+// Array literal: builds an argument list for builtin_define_array, one
+// entry per element.  Elisions (holes) contribute MissingType constants,
+// both between elements and in the trailing elision list.
+bool Codegen::visit(ArrayLiteral *ast)
+{
+    V4IR::ExprList *args = 0;
+    V4IR::ExprList *current = 0;
+    for (ElementList *it = ast->elements; it; it = it->next) {
+        for (Elision *elision = it->elision; elision; elision = elision->next) {
+            V4IR::ExprList *arg = _function->New<V4IR::ExprList>();
+            if (!current) {
+                args = arg;
+            } else {
+                current->next = arg;
+            }
+            current = arg;
+            current->expr = _block->CONST(V4IR::MissingType, 0);
+        }
+        Result expr = expression(it->expression);
+
+        V4IR::ExprList *arg = _function->New<V4IR::ExprList>();
+        if (!current) {
+            args = arg;
+        } else {
+            current->next = arg;
+        }
+        current = arg;
+
+        // Temps and constants can be passed through; anything else is
+        // flushed into a temp so evaluation order is preserved.
+        V4IR::Expr *exp = *expr;
+        if (exp->asTemp() || exp->asConst()) {
+            current->expr = exp;
+        } else {
+            unsigned value = _block->newTemp();
+            move(_block->TEMP(value), exp);
+            current->expr = _block->TEMP(value);
+        }
+    }
+    for (Elision *elision = ast->elision; elision; elision = elision->next) {
+        V4IR::ExprList *arg = _function->New<V4IR::ExprList>();
+        if (!current) {
+            args = arg;
+        } else {
+            current->next = arg;
+        }
+        current = arg;
+        current->expr = _block->CONST(V4IR::MissingType, 0);
+    }
+
+    const unsigned t = _block->newTemp();
+    move(_block->TEMP(t), _block->CALL(_block->NAME(V4IR::Name::builtin_define_array, 0, 0), args));
+    _expr.code = _block->TEMP(t);
+    return false;
+}
+
+// a[b] subscript access.
+bool Codegen::visit(ArrayMemberExpression *ast)
+{
+    Result base = expression(ast->base);
+    Result index = expression(ast->expression);
+    _expr.code = subscript(*base, *index);
+    return false;
+}
+
+// Maps a compound-assignment operator token (e.g. '+=') to the underlying
+// ALU operation; returns OpInvalid for anything that is not an inplace op.
+static V4IR::AluOp baseOp(int op)
+{
+    switch ((QSOperator::Op) op) {
+    case QSOperator::InplaceAnd: return V4IR::OpBitAnd;
+    case QSOperator::InplaceSub: return V4IR::OpSub;
+    case QSOperator::InplaceDiv: return V4IR::OpDiv;
+    case QSOperator::InplaceAdd: return V4IR::OpAdd;
+    case QSOperator::InplaceLeftShift: return V4IR::OpLShift;
+    case QSOperator::InplaceMod: return V4IR::OpMod;
+    case QSOperator::InplaceMul: return V4IR::OpMul;
+    case QSOperator::InplaceOr: return V4IR::OpBitOr;
+    case QSOperator::InplaceRightShift: return V4IR::OpRShift;
+    case QSOperator::InplaceURightShift: return V4IR::OpURShift;
+    case QSOperator::InplaceXor: return V4IR::OpBitXor;
+    default: return V4IR::OpInvalid;
+    }
+}
+
+// Binary expressions.  && and || are handled first because they must
+// short-circuit: in condition context (cx) they are compiled into control
+// flow directly, otherwise into a temp via an explicit branch.  Plain
+// assignment, compound assignment, comparisons and arithmetic follow.
+bool Codegen::visit(BinaryExpression *ast)
+{
+    if (ast->op == QSOperator::And) {
+        if (_expr.accept(cx)) {
+            V4IR::BasicBlock *iftrue = _function->newBasicBlock();
+            condition(ast->left, iftrue, _expr.iffalse);
+            _block = iftrue;
+            condition(ast->right, _expr.iftrue, _expr.iffalse);
+        } else {
+            V4IR::BasicBlock *iftrue = _function->newBasicBlock();
+            V4IR::BasicBlock *endif = _function->newBasicBlock();
+
+            const unsigned r = _block->newTemp();
+
+            // r holds the left value; only if truthy is it overwritten by
+            // the right value (JS && yields the deciding operand).
+            move(_block->TEMP(r), *expression(ast->left));
+            cjump(_block->TEMP(r), iftrue, endif);
+            _block = iftrue;
+            move(_block->TEMP(r), *expression(ast->right));
+            _block->JUMP(endif);
+
+            _expr.code = _block->TEMP(r);
+            _block = endif;
+        }
+        return false;
+    } else if (ast->op == QSOperator::Or) {
+        if (_expr.accept(cx)) {
+            V4IR::BasicBlock *iffalse = _function->newBasicBlock();
+            condition(ast->left, _expr.iftrue, iffalse);
+            _block = iffalse;
+            condition(ast->right, _expr.iftrue, _expr.iffalse);
+        } else {
+            V4IR::BasicBlock *iffalse = _function->newBasicBlock();
+            V4IR::BasicBlock *endif = _function->newBasicBlock();
+
+            const unsigned r = _block->newTemp();
+            move(_block->TEMP(r), *expression(ast->left));
+            cjump(_block->TEMP(r), endif, iffalse);
+            _block = iffalse;
+            move(_block->TEMP(r), *expression(ast->right));
+            _block->JUMP(endif);
+
+            _block = endif;
+            _expr.code = _block->TEMP(r);
+        }
+        return false;
+    }
+
+    V4IR::Expr* left = *expression(ast->left);
+    throwSyntaxErrorOnEvalOrArgumentsInStrictMode(left, ast->left->lastSourceLocation());
+
+    switch (ast->op) {
+    case QSOperator::Or:
+    case QSOperator::And:
+        break;
+
+    case QSOperator::Assign: {
+        V4IR::Expr* right = *expression(ast->right);
+        if (! (left->asTemp() || left->asName() || left->asSubscript() || left->asMember()))
+            throwReferenceError(ast->operatorToken, QCoreApplication::translate("qv4codegen", "left-hand side of assignment operator is not an lvalue"));
+
+        if (_expr.accept(nx)) {
+            move(left, right);
+        } else {
+            // Value context: route through a temp so the assignment's value
+            // can be yielded as the expression result.
+            const unsigned t = _block->newTemp();
+            move(_block->TEMP(t), right);
+            move(left, _block->TEMP(t));
+            _expr.code = _block->TEMP(t);
+        }
+        break;
+    }
+
+    case QSOperator::InplaceAnd:
+    case QSOperator::InplaceSub:
+    case QSOperator::InplaceDiv:
+    case QSOperator::InplaceAdd:
+    case QSOperator::InplaceLeftShift:
+    case QSOperator::InplaceMod:
+    case QSOperator::InplaceMul:
+    case QSOperator::InplaceOr:
+    case QSOperator::InplaceRightShift:
+    case QSOperator::InplaceURightShift:
+    case QSOperator::InplaceXor: {
+        V4IR::Expr* right = *expression(ast->right);
+        if (!left->isLValue())
+            throwSyntaxError(ast->operatorToken, QCoreApplication::translate("qv4codegen", "left-hand side of inplace operator is not an lvalue"));
+
+        if (_expr.accept(nx)) {
+            move(left, right, baseOp(ast->op));
+        } else {
+            const unsigned t = _block->newTemp();
+            move(_block->TEMP(t), right);
+            move(left, _block->TEMP(t), baseOp(ast->op));
+            _expr.code = left;
+        }
+        break;
+    }
+
+    case QSOperator::In:
+    case QSOperator::InstanceOf:
+    case QSOperator::Equal:
+    case QSOperator::NotEqual:
+    case QSOperator::Ge:
+    case QSOperator::Gt:
+    case QSOperator::Le:
+    case QSOperator::Lt:
+    case QSOperator::StrictEqual:
+    case QSOperator::StrictNotEqual: {
+        // Pin the left operand before evaluating the right one to preserve
+        // left-to-right evaluation order.
+        if (!left->asTemp() && !left->asConst()) {
+            const unsigned t = _block->newTemp();
+            move(_block->TEMP(t), left);
+            left = _block->TEMP(t);
+        }
+
+        V4IR::Expr* right = *expression(ast->right);
+
+        if (_expr.accept(cx)) {
+            cjump(binop(V4IR::binaryOperator(ast->op), left, right), _expr.iftrue, _expr.iffalse);
+        } else {
+            V4IR::Expr *e = binop(V4IR::binaryOperator(ast->op), left, right);
+            if (e->asConst() || e->asString())
+                _expr.code = e;
+            else {
+                const unsigned t = _block->newTemp();
+                move(_block->TEMP(t), e);
+                _expr.code = _block->TEMP(t);
+            }
+        }
+        break;
+    }
+
+    case QSOperator::Add:
+    case QSOperator::BitAnd:
+    case QSOperator::BitOr:
+    case QSOperator::BitXor:
+    case QSOperator::Div:
+    case QSOperator::LShift:
+    case QSOperator::Mod:
+    case QSOperator::Mul:
+    case QSOperator::RShift:
+    case QSOperator::Sub:
+    case QSOperator::URShift: {
+        if (!left->asTemp() && !left->asConst()) {
+            const unsigned t = _block->newTemp();
+            move(_block->TEMP(t), left);
+            left = _block->TEMP(t);
+        }
+
+        V4IR::Expr* right = *expression(ast->right);
+
+        V4IR::Expr *e = binop(V4IR::binaryOperator(ast->op), left, right);
+        if (e->asConst() || e->asString())
+            _expr.code = e;
+        else {
+            const unsigned t = _block->newTemp();
+            move(_block->TEMP(t), e);
+            _expr.code = _block->TEMP(t);
+        }
+        break;
+    }
+
+    } // switch
+
+    return false;
+}
+
+// Call expression: each argument is evaluated left-to-right and normalized
+// into a temp via argument(); the list is built tail-appended.
+bool Codegen::visit(CallExpression *ast)
+{
+    Result base = expression(ast->base);
+    V4IR::ExprList *args = 0, **args_it = &args;
+    for (ArgumentList *it = ast->arguments; it; it = it->next) {
+        Result arg = expression(it->expression);
+        V4IR::Expr *actual = argument(*arg);
+        *args_it = _function->New<V4IR::ExprList>();
+        (*args_it)->init(actual);
+        args_it = &(*args_it)->next;
+    }
+    _expr.code = call(*base, args);
+    return false;
+}
+
+// Ternary ?:  — compiled as a diamond: both branches write the same temp.
+bool Codegen::visit(ConditionalExpression *ast)
+{
+    V4IR::BasicBlock *iftrue = _function->newBasicBlock();
+    V4IR::BasicBlock *iffalse = _function->newBasicBlock();
+    V4IR::BasicBlock *endif = _function->newBasicBlock();
+
+    const unsigned t = _block->newTemp();
+
+    condition(ast->expression, iftrue, iffalse);
+
+    _block = iftrue;
+    move(_block->TEMP(t), *expression(ast->ok));
+    _block->JUMP(endif);
+
+    _block = iffalse;
+    move(_block->TEMP(t), *expression(ast->ko));
+    _block->JUMP(endif);
+
+    _block = endif;
+
+    _expr.code = _block->TEMP(t);
+
+    return false;
+}
+
+// 'delete' operator.  Locals/arguments cannot be deleted (result false);
+// deleting an unqualified identifier is a strict-mode syntax error;
+// non-references evaluate to true (calls still run for side effects);
+// real references go through the builtin_delete runtime call.
+bool Codegen::visit(DeleteExpression *ast)
+{
+    V4IR::Expr* expr = *expression(ast->expression);
+    // Temporaries cannot be deleted
+    if (expr->asTemp() && expr->asTemp()->index < _env->members.size()) {
+        // Trying to delete a function argument might throw.
+        if (_function->isStrict && expr->asTemp()->index < 0)
+            throwSyntaxError(ast->deleteToken, "Delete of an unqualified identifier in strict mode.");
+        _expr.code = _block->CONST(V4IR::BoolType, 0);
+        return false;
+    }
+    if (_function->isStrict && expr->asName())
+        throwSyntaxError(ast->deleteToken, "Delete of an unqualified identifier in strict mode.");
+
+    // [[11.4.1]] Return true if it's not a reference
+    if (expr->asConst() || expr->asString()) {
+        _expr.code = _block->CONST(V4IR::BoolType, 1);
+        return false;
+    }
+
+    // Return values from calls are also not a reference, but we have to
+    // perform the call to allow for side effects.
+    if (expr->asCall()) {
+        _block->EXP(expr);
+        _expr.code = _block->CONST(V4IR::BoolType, 1);
+        return false;
+    }
+    if (expr->asTemp() && expr->asTemp()->index >= _env->members.size()) {
+        _expr.code = _block->CONST(V4IR::BoolType, 1);
+        return false;
+    }
+
+    V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+    args->init(reference(expr));
+    _expr.code = call(_block->NAME(V4IR::Name::builtin_delete, ast->deleteToken.startLine, ast->deleteToken.startColumn), args);
+    return false;
+}
+
+// 'false' literal: in condition context jump straight to the false branch,
+// otherwise produce a boolean constant.
+bool Codegen::visit(FalseLiteral *)
+{
+    if (_expr.accept(cx)) {
+        _block->JUMP(_expr.iffalse);
+    } else {
+        _expr.code = _block->CONST(V4IR::BoolType, 0);
+    }
+    return false;
+}
+
+// obj.name member access.
+bool Codegen::visit(FieldMemberExpression *ast)
+{
+    Result base = expression(ast->base);
+    _expr.code = member(*base, _function->newString(ast->name.toString()));
+    return false;
+}
+
+// Function expression in codegen (as opposed to the scan pass): defines
+// the nested function and yields a CLOSURE over it.
+bool Codegen::visit(FunctionExpression *ast)
+{
+    V4IR::Function *function = defineFunction(ast->name.toString(), ast, ast->formals, ast->body ? ast->body->elements : 0);
+    if (_debugger)
+        _debugger->setSourceLocation(function, ast->functionToken.startLine, ast->functionToken.startColumn);
+    _expr.code = _block->CLOSURE(function);
+    return false;
+}
+
+// Resolves a name at compile time.  Walks the environment/function chain
+// outward: locals resolve to member temps, formals to negative-indexed
+// temps (with the scope distance recorded).  The fast path is abandoned
+// for frames where static resolution is unsafe: the arguments object is
+// in use, a non-strict direct eval exists, we are inside with/catch, or
+// the name is the function's own name (named function expression).
+V4IR::Expr *Codegen::identifier(const QString &name, int line, int col)
+{
+    uint scope = 0;
+    Environment *e = _env;
+    V4IR::Function *f = _function;
+
+    while (f && e->parent) {
+        if ((f->usesArgumentsObject && name == "arguments") || (!f->isStrict && f->hasDirectEval) || f->insideWithOrCatch || (f->isNamedExpression && f->name == name))
+            break;
+        int index = e->findMember(name);
+        assert (index < e->members.size());
+        if (index != -1) {
+            return _block->TEMP(index, scope);
+        }
+        const int argIdx = f->indexOfArgument(&name);
+        if (argIdx != -1)
+            return _block->TEMP(-(argIdx + 1), scope);
+        ++scope;
+        e = e->parent;
+        f = f->outer;
+    }
+
+    // Reached the global scope with no dynamic lookup in the way: emit a
+    // GLOBALNAME so the runtime can use a faster global lookup.
+    if (!e->parent && (!f || !f->insideWithOrCatch) && _mode != EvalCode && (!f || f->name != name))
+        return _block->GLOBALNAME(name, line, col);
+
+    // global context or with. Lookup by name
+    return _block->NAME(name, line, col);
+
+}
+
+bool Codegen::visit(IdentifierExpression *ast)
+{
+    _expr.code = identifier(ast->name.toString(), ast->identifierToken.startLine, ast->identifierToken.startColumn);
+    return false;
+}
+
+// Parenthesized expression: transparent, just forward the context.
+bool Codegen::visit(NestedExpression *ast)
+{
+    accept(ast->expression);
+    return false;
+}
+
+// 'new expr' with no argument list.
+bool Codegen::visit(NewExpression *ast)
+{
+    Result base = expression(ast->expression);
+    V4IR::Expr *expr = *base;
+    if (expr && !expr->asTemp() && !expr->asName() && !expr->asMember()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), expr);
+        expr = _block->TEMP(t);
+    }
+    _expr.code = _block->NEW(expr, 0);
+    return false;
+}
+
+// 'new expr(args...)': constructor plus an argument list built the same
+// way as for ordinary calls.
+bool Codegen::visit(NewMemberExpression *ast)
+{
+    Result base = expression(ast->base);
+    V4IR::Expr *expr = *base;
+    if (expr && !expr->asTemp() && !expr->asName() && !expr->asMember()) {
+        const unsigned t = _block->newTemp();
+        move(_block->TEMP(t), expr);
+        expr = _block->TEMP(t);
+    }
+
+    V4IR::ExprList *args = 0, **args_it = &args;
+    for (ArgumentList *it = ast->arguments; it; it = it->next) {
+        Result arg = expression(it->expression);
+        V4IR::Expr *actual = argument(*arg);
+        *args_it = _function->New<V4IR::ExprList>();
+        (*args_it)->init(actual);
+        args_it = &(*args_it)->next;
+    }
+    const unsigned t = _block->newTemp();
+    move(_block->TEMP(t), _block->NEW(expr, args));
+    _expr.code = _block->TEMP(t);
+    return false;
+}
+
+// Logical not: delegates folding/temp handling to unop().
+bool Codegen::visit(NotExpression *ast)
+{
+    Result expr = expression(ast->expression);
+    const unsigned r = _block->newTemp();
+    move(_block->TEMP(r), unop(V4IR::OpNot, *expr));
+    _expr.code = _block->TEMP(r);
+    return false;
+}
+
+// 'null' is falsy: in condition context jump to the false branch.
+bool Codegen::visit(NullExpression *)
+{
+    if (_expr.accept(cx)) _block->JUMP(_expr.iffalse);
+    else _expr.code = _block->CONST(V4IR::NullType, 0);
+
+    return false;
+}
+
+// Numeric literal: in condition context branch on its (compile-time)
+// truthiness, otherwise yield a number constant.
+bool Codegen::visit(NumericLiteral *ast)
+{
+    if (_expr.accept(cx)) {
+        if (ast->value) _block->JUMP(_expr.iftrue);
+        else _block->JUMP(_expr.iffalse);
+    } else {
+        _expr.code = _block->CONST(V4IR::NumberType, ast->value);
+    }
+    return false;
+}
+
+// Per-key record used while building an object literal: either a plain
+// value, or a getter and/or setter.  Instances obtained via QMap's
+// operator[] are value-initialized, so the pointers start out null.
+struct ObjectPropertyValue {
+    V4IR::Expr *value;       // plain property value (exclusive with get/set)
+    V4IR::Function *getter;  // getter function, if any
+    V4IR::Function *setter;  // setter function, if any
+};
+
+bool Codegen::visit(ObjectLiteral *ast)
+{
+ QMap<QString, ObjectPropertyValue> valueMap;
+
+ const unsigned t = _block->newTemp();
+ move(_block->TEMP(t), _block->NEW(_block->NAME(QStringLiteral("Object"), ast->firstSourceLocation().startLine, ast->firstSourceLocation().startColumn)));
+ for (PropertyAssignmentList *it = ast->properties; it; it = it->next) {
+ if (PropertyNameAndValue *nv = AST::cast<AST::PropertyNameAndValue *>(it->assignment)) {
+ QString name = propertyName(nv->name);
+ Result value = expression(nv->value);
+ ObjectPropertyValue &v = valueMap[name];
+ if (v.getter || v.setter || (_function->isStrict && v.value))
+ throwSyntaxError(nv->lastSourceLocation(),
+ QCoreApplication::translate("qv4codegen", "Illegal duplicate key '%1' in object literal").arg(name));
+
+ valueMap[name].value = *value;
+ } else if (PropertyGetterSetter *gs = AST::cast<AST::PropertyGetterSetter *>(it->assignment)) {
+ QString name = propertyName(gs->name);
+ V4IR::Function *function = defineFunction(name, gs, gs->formals, gs->functionBody ? gs->functionBody->elements : 0);
+ if (_debugger)
+ _debugger->setSourceLocation(function, gs->getSetToken.startLine, gs->getSetToken.startColumn);
+ ObjectPropertyValue &v = valueMap[name];
+ if (v.value ||
+ (gs->type == PropertyGetterSetter::Getter && v.getter) ||
+ (gs->type == PropertyGetterSetter::Setter && v.setter))
+ throwSyntaxError(gs->lastSourceLocation(),
+ QCoreApplication::translate("qv4codegen", "Illegal duplicate key '%1' in object literal").arg(name));
+ if (gs->type == PropertyGetterSetter::Getter)
+ v.getter = function;
+ else
+ v.setter = function;
+ } else {
+ Q_UNREACHABLE();
+ }
+ }
+ if (!valueMap.isEmpty()) {
+ unsigned value = 0;
+ unsigned getter = 0;
+ unsigned setter = 0;
+ for (QMap<QString, ObjectPropertyValue>::const_iterator it = valueMap.constBegin(); it != valueMap.constEnd(); ++it) {
+ V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+ V4IR::ExprList *current = args;
+ current->expr = _block->TEMP(t);
+ current->next = _function->New<V4IR::ExprList>();
+ current = current->next;
+ current->expr = _block->NAME(it.key(), 0, 0);
+ current->next = _function->New<V4IR::ExprList>();
+ current = current->next;
+
+ if (it->value) {
+ if (!value)
+ value = _block->newTemp();
+ move(_block->TEMP(value), it->value);
+ // __qmljs_builtin_define_property(Value object, String *name, Value val, ExecutionContext *ctx)
+ current->expr = _block->TEMP(value);
+ _block->EXP(_block->CALL(_block->NAME(V4IR::Name::builtin_define_property, 0, 0), args));
+ } else {
+ if (!getter) {
+ getter = _block->newTemp();
+ setter = _block->newTemp();
+ }
+ move(_block->TEMP(getter), it->getter ? _block->CLOSURE(it->getter) : _block->CONST(V4IR::UndefinedType, 0));
+ move(_block->TEMP(setter), it->setter ? _block->CLOSURE(it->setter) : _block->CONST(V4IR::UndefinedType, 0));
+
+
+ // __qmljs_builtin_define_getter_setter(Value object, String *name, Value getter, Value setter, ExecutionContext *ctx);
+ current->expr = _block->TEMP(getter);
+ current->next = _function->New<V4IR::ExprList>();
+ current = current->next;
+ current->expr = _block->TEMP(setter);
+ _block->EXP(_block->CALL(_block->NAME(V4IR::Name::builtin_define_getter_setter, 0, 0), args));
+ }
+ }
+ }
+
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+bool Codegen::visit(PostDecrementExpression *ast)
+{
+ Result expr = expression(ast->base);
+ if (!expr->isLValue())
+ throwReferenceError(ast->base->lastSourceLocation(), "Invalid left-hand side expression in postfix operation");
+ throwSyntaxErrorOnEvalOrArgumentsInStrictMode(*expr, ast->decrementToken);
+
+ if (_expr.accept(nx)) {
+ move(*expr, unop(V4IR::OpDecrement, *expr));
+ } else {
+ V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+ args->init(*expr);
+ _expr.code = call(_block->NAME(V4IR::Name::builtin_postdecrement, ast->lastSourceLocation().startLine, ast->lastSourceLocation().startColumn), args);
+ }
+ return false;
+}
+
+bool Codegen::visit(PostIncrementExpression *ast)
+{
+ Result expr = expression(ast->base);
+ if (!expr->isLValue())
+ throwReferenceError(ast->base->lastSourceLocation(), "Invalid left-hand side expression in postfix operation");
+ throwSyntaxErrorOnEvalOrArgumentsInStrictMode(*expr, ast->incrementToken);
+
+ if (_expr.accept(nx)) {
+ move(*expr, unop(V4IR::OpIncrement, *expr));
+ } else {
+ V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+ args->init(*expr);
+ _expr.code = call(_block->NAME(V4IR::Name::builtin_postincrement, ast->lastSourceLocation().startLine, ast->lastSourceLocation().startColumn), args);
+ }
+ return false;
+}
+
+bool Codegen::visit(PreDecrementExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ throwSyntaxErrorOnEvalOrArgumentsInStrictMode(*expr, ast->decrementToken);
+ move(*expr, unop(V4IR::OpDecrement, *expr));
+ if (_expr.accept(nx)) {
+ // nothing to do
+ } else {
+ _expr.code = *expr;
+ }
+ return false;
+}
+
+bool Codegen::visit(PreIncrementExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ throwSyntaxErrorOnEvalOrArgumentsInStrictMode(*expr, ast->incrementToken);
+ move(*expr, unop(V4IR::OpIncrement, *expr));
+ if (_expr.accept(nx)) {
+ // nothing to do
+ } else {
+ _expr.code = *expr;
+ }
+ return false;
+}
+
+bool Codegen::visit(RegExpLiteral *ast)
+{
+ _expr.code = _block->REGEXP(_function->newString(ast->pattern.toString()), ast->flags);
+ return false;
+}
+
+bool Codegen::visit(StringLiteral *ast)
+{
+ _expr.code = _block->STRING(_function->newString(ast->value.toString()));
+ return false;
+}
+
+bool Codegen::visit(ThisExpression *ast)
+{
+ _expr.code = _block->NAME(QStringLiteral("this"), ast->thisToken.startLine, ast->thisToken.startColumn);
+ return false;
+}
+
+bool Codegen::visit(TildeExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ const unsigned t = _block->newTemp();
+ move(_block->TEMP(t), unop(V4IR::OpCompl, *expr));
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+bool Codegen::visit(TrueLiteral *)
+{
+ if (_expr.accept(cx)) {
+ _block->JUMP(_expr.iftrue);
+ } else {
+ _expr.code = _block->CONST(V4IR::BoolType, 1);
+ }
+ return false;
+}
+
+bool Codegen::visit(TypeOfExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+ args->init(reference(*expr));
+ _expr.code = call(_block->NAME(V4IR::Name::builtin_typeof, ast->typeofToken.startLine, ast->typeofToken.startColumn), args);
+ return false;
+}
+
+bool Codegen::visit(UnaryMinusExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ const unsigned t = _block->newTemp();
+ move(_block->TEMP(t), unop(V4IR::OpUMinus, *expr));
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+bool Codegen::visit(UnaryPlusExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ const unsigned t = _block->newTemp();
+ move(_block->TEMP(t), unop(V4IR::OpUPlus, *expr));
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+bool Codegen::visit(VoidExpression *ast)
+{
+ statement(ast->expression);
+ _expr.code = _block->CONST(V4IR::UndefinedType, 0);
+ return false;
+}
+
+bool Codegen::visit(FunctionDeclaration * /*ast*/)
+{
+ _expr.accept(nx);
+ return false;
+}
+
+void Codegen::linearize(V4IR::Function *function)
+{
+ V4IR::BasicBlock *exitBlock = function->basicBlocks.last();
+ assert(exitBlock->isTerminated());
+ assert(exitBlock->terminator()->asRet());
+
+ QSet<V4IR::BasicBlock *> V;
+ V.insert(exitBlock);
+
+ QVector<V4IR::BasicBlock *> trace;
+
+ for (int i = 0; i < function->basicBlocks.size(); ++i) {
+ V4IR::BasicBlock *block = function->basicBlocks.at(i);
+ if (!block->isTerminated() && (i + 1) < function->basicBlocks.size()) {
+ V4IR::BasicBlock *next = function->basicBlocks.at(i + 1);
+ block->JUMP(next);
+ }
+ }
+
+ struct I { static void trace(V4IR::BasicBlock *block, QSet<V4IR::BasicBlock *> *V,
+ QVector<V4IR::BasicBlock *> *output) {
+ if (block == 0 || V->contains(block))
+ return;
+
+ V->insert(block);
+ block->index = output->size();
+ output->append(block);
+
+ if (V4IR::Stmt *term = block->terminator()) {
+ if (V4IR::Jump *j = term->asJump()) {
+ trace(j->target, V, output);
+ } else if (V4IR::CJump *cj = term->asCJump()) {
+ if (! V->contains(cj->iffalse))
+ trace(cj->iffalse, V, output);
+ else
+ trace(cj->iftrue, V, output);
+ } else if (V4IR::Try *t = term->asTry()) {
+ trace(t->tryBlock, V, output);
+ trace(t->catchBlock, V, output);
+ }
+ }
+
+ // We could do this for each type above, but it is safer to have a
+ // "catchall" here
+ for (int ii = 0; ii < block->out.count(); ++ii)
+ trace(block->out.at(ii), V, output);
+ }
+ };
+
+ I::trace(function->basicBlocks.first(), &V, &trace);
+
+ V.insert(exitBlock);
+ exitBlock->index = trace.size();
+ trace.append(exitBlock);
+
+ QVarLengthArray<V4IR::BasicBlock*> blocksToDelete;
+ foreach (V4IR::BasicBlock *b, function->basicBlocks) {
+ if (!V.contains(b)) {
+ foreach (V4IR::BasicBlock *out, b->out) {
+ int idx = out->in.indexOf(b);
+ if (idx >= 0)
+ out->in.remove(idx);
+ }
+ blocksToDelete.append(b);
+ }
+ }
+ qDeleteAll(blocksToDelete);
+ function->basicBlocks = trace;
+
+ function->removeSharedExpressions();
+
+ if (qgetenv("NO_OPT").isEmpty())
+ ConstantPropagation().run(function);
+
+#ifndef QV4_NO_LIVENESS
+ liveness(function);
+#endif
+
+ if (qgetenv("NO_OPT").isEmpty())
+ removeDeadAssignments(function);
+
+ static bool showCode = !qgetenv("SHOW_CODE").isNull();
+ if (showCode) {
+ QVector<V4IR::Stmt *> code;
+ QHash<V4IR::Stmt *, V4IR::BasicBlock *> leader;
+
+ foreach (V4IR::BasicBlock *block, function->basicBlocks) {
+ leader.insert(block->statements.first(), block);
+ foreach (V4IR::Stmt *s, block->statements) {
+ code.append(s);
+ }
+ }
+
+ QString name;
+ if (function->name && !function->name->isEmpty())
+ name = *function->name;
+ else
+ name.sprintf("%p", function);
+
+ qout << "function " << name << "(";
+ for (int i = 0; i < function->formals.size(); ++i) {
+ if (i != 0)
+ qout << ", ";
+ qout << *function->formals.at(i);
+ }
+ qout << ")" << endl
+ << "{" << endl;
+
+ foreach (const QString *local, function->locals) {
+ qout << " var " << *local << ';' << endl;
+ }
+
+ for (int i = 0; i < code.size(); ++i) {
+ V4IR::Stmt *s = code.at(i);
+
+ if (V4IR::BasicBlock *bb = leader.value(s)) {
+ qout << endl;
+ QByteArray str;
+ str.append('L');
+ str.append(QByteArray::number(bb->index));
+ str.append(':');
+ for (int i = 66 - str.length(); i; --i)
+ str.append(' ');
+ qout << str;
+ qout << "// predecessor blocks:";
+ foreach (V4IR::BasicBlock *in, bb->in)
+ qout << " L" << in->index;
+ qout << endl;
+ }
+ V4IR::Stmt *n = (i + 1) < code.size() ? code.at(i + 1) : 0;
+ if (n && s->asJump() && s->asJump()->target == leader.value(n)) {
+ continue;
+ }
+
+ QByteArray str;
+ QBuffer buf(&str);
+ buf.open(QIODevice::WriteOnly);
+ QTextStream out(&buf);
+ s->dump(out, V4IR::Stmt::MIR);
+ out.flush();
+
+#ifndef QV4_NO_LIVENESS
+ for (int i = 60 - str.size(); i >= 0; --i)
+ str.append(' ');
+
+ qout << " " << str;
+
+ // if (! s->uses.isEmpty()) {
+ // qout << " // uses:";
+ // foreach (unsigned use, s->uses) {
+ // qout << " %" << use;
+ // }
+ // }
+
+ // if (! s->defs.isEmpty()) {
+ // qout << " // defs:";
+ // foreach (unsigned def, s->defs) {
+ // qout << " %" << def;
+ // }
+ // }
+
+# if 0
+ if (! s->d->liveIn.isEmpty()) {
+ qout << " // lives in:";
+ for (int i = 0; i < s->d->liveIn.size(); ++i) {
+ if (s->d->liveIn.testBit(i))
+ qout << " %" << i;
+ }
+ }
+# else
+ if (! s->d->liveOut.isEmpty()) {
+ qout << " // lives out:";
+ for (int i = 0; i < s->d->liveOut.size(); ++i) {
+ if (s->d->liveOut.testBit(i))
+ qout << " %" << i;
+ }
+ }
+# endif
+#else
+ qout << " " << str;
+#endif
+
+ qout << endl;
+
+ if (n && s->asCJump() && s->asCJump()->iffalse != leader.value(n)) {
+ qout << " goto L" << s->asCJump()->iffalse << ";" << endl;
+ }
+ }
+
+ qout << "}" << endl
+ << endl;
+ }
+
+ //### NOTE: after this pass, the liveness information is not correct anymore!
+ if (qgetenv("NO_OPT").isEmpty())
+ CompressTemps().run(function);
+}
+
+V4IR::Function *Codegen::defineFunction(const QString &name, AST::Node *ast,
+ AST::FormalParameterList *formals,
+ AST::SourceElements *body, Mode mode,
+ const QStringList &inheritedLocals)
+{
+ qSwap(_mode, mode); // enter function code.
+
+ ScopeAndFinally *scopeAndFinally = 0;
+
+ enterEnvironment(ast);
+ V4IR::Function *function = _module->newFunction(name, _function);
+
+ if (_debugger)
+ _debugger->addFunction(function);
+ V4IR::BasicBlock *entryBlock = function->newBasicBlock();
+ V4IR::BasicBlock *exitBlock = function->newBasicBlock(V4IR::Function::DontInsertBlock);
+ V4IR::BasicBlock *throwBlock = function->newBasicBlock();
+ function->hasDirectEval = _env->hasDirectEval;
+ function->usesArgumentsObject = (_env->usesArgumentsObject == Environment::ArgumentsObjectUsed);
+ function->maxNumberOfArguments = _env->maxNumberOfArguments;
+ function->isStrict = _env->isStrict;
+ function->isNamedExpression = _env->isNamedFunctionExpression;
+
+ // variables in global code are properties of the global context object, not locals as with other functions.
+ if (_mode == FunctionCode) {
+ for (Environment::MemberMap::iterator it = _env->members.begin(); it != _env->members.end(); ++it) {
+ const QString &local = it.key();
+ function->LOCAL(local);
+ unsigned t = entryBlock->newTemp();
+ (*it).index = t;
+ }
+ } else {
+ if (!_env->isStrict) {
+ foreach (const QString &inheritedLocal, inheritedLocals) {
+ function->LOCAL(inheritedLocal);
+ unsigned tempIndex = entryBlock->newTemp();
+ Environment::Member member = { Environment::UndefinedMember,
+ static_cast<int>(tempIndex), 0 };
+ _env->members.insert(inheritedLocal, member);
+ }
+ }
+
+ V4IR::ExprList *args = 0;
+ for (Environment::MemberMap::const_iterator it = _env->members.constBegin(); it != _env->members.constEnd(); ++it) {
+ const QString &local = it.key();
+ V4IR::ExprList *next = function->New<V4IR::ExprList>();
+ next->expr = entryBlock->NAME(local, 0, 0);
+ next->next = args;
+ args = next;
+ }
+ if (args) {
+ V4IR::ExprList *next = function->New<V4IR::ExprList>();
+ next->expr = entryBlock->CONST(V4IR::BoolType, mode == EvalCode);
+ next->next = args;
+ args = next;
+
+ entryBlock->EXP(entryBlock->CALL(entryBlock->NAME(V4IR::Name::builtin_declare_vars, 0, 0), args));
+ }
+ }
+
+ unsigned returnAddress = entryBlock->newTemp();
+
+ entryBlock->MOVE(entryBlock->TEMP(returnAddress), entryBlock->CONST(V4IR::UndefinedType, 0));
+ exitBlock->RET(exitBlock->TEMP(returnAddress));
+ V4IR::ExprList *throwArgs = function->New<V4IR::ExprList>();
+ throwArgs->expr = throwBlock->TEMP(returnAddress);
+ throwBlock->EXP(throwBlock->CALL(throwBlock->NAME(V4IR::Name::builtin_throw, /*line*/0, /*column*/0), throwArgs));
+ throwBlock->JUMP(exitBlock);
+ Loop *loop = 0;
+
+ qSwap(_function, function);
+ qSwap(_block, entryBlock);
+ qSwap(_exitBlock, exitBlock);
+ qSwap(_throwBlock, throwBlock);
+ qSwap(_returnAddress, returnAddress);
+ qSwap(_scopeAndFinally, scopeAndFinally);
+ qSwap(_loop, loop);
+
+ for (FormalParameterList *it = formals; it; it = it->next) {
+ _function->RECEIVE(it->name.toString());
+ }
+
+ foreach (const Environment::Member &member, _env->members) {
+ if (member.function) {
+ V4IR::Function *function = defineFunction(member.function->name.toString(), member.function, member.function->formals,
+ member.function->body ? member.function->body->elements : 0);
+ if (_debugger)
+ _debugger->setSourceLocation(function, member.function->functionToken.startLine, member.function->functionToken.startColumn);
+ if (! _env->parent) {
+ move(_block->NAME(member.function->name.toString(), member.function->identifierToken.startLine, member.function->identifierToken.startColumn),
+ _block->CLOSURE(function));
+ } else {
+ assert(member.index >= 0);
+ move(_block->TEMP(member.index), _block->CLOSURE(function));
+ }
+ }
+ }
+
+ sourceElements(body);
+
+ _function->insertBasicBlock(_exitBlock);
+
+ _block->JUMP(_exitBlock);
+
+ qSwap(_function, function);
+ qSwap(_block, entryBlock);
+ qSwap(_exitBlock, exitBlock);
+ qSwap(_throwBlock, throwBlock);
+ qSwap(_returnAddress, returnAddress);
+ qSwap(_scopeAndFinally, scopeAndFinally);
+ qSwap(_loop, loop);
+
+ leaveEnvironment();
+
+ qSwap(_mode, mode);
+
+ return function;
+}
+
+bool Codegen::visit(IdentifierPropertyName *ast)
+{
+ _property = ast->id.toString();
+ return false;
+}
+
+bool Codegen::visit(NumericLiteralPropertyName *ast)
+{
+ _property = QString::number(ast->id, 'g', 16);
+ return false;
+}
+
+bool Codegen::visit(StringLiteralPropertyName *ast)
+{
+ _property = ast->id.toString();
+ return false;
+}
+
+bool Codegen::visit(FunctionSourceElement *ast)
+{
+ statement(ast->declaration);
+ return false;
+}
+
+bool Codegen::visit(StatementSourceElement *ast)
+{
+ statement(ast->statement);
+ return false;
+}
+
+bool Codegen::visit(Block *ast)
+{
+ for (StatementList *it = ast->statements; it; it = it->next) {
+ statement(it->statement);
+ }
+ return false;
+}
+
+bool Codegen::visit(BreakStatement *ast)
+{
+ if (!_loop)
+ throwSyntaxError(ast->lastSourceLocation(), QCoreApplication::translate("qv4codegen", "Break outside of loop"));
+ Loop *loop = 0;
+ if (ast->label.isEmpty())
+ loop = _loop;
+ else {
+ for (loop = _loop; loop; loop = loop->parent) {
+ if (loop->labelledStatement && loop->labelledStatement->label == ast->label)
+ break;
+ }
+ if (!loop)
+ throwSyntaxError(ast->lastSourceLocation(), QCoreApplication::translate("qv4codegen", "Undefined label '%1'").arg(ast->label.toString()));
+ }
+ unwindException(loop->scopeAndFinally);
+ _block->JUMP(loop->breakBlock);
+ return false;
+}
+
+bool Codegen::visit(ContinueStatement *ast)
+{
+ Loop *loop = 0;
+ if (ast->label.isEmpty()) {
+ for (loop = _loop; loop; loop = loop->parent) {
+ if (loop->continueBlock)
+ break;
+ }
+ } else {
+ for (loop = _loop; loop; loop = loop->parent) {
+ if (loop->labelledStatement && loop->labelledStatement->label == ast->label) {
+ if (!loop->continueBlock)
+ loop = 0;
+ break;
+ }
+ }
+ if (!loop)
+ throwSyntaxError(ast->lastSourceLocation(), QCoreApplication::translate("qv4codegen", "Undefined label '%1'").arg(ast->label.toString()));
+ }
+ if (!loop)
+ throwSyntaxError(ast->lastSourceLocation(), QCoreApplication::translate("qv4codegen", "continue outside of loop"));
+ unwindException(loop->scopeAndFinally);
+ _block->JUMP(loop->continueBlock);
+ return false;
+}
+
+bool Codegen::visit(DebuggerStatement *)
+{
+ Q_UNIMPLEMENTED();
+ return false;
+}
+
+bool Codegen::visit(DoWhileStatement *ast)
+{
+ V4IR::BasicBlock *loopbody = _function->newBasicBlock();
+ V4IR::BasicBlock *loopcond = _function->newBasicBlock();
+ V4IR::BasicBlock *loopend = _function->newBasicBlock();
+
+ enterLoop(ast, loopend, loopcond);
+
+ _block->JUMP(loopbody);
+
+ _block = loopbody;
+ statement(ast->statement);
+ _block->JUMP(loopcond);
+
+ _block = loopcond;
+ condition(ast->expression, loopbody, loopend);
+
+ _block = loopend;
+
+ leaveLoop();
+
+ return false;
+}
+
+bool Codegen::visit(EmptyStatement *)
+{
+ return false;
+}
+
+bool Codegen::visit(ExpressionStatement *ast)
+{
+ if (_mode == EvalCode) {
+ Result e = expression(ast->expression);
+ if (*e)
+ move(_block->TEMP(_returnAddress), *e);
+ } else {
+ statement(ast->expression);
+ }
+ return false;
+}
+
+bool Codegen::visit(ForEachStatement *ast)
+{
+ V4IR::BasicBlock *foreachin = _function->newBasicBlock();
+ V4IR::BasicBlock *foreachbody = _function->newBasicBlock();
+ V4IR::BasicBlock *foreachend = _function->newBasicBlock();
+
+ enterLoop(ast, foreachend, foreachin);
+
+ int objectToIterateOn = _block->newTemp();
+ move(_block->TEMP(objectToIterateOn), *expression(ast->expression));
+ V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+ args->init(_block->TEMP(objectToIterateOn));
+
+ int iterator = _block->newTemp();
+ move(_block->TEMP(iterator), _block->CALL(_block->NAME(V4IR::Name::builtin_foreach_iterator_object, 0, 0), args));
+
+ _block->JUMP(foreachin);
+
+ _block = foreachbody;
+ int temp = _block->newTemp();
+ move(*expression(ast->initialiser), _block->TEMP(temp));
+ statement(ast->statement);
+ _block->JUMP(foreachin);
+
+ _block = foreachin;
+
+ args = _function->New<V4IR::ExprList>();
+ args->init(_block->TEMP(iterator));
+ move(_block->TEMP(temp), _block->CALL(_block->NAME(V4IR::Name::builtin_foreach_next_property_name, 0, 0), args));
+ int null = _block->newTemp();
+ move(_block->TEMP(null), _block->CONST(V4IR::NullType, 0));
+ cjump(_block->BINOP(V4IR::OpStrictNotEqual, _block->TEMP(temp), _block->TEMP(null)), foreachbody, foreachend);
+ _block = foreachend;
+
+ leaveLoop();
+ return false;
+}
+
+bool Codegen::visit(ForStatement *ast)
+{
+ V4IR::BasicBlock *forcond = _function->newBasicBlock();
+ V4IR::BasicBlock *forbody = _function->newBasicBlock();
+ V4IR::BasicBlock *forstep = _function->newBasicBlock();
+ V4IR::BasicBlock *forend = _function->newBasicBlock();
+
+ enterLoop(ast, forend, forstep);
+
+ statement(ast->initialiser);
+ _block->JUMP(forcond);
+
+ _block = forcond;
+ condition(ast->condition, forbody, forend);
+
+ _block = forbody;
+ statement(ast->statement);
+ _block->JUMP(forstep);
+
+ _block = forstep;
+ statement(ast->expression);
+ _block->JUMP(forcond);
+ _block = forend;
+
+ leaveLoop();
+
+ return false;
+}
+
+bool Codegen::visit(IfStatement *ast)
+{
+ V4IR::BasicBlock *iftrue = _function->newBasicBlock();
+ V4IR::BasicBlock *iffalse = ast->ko ? _function->newBasicBlock() : 0;
+ V4IR::BasicBlock *endif = _function->newBasicBlock();
+ condition(ast->expression, iftrue, ast->ko ? iffalse : endif);
+
+ _block = iftrue;
+ statement(ast->ok);
+ _block->JUMP(endif);
+
+ if (ast->ko) {
+ _block = iffalse;
+ statement(ast->ko);
+ _block->JUMP(endif);
+ }
+
+ _block = endif;
+
+ return false;
+}
+
+bool Codegen::visit(LabelledStatement *ast)
+{
+ _labelledStatement = ast;
+
+ if (AST::cast<AST::SwitchStatement *>(ast->statement) ||
+ AST::cast<AST::WhileStatement *>(ast->statement) ||
+ AST::cast<AST::DoWhileStatement *>(ast->statement) ||
+ AST::cast<AST::ForStatement *>(ast->statement) ||
+ AST::cast<AST::ForEachStatement *>(ast->statement) ||
+ AST::cast<AST::LocalForStatement *>(ast->statement) ||
+ AST::cast<AST::LocalForEachStatement *>(ast->statement)) {
+ statement(ast->statement); // labelledStatement will be associated with the ast->statement's loop.
+ } else {
+ V4IR::BasicBlock *breakBlock = _function->newBasicBlock();
+ enterLoop(ast->statement, breakBlock, /*continueBlock*/ 0);
+ statement(ast->statement);
+ _block->JUMP(breakBlock);
+ _block = breakBlock;
+ leaveLoop();
+ }
+
+ return false;
+}
+
+bool Codegen::visit(LocalForEachStatement *ast)
+{
+ V4IR::BasicBlock *foreachin = _function->newBasicBlock();
+ V4IR::BasicBlock *foreachbody = _function->newBasicBlock();
+ V4IR::BasicBlock *foreachend = _function->newBasicBlock();
+
+ enterLoop(ast, foreachend, foreachin);
+
+ variableDeclaration(ast->declaration);
+
+ int iterator = _block->newTemp();
+ move(_block->TEMP(iterator), *expression(ast->expression));
+ V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+ args->init(_block->TEMP(iterator));
+ move(_block->TEMP(iterator), _block->CALL(_block->NAME(V4IR::Name::builtin_foreach_iterator_object, 0, 0), args));
+
+ _block->JUMP(foreachin);
+
+ _block = foreachbody;
+ int temp = _block->newTemp();
+ move(identifier(ast->declaration->name.toString()), _block->TEMP(temp));
+ statement(ast->statement);
+ _block->JUMP(foreachin);
+
+ _block = foreachin;
+
+ args = _function->New<V4IR::ExprList>();
+ args->init(_block->TEMP(iterator));
+ move(_block->TEMP(temp), _block->CALL(_block->NAME(V4IR::Name::builtin_foreach_next_property_name, 0, 0), args));
+ int null = _block->newTemp();
+ move(_block->TEMP(null), _block->CONST(V4IR::NullType, 0));
+ cjump(_block->BINOP(V4IR::OpStrictNotEqual, _block->TEMP(temp), _block->TEMP(null)), foreachbody, foreachend);
+ _block = foreachend;
+
+ leaveLoop();
+ return false;
+}
+
+bool Codegen::visit(LocalForStatement *ast)
+{
+ V4IR::BasicBlock *forcond = _function->newBasicBlock();
+ V4IR::BasicBlock *forbody = _function->newBasicBlock();
+ V4IR::BasicBlock *forstep = _function->newBasicBlock();
+ V4IR::BasicBlock *forend = _function->newBasicBlock();
+
+ enterLoop(ast, forend, forstep);
+
+ variableDeclarationList(ast->declarations);
+ _block->JUMP(forcond);
+
+ _block = forcond;
+ condition(ast->condition, forbody, forend);
+
+ _block = forbody;
+ statement(ast->statement);
+ _block->JUMP(forstep);
+
+ _block = forstep;
+ statement(ast->expression);
+ _block->JUMP(forcond);
+ _block = forend;
+
+ leaveLoop();
+
+ return false;
+}
+
+bool Codegen::visit(ReturnStatement *ast)
+{
+ if (_mode != FunctionCode)
+ throwSyntaxError(ast->returnToken, QCoreApplication::translate("qv4codegen", "Return statement outside of function"));
+ if (ast->expression) {
+ Result expr = expression(ast->expression);
+ move(_block->TEMP(_returnAddress), *expr);
+ }
+ unwindException(0);
+
+ _block->JUMP(_exitBlock);
+ return false;
+}
+
+bool Codegen::visit(SwitchStatement *ast)
+{
+ V4IR::BasicBlock *switchend = _function->newBasicBlock();
+
+ if (ast->block) {
+ Result lhs = expression(ast->expression);
+ V4IR::BasicBlock *switchcond = _block;
+
+ QHash<Node *, V4IR::BasicBlock *> blockMap;
+
+ enterLoop(ast, switchend, 0);
+
+ for (CaseClauses *it = ast->block->clauses; it; it = it->next) {
+ CaseClause *clause = it->clause;
+
+ _block = _function->newBasicBlock();
+ blockMap[clause] = _block;
+
+ for (StatementList *it2 = clause->statements; it2; it2 = it2->next)
+ statement(it2->statement);
+ }
+
+ if (ast->block->defaultClause) {
+ _block = _function->newBasicBlock();
+ blockMap[ast->block->defaultClause] = _block;
+
+ for (StatementList *it2 = ast->block->defaultClause->statements; it2; it2 = it2->next)
+ statement(it2->statement);
+ }
+
+ for (CaseClauses *it = ast->block->moreClauses; it; it = it->next) {
+ CaseClause *clause = it->clause;
+
+ _block = _function->newBasicBlock();
+ blockMap[clause] = _block;
+
+ for (StatementList *it2 = clause->statements; it2; it2 = it2->next)
+ statement(it2->statement);
+ }
+
+ leaveLoop();
+
+ _block->JUMP(switchend);
+
+ _block = switchcond;
+ for (CaseClauses *it = ast->block->clauses; it; it = it->next) {
+ CaseClause *clause = it->clause;
+ Result rhs = expression(clause->expression);
+ V4IR::BasicBlock *iftrue = blockMap[clause];
+ V4IR::BasicBlock *iffalse = _function->newBasicBlock();
+ cjump(binop(V4IR::OpStrictEqual, *lhs, *rhs), iftrue, iffalse);
+ _block = iffalse;
+ }
+
+ for (CaseClauses *it = ast->block->moreClauses; it; it = it->next) {
+ CaseClause *clause = it->clause;
+ Result rhs = expression(clause->expression);
+ V4IR::BasicBlock *iftrue = blockMap[clause];
+ V4IR::BasicBlock *iffalse = _function->newBasicBlock();
+ cjump(binop(V4IR::OpStrictEqual, *lhs, *rhs), iftrue, iffalse);
+ _block = iffalse;
+ }
+
+ if (ast->block->defaultClause) {
+ _block->JUMP(blockMap[ast->block->defaultClause]);
+ }
+ }
+
+ _block->JUMP(switchend);
+
+ _block = switchend;
+ return false;
+}
+
+bool Codegen::visit(ThrowStatement *ast)
+{
+ Result expr = expression(ast->expression);
+ move(_block->TEMP(_returnAddress), *expr);
+ _block->JUMP(_throwBlock);
+ return false;
+}
+
+bool Codegen::visit(TryStatement *ast)
+{
+ _function->hasTry = true;
+
+ if (_function->isStrict && ast->catchExpression &&
+ (ast->catchExpression->name == QLatin1String("eval") || ast->catchExpression->name == QLatin1String("arguments")))
+ throwSyntaxError(ast->catchExpression->identifierToken, QCoreApplication::translate("qv4codegen", "Catch variable name may not be eval or arguments in strict mode"));
+
+ V4IR::BasicBlock *tryBody = _function->newBasicBlock();
+ V4IR::BasicBlock *catchBody = _function->newBasicBlock();
+ // We always need a finally body to clean up the exception handler
+ V4IR::BasicBlock *finallyBody = _function->newBasicBlock();
+
+ V4IR::BasicBlock *throwBlock = _function->newBasicBlock();
+ V4IR::ExprList *throwArgs = _function->New<V4IR::ExprList>();
+ throwArgs->expr = throwBlock->TEMP(_returnAddress);
+ throwBlock->EXP(throwBlock->CALL(throwBlock->NAME(V4IR::Name::builtin_throw, /*line*/0, /*column*/0), throwArgs));
+ throwBlock->JUMP(catchBody);
+ qSwap(_throwBlock, throwBlock);
+
+ int hasException = _block->newTemp();
+ move(_block->TEMP(hasException), _block->CONST(V4IR::BoolType, false));
+
+ // Pass the hidden "needRethrow" TEMP to the
+ // builtin_delete_exception_handler, in order to have those TEMPs alive for
+ // the duration of the exception handling block.
+ V4IR::ExprList *finishTryArgs = _function->New<V4IR::ExprList>();
+ finishTryArgs->init(_block->TEMP(hasException));
+
+ ScopeAndFinally tcf(_scopeAndFinally, ast->finallyExpression, finishTryArgs);
+ _scopeAndFinally = &tcf;
+
+ int exception_to_rethrow = _block->newTemp();
+
+ _block->TRY(tryBody, catchBody,
+ ast->catchExpression ? ast->catchExpression->name.toString() : QString(),
+ _block->TEMP(exception_to_rethrow));
+
+ _block = tryBody;
+ statement(ast->statement);
+ _block->JUMP(finallyBody);
+
+ _block = catchBody;
+
+ if (ast->catchExpression) {
+ // check if an exception got thrown within catch. Go to finally
+ // and then rethrow
+ V4IR::BasicBlock *b = _function->newBasicBlock();
+ _block->CJUMP(_block->TEMP(hasException), finallyBody, b);
+ _block = b;
+ }
+
+ move(_block->TEMP(hasException), _block->CONST(V4IR::BoolType, true));
+
+ if (ast->catchExpression) {
+ ++_function->insideWithOrCatch;
+ {
+ ScopeAndFinally scope(_scopeAndFinally, ScopeAndFinally::CatchScope);
+ _scopeAndFinally = &scope;
+ statement(ast->catchExpression->statement);
+ _scopeAndFinally = scope.parent;
+ }
+ --_function->insideWithOrCatch;
+ move(_block->TEMP(hasException), _block->CONST(V4IR::BoolType, false));
+ }
+ _block->JUMP(finallyBody);
+
+ _scopeAndFinally = tcf.parent;
+
+ qSwap(_throwBlock, throwBlock);
+
+ V4IR::BasicBlock *after = _function->newBasicBlock();
+ _block = finallyBody;
+
+ _block->EXP(_block->CALL(_block->NAME(V4IR::Name::builtin_finish_try, 0, 0), finishTryArgs));
+
+ if (ast->finallyExpression && ast->finallyExpression->statement)
+ statement(ast->finallyExpression->statement);
+
+ V4IR::BasicBlock *rethrowBlock = _function->newBasicBlock();
+ _block->CJUMP(_block->TEMP(hasException), rethrowBlock, after);
+ _block = rethrowBlock;
+ move(_block->TEMP(_returnAddress), _block->TEMP(exception_to_rethrow));
+ _block->JUMP(_throwBlock);
+
+ _block = after;
+
+ return false;
+}
+
+void Codegen::unwindException(Codegen::ScopeAndFinally *outest)
+{
+ int savedDepthForWidthOrCatch = _function->insideWithOrCatch;
+ ScopeAndFinally *scopeAndFinally = _scopeAndFinally;
+ qSwap(_scopeAndFinally, scopeAndFinally);
+ while (_scopeAndFinally != outest) {
+ switch (_scopeAndFinally->type) {
+ case ScopeAndFinally::WithScope:
+ _block->EXP(_block->CALL(_block->NAME(V4IR::Name::builtin_pop_scope, 0, 0)));
+ // fall through
+ case ScopeAndFinally::CatchScope:
+ _scopeAndFinally = _scopeAndFinally->parent;
+ --_function->insideWithOrCatch;
+ break;
+ case ScopeAndFinally::TryScope: {
+ _block->EXP(_block->CALL(_block->NAME(V4IR::Name::builtin_finish_try, 0, 0), _scopeAndFinally->finishTryArgs));
+ ScopeAndFinally *tc = _scopeAndFinally;
+ _scopeAndFinally = tc->parent;
+ if (tc->finally && tc->finally->statement)
+ statement(tc->finally->statement);
+ break;
+ }
+ }
+ }
+ qSwap(_scopeAndFinally, scopeAndFinally);
+ _function->insideWithOrCatch = savedDepthForWidthOrCatch;
+}
+
+bool Codegen::visit(VariableStatement *ast)
+{
+ variableDeclarationList(ast->declarations);
+ return false;
+}
+
+bool Codegen::visit(WhileStatement *ast)
+{
+ V4IR::BasicBlock *whilecond = _function->newBasicBlock();
+ V4IR::BasicBlock *whilebody = _function->newBasicBlock();
+ V4IR::BasicBlock *whileend = _function->newBasicBlock();
+
+ enterLoop(ast, whileend, whilecond);
+
+ _block->JUMP(whilecond);
+ _block = whilecond;
+ condition(ast->expression, whilebody, whileend);
+
+ _block = whilebody;
+ statement(ast->statement);
+ _block->JUMP(whilecond);
+
+ _block = whileend;
+ leaveLoop();
+
+ return false;
+}
+
+bool Codegen::visit(WithStatement *ast)
+{
+ _function->hasWith = true;
+
+ V4IR::BasicBlock *withBlock = _function->newBasicBlock();
+
+ _block->JUMP(withBlock);
+ _block = withBlock;
+ int withObject = _block->newTemp();
+ _block->MOVE(_block->TEMP(withObject), *expression(ast->expression));
+ V4IR::ExprList *args = _function->New<V4IR::ExprList>();
+ args->init(_block->TEMP(withObject));
+ _block->EXP(_block->CALL(_block->NAME(V4IR::Name::builtin_push_with_scope, 0, 0), args));
+
+ ++_function->insideWithOrCatch;
+ {
+ ScopeAndFinally scope(_scopeAndFinally);
+ _scopeAndFinally = &scope;
+ statement(ast->statement);
+ _scopeAndFinally = scope.parent;
+ }
+ --_function->insideWithOrCatch;
+ _block->EXP(_block->CALL(_block->NAME(V4IR::Name::builtin_pop_scope, 0, 0), 0));
+
+ V4IR::BasicBlock *next = _function->newBasicBlock();
+ _block->JUMP(next);
+ _block = next;
+
+ return false;
+}
+
+bool Codegen::visit(UiArrayBinding *)
+{
+ assert(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiObjectBinding *)
+{
+ assert(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiObjectDefinition *)
+{
+ assert(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiPublicMember *)
+{
+ assert(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiScriptBinding *)
+{
+ assert(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiSourceElement *)
+{
+ assert(!"not implemented");
+ return false;
+}
+
+void Codegen::throwSyntaxErrorOnEvalOrArgumentsInStrictMode(V4IR::Expr *expr, const SourceLocation& loc)
+{
+ if (!_env->isStrict)
+ return;
+ V4IR::Name *n = expr->asName();
+ if (!n)
+ return;
+ if (*n->id == QLatin1String("eval") || *n->id == QLatin1String("arguments"))
+ throwSyntaxError(loc, QCoreApplication::translate("qv4codegen", "Variable name may not be eval or arguments in strict mode"));
+}
+
+void Codegen::throwSyntaxError(const SourceLocation &loc, const QString &detail)
+{
+ VM::DiagnosticMessage *msg = new VM::DiagnosticMessage;
+ msg->fileName = _fileName;
+ msg->offset = loc.begin();
+ msg->startLine = loc.startLine;
+ msg->startColumn = loc.startColumn;
+ msg->message = detail;
+ if (_context)
+ _context->throwSyntaxError(msg);
+ else if (_errorHandler)
+ _errorHandler->syntaxError(msg);
+ else
+ Q_ASSERT(!"No error handler available.");
+}
+
+void Codegen::throwReferenceError(const SourceLocation &loc, const QString &detail)
+{
+ if (_context)
+ _context->throwReferenceError(VM::Value::fromString(_context, detail));
+ else if (_errorHandler)
+ throwSyntaxError(loc, detail);
+ else
+ Q_ASSERT(!"No error handler available.");
+}
diff --git a/src/qml/qml/v4vm/qv4codegen_p.h b/src/qml/qml/v4vm/qv4codegen_p.h
new file mode 100644
index 0000000000..031e75d207
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4codegen_p.h
@@ -0,0 +1,444 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4CODEGEN_P_H
+#define QV4CODEGEN_P_H
+
+#include "qv4global.h"
+#include "qv4jsir_p.h"
+#include <private/qqmljsastvisitor_p.h>
+#include <private/qqmljsast_p.h>
+#include <QtCore/QStringList>
+#include <assert.h>
+
+namespace QQmlJS {
+
+namespace AST {
+class UiParameterList;
+}
+
+namespace VM {
+struct DiagnosticMessage;
+struct ExecutionContext;
+}
+
+namespace Debugging {
+class Debugger;
+} // namespace Debugging
+
+class ErrorHandler
+{
+public:
+ virtual void syntaxError(VM::DiagnosticMessage *message) = 0;
+};
+
+class Q_V4_EXPORT Codegen: protected AST::Visitor
+{
+public:
+ Codegen(VM::ExecutionContext *ctx, bool strict);
+ Codegen(ErrorHandler *errorHandler, bool strictMode);
+
+ enum Mode {
+ GlobalCode,
+ EvalCode,
+ FunctionCode
+ };
+
+ V4IR::Function *operator()(const QString &fileName,
+ const QString &sourceCode,
+ AST::Program *ast,
+ V4IR::Module *module,
+ Mode mode = GlobalCode,
+ const QStringList &inheritedLocals = QStringList());
+ V4IR::Function *operator()(const QString &fileName,
+ const QString &sourceCode,
+ AST::FunctionExpression *ast,
+ V4IR::Module *module);
+
+protected:
+ enum Format { ex, cx, nx };
+ struct Result {
+ V4IR::Expr *code;
+ V4IR::BasicBlock *iftrue;
+ V4IR::BasicBlock *iffalse;
+ Format format;
+ Format requested;
+
+ explicit Result(Format requested = ex)
+ : code(0)
+ , iftrue(0)
+ , iffalse(0)
+ , format(ex)
+ , requested(requested) {}
+
+ explicit Result(V4IR::BasicBlock *iftrue, V4IR::BasicBlock *iffalse)
+ : code(0)
+ , iftrue(iftrue)
+ , iffalse(iffalse)
+ , format(ex)
+ , requested(cx) {}
+
+ inline V4IR::Expr *operator*() const { Q_ASSERT(format == ex); return code; }
+ inline V4IR::Expr *operator->() const { Q_ASSERT(format == ex); return code; }
+
+ bool accept(Format f)
+ {
+ if (requested == f) {
+ format = f;
+ return true;
+ }
+ return false;
+ }
+ };
+
+ struct Environment {
+ Environment *parent;
+
+ enum MemberType {
+ UndefinedMember,
+ VariableDefinition,
+ VariableDeclaration,
+ FunctionDefinition
+ };
+ struct Member {
+ MemberType type;
+ int index;
+ AST::FunctionExpression *function;
+ };
+ typedef QMap<QString, Member> MemberMap;
+
+ MemberMap members;
+ AST::FormalParameterList *formals;
+ int maxNumberOfArguments;
+ bool hasDirectEval;
+ bool hasNestedFunctions;
+ bool isStrict;
+ bool isNamedFunctionExpression;
+ enum UsesArgumentsObject {
+ ArgumentsObjectUnknown,
+ ArgumentsObjectNotUsed,
+ ArgumentsObjectUsed
+ };
+
+ UsesArgumentsObject usesArgumentsObject;
+
+ Environment(Environment *parent)
+ : parent(parent)
+ , formals(0)
+ , maxNumberOfArguments(0)
+ , hasDirectEval(false)
+ , hasNestedFunctions(false)
+ , isStrict(false)
+ , isNamedFunctionExpression(false)
+ , usesArgumentsObject(ArgumentsObjectUnknown)
+ {
+ if (parent && parent->isStrict)
+ isStrict = true;
+ }
+
+ int findMember(const QString &name) const
+ {
+ MemberMap::const_iterator it = members.find(name);
+ if (it == members.end())
+ return -1;
+ assert((*it).index != -1 || !parent);
+ return (*it).index;
+ }
+
+ bool lookupMember(const QString &name, Environment **scope, int *index, int *distance)
+ {
+ Environment *it = this;
+ *distance = 0;
+ for (; it; it = it->parent, ++(*distance)) {
+ int idx = it->findMember(name);
+ if (idx != -1) {
+ *scope = it;
+ *index = idx;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void enter(const QString &name, MemberType type, AST::FunctionExpression *function = 0)
+ {
+ if (! name.isEmpty()) {
+ if (type != FunctionDefinition) {
+ for (AST::FormalParameterList *it = formals; it; it = it->next)
+ if (it->name == name)
+ return;
+ }
+ MemberMap::iterator it = members.find(name);
+ if (it == members.end()) {
+ Member m;
+ m.index = -1;
+ m.type = type;
+ m.function = function;
+ members.insert(name, m);
+ } else {
+ if ((*it).type <= type) {
+ (*it).type = type;
+ (*it).function = function;
+ }
+ }
+ }
+ }
+ };
+
+ Environment *newEnvironment(AST::Node *node, Environment *parent)
+ {
+ Environment *env = new Environment(parent);
+ _envMap.insert(node, env);
+ return env;
+ }
+
+ struct UiMember {
+ };
+
+ struct ScopeAndFinally {
+ enum ScopeType {
+ WithScope,
+ TryScope,
+ CatchScope
+ };
+
+ ScopeAndFinally *parent;
+ AST::Finally *finally;
+ V4IR::ExprList *finishTryArgs;
+ ScopeType type;
+
+ ScopeAndFinally(ScopeAndFinally *parent, ScopeType t = WithScope) : parent(parent), finally(0), finishTryArgs(0), type(t) {}
+ ScopeAndFinally(ScopeAndFinally *parent, AST::Finally *finally, V4IR::ExprList *finishTryArgs)
+ : parent(parent), finally(finally), finishTryArgs(finishTryArgs), type(TryScope)
+ {}
+ };
+
+ struct Loop {
+ AST::LabelledStatement *labelledStatement;
+ AST::Statement *node;
+ V4IR::BasicBlock *breakBlock;
+ V4IR::BasicBlock *continueBlock;
+ Loop *parent;
+ ScopeAndFinally *scopeAndFinally;
+
+ Loop(AST::Statement *node, V4IR::BasicBlock *breakBlock, V4IR::BasicBlock *continueBlock, Loop *parent)
+ : labelledStatement(0), node(node), breakBlock(breakBlock), continueBlock(continueBlock), parent(parent) {}
+ };
+
+ void enterEnvironment(AST::Node *node);
+ void leaveEnvironment();
+
+ void enterLoop(AST::Statement *node, V4IR::BasicBlock *breakBlock, V4IR::BasicBlock *continueBlock);
+ void leaveLoop();
+
+
+ V4IR::Expr *member(V4IR::Expr *base, const QString *name);
+ V4IR::Expr *subscript(V4IR::Expr *base, V4IR::Expr *index);
+ V4IR::Expr *argument(V4IR::Expr *expr);
+ V4IR::Expr *reference(V4IR::Expr *expr);
+ V4IR::Expr *unop(V4IR::AluOp op, V4IR::Expr *expr);
+ V4IR::Expr *binop(V4IR::AluOp op, V4IR::Expr *left, V4IR::Expr *right);
+ V4IR::Expr *call(V4IR::Expr *base, V4IR::ExprList *args);
+ void move(V4IR::Expr *target, V4IR::Expr *source, V4IR::AluOp op = V4IR::OpInvalid);
+ void cjump(V4IR::Expr *cond, V4IR::BasicBlock *iftrue, V4IR::BasicBlock *iffalse);
+
+ void linearize(V4IR::Function *function);
+ V4IR::Function *defineFunction(const QString &name, AST::Node *ast,
+ AST::FormalParameterList *formals,
+ AST::SourceElements *body,
+ Mode mode = FunctionCode,
+ const QStringList &inheritedLocals = QStringList());
+
+ void unwindException(ScopeAndFinally *outest);
+
+ void statement(AST::Statement *ast);
+ void statement(AST::ExpressionNode *ast);
+ void condition(AST::ExpressionNode *ast, V4IR::BasicBlock *iftrue, V4IR::BasicBlock *iffalse);
+ Result expression(AST::ExpressionNode *ast);
+ QString propertyName(AST::PropertyName *ast);
+ Result sourceElement(AST::SourceElement *ast);
+ UiMember uiObjectMember(AST::UiObjectMember *ast);
+
+ void accept(AST::Node *node);
+
+ void functionBody(AST::FunctionBody *ast);
+ void program(AST::Program *ast);
+ void sourceElements(AST::SourceElements *ast);
+ void variableDeclaration(AST::VariableDeclaration *ast);
+ void variableDeclarationList(AST::VariableDeclarationList *ast);
+
+ V4IR::Expr *identifier(const QString &name, int line = 0, int col = 0);
+
+ // nodes
+ virtual bool visit(AST::ArgumentList *ast);
+ virtual bool visit(AST::CaseBlock *ast);
+ virtual bool visit(AST::CaseClause *ast);
+ virtual bool visit(AST::CaseClauses *ast);
+ virtual bool visit(AST::Catch *ast);
+ virtual bool visit(AST::DefaultClause *ast);
+ virtual bool visit(AST::ElementList *ast);
+ virtual bool visit(AST::Elision *ast);
+ virtual bool visit(AST::Finally *ast);
+ virtual bool visit(AST::FormalParameterList *ast);
+ virtual bool visit(AST::FunctionBody *ast);
+ virtual bool visit(AST::Program *ast);
+ virtual bool visit(AST::PropertyNameAndValue *ast);
+ virtual bool visit(AST::PropertyAssignmentList *ast);
+ virtual bool visit(AST::PropertyGetterSetter *ast);
+ virtual bool visit(AST::SourceElements *ast);
+ virtual bool visit(AST::StatementList *ast);
+ virtual bool visit(AST::UiArrayMemberList *ast);
+ virtual bool visit(AST::UiImport *ast);
+ virtual bool visit(AST::UiImportList *ast);
+ virtual bool visit(AST::UiObjectInitializer *ast);
+ virtual bool visit(AST::UiObjectMemberList *ast);
+ virtual bool visit(AST::UiParameterList *ast);
+ virtual bool visit(AST::UiProgram *ast);
+ virtual bool visit(AST::UiQualifiedId *ast);
+ virtual bool visit(AST::VariableDeclaration *ast);
+ virtual bool visit(AST::VariableDeclarationList *ast);
+
+ // expressions
+ virtual bool visit(AST::Expression *ast);
+ virtual bool visit(AST::ArrayLiteral *ast);
+ virtual bool visit(AST::ArrayMemberExpression *ast);
+ virtual bool visit(AST::BinaryExpression *ast);
+ virtual bool visit(AST::CallExpression *ast);
+ virtual bool visit(AST::ConditionalExpression *ast);
+ virtual bool visit(AST::DeleteExpression *ast);
+ virtual bool visit(AST::FalseLiteral *ast);
+ virtual bool visit(AST::FieldMemberExpression *ast);
+ virtual bool visit(AST::FunctionExpression *ast);
+ virtual bool visit(AST::IdentifierExpression *ast);
+ virtual bool visit(AST::NestedExpression *ast);
+ virtual bool visit(AST::NewExpression *ast);
+ virtual bool visit(AST::NewMemberExpression *ast);
+ virtual bool visit(AST::NotExpression *ast);
+ virtual bool visit(AST::NullExpression *ast);
+ virtual bool visit(AST::NumericLiteral *ast);
+ virtual bool visit(AST::ObjectLiteral *ast);
+ virtual bool visit(AST::PostDecrementExpression *ast);
+ virtual bool visit(AST::PostIncrementExpression *ast);
+ virtual bool visit(AST::PreDecrementExpression *ast);
+ virtual bool visit(AST::PreIncrementExpression *ast);
+ virtual bool visit(AST::RegExpLiteral *ast);
+ virtual bool visit(AST::StringLiteral *ast);
+ virtual bool visit(AST::ThisExpression *ast);
+ virtual bool visit(AST::TildeExpression *ast);
+ virtual bool visit(AST::TrueLiteral *ast);
+ virtual bool visit(AST::TypeOfExpression *ast);
+ virtual bool visit(AST::UnaryMinusExpression *ast);
+ virtual bool visit(AST::UnaryPlusExpression *ast);
+ virtual bool visit(AST::VoidExpression *ast);
+ virtual bool visit(AST::FunctionDeclaration *ast);
+
+ // property names
+ virtual bool visit(AST::IdentifierPropertyName *ast);
+ virtual bool visit(AST::NumericLiteralPropertyName *ast);
+ virtual bool visit(AST::StringLiteralPropertyName *ast);
+
+ // source elements
+ virtual bool visit(AST::FunctionSourceElement *ast);
+ virtual bool visit(AST::StatementSourceElement *ast);
+
+ // statements
+ virtual bool visit(AST::Block *ast);
+ virtual bool visit(AST::BreakStatement *ast);
+ virtual bool visit(AST::ContinueStatement *ast);
+ virtual bool visit(AST::DebuggerStatement *ast);
+ virtual bool visit(AST::DoWhileStatement *ast);
+ virtual bool visit(AST::EmptyStatement *ast);
+ virtual bool visit(AST::ExpressionStatement *ast);
+ virtual bool visit(AST::ForEachStatement *ast);
+ virtual bool visit(AST::ForStatement *ast);
+ virtual bool visit(AST::IfStatement *ast);
+ virtual bool visit(AST::LabelledStatement *ast);
+ virtual bool visit(AST::LocalForEachStatement *ast);
+ virtual bool visit(AST::LocalForStatement *ast);
+ virtual bool visit(AST::ReturnStatement *ast);
+ virtual bool visit(AST::SwitchStatement *ast);
+ virtual bool visit(AST::ThrowStatement *ast);
+ virtual bool visit(AST::TryStatement *ast);
+ virtual bool visit(AST::VariableStatement *ast);
+ virtual bool visit(AST::WhileStatement *ast);
+ virtual bool visit(AST::WithStatement *ast);
+
+ // ui object members
+ virtual bool visit(AST::UiArrayBinding *ast);
+ virtual bool visit(AST::UiObjectBinding *ast);
+ virtual bool visit(AST::UiObjectDefinition *ast);
+ virtual bool visit(AST::UiPublicMember *ast);
+ virtual bool visit(AST::UiScriptBinding *ast);
+ virtual bool visit(AST::UiSourceElement *ast);
+
+ void throwSyntaxErrorOnEvalOrArgumentsInStrictMode(V4IR::Expr* expr, const AST::SourceLocation &loc);
+
+ void throwSyntaxError(const AST::SourceLocation &loc, const QString &detail);
+ void throwReferenceError(const AST::SourceLocation &loc, const QString &detail);
+
+private:
+ QString _fileName;
+ Result _expr;
+ QString _property;
+ UiMember _uiMember;
+ V4IR::Module *_module;
+ V4IR::Function *_function;
+ V4IR::BasicBlock *_block;
+ V4IR::BasicBlock *_exitBlock;
+ V4IR::BasicBlock *_throwBlock;
+ unsigned _returnAddress;
+ Mode _mode;
+ Environment *_env;
+ Loop *_loop;
+ AST::LabelledStatement *_labelledStatement;
+ ScopeAndFinally *_scopeAndFinally;
+ QHash<AST::Node *, Environment *> _envMap;
+ QHash<AST::FunctionExpression *, int> _functionMap;
+ VM::ExecutionContext *_context;
+ bool _strictMode;
+ Debugging::Debugger *_debugger;
+ ErrorHandler *_errorHandler;
+
+ class ScanFunctions;
+};
+
+} // end of namespace QQmlJS
+
+#endif // QV4CODEGEN_P_H
diff --git a/src/qml/qml/v4vm/qv4context.cpp b/src/qml/qml/v4vm/qv4context.cpp
new file mode 100644
index 0000000000..e1ce0016dd
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4context.cpp
@@ -0,0 +1,576 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <QString>
+#include "debugging.h"
+#include <qv4context.h>
+#include <qv4object.h>
+#include <qv4objectproto.h>
+#include "qv4mm.h"
+#include <qv4argumentsobject.h>
+
+namespace QQmlJS {
+namespace VM {
+
+DiagnosticMessage::DiagnosticMessage()
+ : offset(0)
+ , length(0)
+ , startLine(0)
+ , startColumn(0)
+ , type(0)
+ , next(0)
+{}
+
+DiagnosticMessage::~DiagnosticMessage()
+{
+ delete next;
+}
+
+String *DiagnosticMessage::buildFullMessage(ExecutionContext *ctx) const
+{
+ QString msg;
+ if (!fileName.isEmpty())
+ msg = fileName + QLatin1Char(':');
+ msg += QString::number(startLine) + QLatin1Char(':') + QString::number(startColumn) + QLatin1String(": ");
+ if (type == QQmlJS::VM::DiagnosticMessage::Error)
+ msg += QLatin1String("error");
+ else
+ msg += QLatin1String("warning");
+ msg += ": " + message;
+
+ return ctx->engine->newString(msg);
+}
+
+void ExecutionContext::createMutableBinding(String *name, bool deletable)
+{
+
+ // find the right context to create the binding on
+ Object *activation = engine->globalObject;
+ ExecutionContext *ctx = this;
+ while (ctx) {
+ if (ctx->type >= Type_CallContext) {
+ CallContext *c = static_cast<CallContext *>(ctx);
+ if (!c->activation)
+ c->activation = engine->newObject();
+ activation = c->activation;
+ break;
+ }
+ ctx = ctx->outer;
+ }
+
+ if (activation->__hasProperty__(name))
+ return;
+ Property desc = Property::fromValue(Value::undefinedValue());
+ PropertyAttributes attrs(Attr_Data);
+ attrs.setConfigurable(deletable);
+ activation->__defineOwnProperty__(this, name, desc, attrs);
+}
+
+String * const *ExecutionContext::formals() const
+{
+ return type >= Type_CallContext ? static_cast<const CallContext *>(this)->function->formalParameterList : 0;
+}
+
+unsigned int ExecutionContext::formalCount() const
+{
+ return type >= Type_CallContext ? static_cast<const CallContext *>(this)->function->formalParameterCount : 0;
+}
+
+String * const *ExecutionContext::variables() const
+{
+ return type >= Type_CallContext ? static_cast<const CallContext *>(this)->function->varList : 0;
+}
+
+unsigned int ExecutionContext::variableCount() const
+{
+ return type >= Type_CallContext ? static_cast<const CallContext *>(this)->function->varCount : 0;
+}
+
+
+void GlobalContext::init(ExecutionEngine *eng)
+{
+ type = Type_GlobalContext;
+ strictMode = false;
+ marked = false;
+ thisObject = Value::fromObject(eng->globalObject);
+ engine = eng;
+ outer = 0;
+ lookups = 0;
+ global = 0;
+}
+
+void WithContext::init(ExecutionContext *p, Object *with)
+{
+ type = Type_WithContext;
+ strictMode = false;
+ marked = false;
+ thisObject = p->thisObject;
+ engine = p->engine;
+ outer = p;
+ lookups = p->lookups;
+
+ withObject = with;
+}
+
+void CatchContext::init(ExecutionContext *p, String *exceptionVarName, const Value &exceptionValue)
+{
+ type = Type_CatchContext;
+ strictMode = p->strictMode;
+ marked = false;
+ thisObject = p->thisObject;
+ engine = p->engine;
+ outer = p;
+ lookups = p->lookups;
+
+ this->exceptionVarName = exceptionVarName;
+ this->exceptionValue = exceptionValue;
+}
+
+void CallContext::initCallContext(ExecutionEngine *engine)
+{
+ type = Type_CallContext;
+ strictMode = function->strictMode;
+ marked = false;
+ this->engine = engine;
+ outer = function->scope;
+#ifndef QT_NO_DEBUG
+ assert(outer->next != (ExecutionContext *)0x1);
+#endif
+
+ activation = 0;
+
+ if (function->function)
+ lookups = function->function->lookups;
+
+ uint argc = argumentCount;
+
+ locals = (Value *)(this + 1);
+ if (function->varCount)
+ std::fill(locals, locals + function->varCount, Value::undefinedValue());
+
+ if (needsOwnArguments()) {
+ Value *args = arguments;
+ argumentCount = qMax(argc, function->formalParameterCount);
+ arguments = locals + function->varCount;
+ if (argc)
+ ::memcpy(arguments, args, argc * sizeof(Value));
+ if (argc < function->formalParameterCount)
+ std::fill(arguments + argc, arguments + function->formalParameterCount, Value::undefinedValue());
+
+ }
+
+ if (function->usesArgumentsObject) {
+ ArgumentsObject *args = new (engine->memoryManager) ArgumentsObject(this, function->formalParameterCount, argc);
+ args->prototype = engine->objectPrototype;
+ Value arguments = Value::fromObject(args);
+ activation = engine->newObject();
+ Property desc = Property::fromValue(Value::fromObject(args));
+ activation->__defineOwnProperty__(this, engine->id_arguments, desc, Attr_NotConfigurable);
+ }
+}
+
+
+bool ExecutionContext::deleteProperty(String *name)
+{
+ bool hasWith = false;
+ for (ExecutionContext *ctx = this; ctx; ctx = ctx->outer) {
+ if (ctx->type == Type_WithContext) {
+ hasWith = true;
+ WithContext *w = static_cast<WithContext *>(ctx);
+ if (w->withObject->__hasProperty__(name))
+ return w->withObject->deleteProperty(this, name);
+ } else if (ctx->type == Type_CatchContext) {
+ CatchContext *c = static_cast<CatchContext *>(ctx);
+ if (c->exceptionVarName->isEqualTo(name))
+ return false;
+ } else if (ctx->type >= Type_CallContext) {
+ CallContext *c = static_cast<CallContext *>(ctx);
+ FunctionObject *f = c->function;
+ if (f->needsActivation || hasWith) {
+ for (unsigned int i = 0; i < f->varCount; ++i)
+ if (f->varList[i]->isEqualTo(name))
+ return false;
+ for (int i = (int)f->formalParameterCount - 1; i >= 0; --i)
+ if (f->formalParameterList[i]->isEqualTo(name))
+ return false;
+ }
+ if (c->activation && c->activation->__hasProperty__(name))
+ return c->activation->deleteProperty(this, name);
+ } else if (ctx->type == Type_GlobalContext) {
+ GlobalContext *g = static_cast<GlobalContext *>(ctx);
+ if (g->global->__hasProperty__(name))
+ return g->global->deleteProperty(this, name);
+ }
+ }
+
+ if (strictMode)
+ throwSyntaxError(0);
+ return true;
+}
+
+bool CallContext::needsOwnArguments() const
+{
+ return function->needsActivation || argumentCount < function->formalParameterCount;
+}
+
+void ExecutionContext::mark()
+{
+ if (marked)
+ return;
+ marked = true;
+
+ if (type != Type_SimpleCallContext && outer)
+ outer->mark();
+
+ thisObject.mark();
+
+ if (type >= Type_SimpleCallContext) {
+ VM::CallContext *c = static_cast<CallContext *>(this);
+ for (unsigned arg = 0, lastArg = c->argumentCount; arg < lastArg; ++arg)
+ c->arguments[arg].mark();
+ if (type >= Type_CallContext) {
+ for (unsigned local = 0, lastLocal = c->variableCount(); local < lastLocal; ++local)
+ c->locals[local].mark();
+ if (c->activation)
+ c->activation->mark();
+ c->function->mark();
+ }
+ } else if (type == Type_WithContext) {
+ WithContext *w = static_cast<WithContext *>(this);
+ w->withObject->mark();
+ } else if (type == Type_CatchContext) {
+ CatchContext *c = static_cast<CatchContext *>(this);
+ if (c->exceptionVarName)
+ c->exceptionVarName->mark();
+ c->exceptionValue.mark();
+ } else if (type == Type_GlobalContext) {
+ GlobalContext *g = static_cast<GlobalContext *>(this);
+ g->global->mark();
+ }
+}
+
+void ExecutionContext::setProperty(String *name, const Value& value)
+{
+ for (ExecutionContext *ctx = this; ctx; ctx = ctx->outer) {
+ if (ctx->type == Type_WithContext) {
+ Object *w = static_cast<WithContext *>(ctx)->withObject;
+ if (w->__hasProperty__(name)) {
+ w->put(ctx, name, value);
+ return;
+ }
+ } else if (ctx->type == Type_CatchContext && static_cast<CatchContext *>(ctx)->exceptionVarName->isEqualTo(name)) {
+ static_cast<CatchContext *>(ctx)->exceptionValue = value;
+ return;
+ } else {
+ Object *activation = 0;
+ if (ctx->type >= Type_CallContext) {
+ CallContext *c = static_cast<CallContext *>(ctx);
+ for (unsigned int i = 0; i < c->function->varCount; ++i)
+ if (c->function->varList[i]->isEqualTo(name)) {
+ c->locals[i] = value;
+ return;
+ }
+ for (int i = (int)c->function->formalParameterCount - 1; i >= 0; --i)
+ if (c->function->formalParameterList[i]->isEqualTo(name)) {
+ c->arguments[i] = value;
+ return;
+ }
+ activation = c->activation;
+ } else if (ctx->type == Type_GlobalContext) {
+ activation = static_cast<GlobalContext *>(ctx)->global;
+ }
+
+ if (activation && (ctx->type == Type_QmlContext || activation->__hasProperty__(name))) {
+ activation->put(this, name, value);
+ return;
+ }
+ }
+ }
+ if (strictMode || name->isEqualTo(engine->id_this))
+ throwReferenceError(Value::fromString(name));
+ engine->globalObject->put(this, name, value);
+}
+
+Value ExecutionContext::getProperty(String *name)
+{
+ name->makeIdentifier(this);
+
+ if (name->isEqualTo(engine->id_this))
+ return thisObject;
+
+ bool hasWith = false;
+ bool hasCatchScope = false;
+ for (ExecutionContext *ctx = this; ctx; ctx = ctx->outer) {
+ if (ctx->type == Type_WithContext) {
+ Object *w = static_cast<WithContext *>(ctx)->withObject;
+ hasWith = true;
+ bool hasProperty = false;
+ Value v = w->get(ctx, name, &hasProperty);
+ if (hasProperty) {
+ return v;
+ }
+ continue;
+ }
+
+ else if (ctx->type == Type_CatchContext) {
+ hasCatchScope = true;
+ CatchContext *c = static_cast<CatchContext *>(ctx);
+ if (c->exceptionVarName->isEqualTo(name))
+ return c->exceptionValue;
+ }
+
+ else if (ctx->type >= Type_CallContext) {
+ VM::CallContext *c = static_cast<CallContext *>(ctx);
+ FunctionObject *f = c->function;
+ if (f->needsActivation || hasWith || hasCatchScope) {
+ for (unsigned int i = 0; i < f->varCount; ++i)
+ if (f->varList[i]->isEqualTo(name))
+ return c->locals[i];
+ for (int i = (int)f->formalParameterCount - 1; i >= 0; --i)
+ if (f->formalParameterList[i]->isEqualTo(name))
+ return c->arguments[i];
+ }
+ if (c->activation) {
+ bool hasProperty = false;
+ Value v = c->activation->get(c, name, &hasProperty);
+ if (hasProperty)
+ return v;
+ }
+ if (f->function && f->function->isNamedExpression
+ && name->isEqualTo(f->function->name))
+ return Value::fromObject(c->function);
+ }
+
+ else if (ctx->type == Type_GlobalContext) {
+ GlobalContext *g = static_cast<GlobalContext *>(ctx);
+ bool hasProperty = false;
+ Value v = g->global->get(g, name, &hasProperty);
+ if (hasProperty)
+ return v;
+ }
+ }
+ throwReferenceError(Value::fromString(name));
+ return Value::undefinedValue();
+}
+
+Value ExecutionContext::getPropertyNoThrow(String *name)
+{
+ name->makeIdentifier(this);
+
+ if (name->isEqualTo(engine->id_this))
+ return thisObject;
+
+ bool hasWith = false;
+ bool hasCatchScope = false;
+ for (ExecutionContext *ctx = this; ctx; ctx = ctx->outer) {
+ if (ctx->type == Type_WithContext) {
+ Object *w = static_cast<WithContext *>(ctx)->withObject;
+ hasWith = true;
+ bool hasProperty = false;
+ Value v = w->get(ctx, name, &hasProperty);
+ if (hasProperty) {
+ return v;
+ }
+ continue;
+ }
+
+ else if (ctx->type == Type_CatchContext) {
+ hasCatchScope = true;
+ CatchContext *c = static_cast<CatchContext *>(ctx);
+ if (c->exceptionVarName->isEqualTo(name))
+ return c->exceptionValue;
+ }
+
+ else if (ctx->type >= Type_CallContext) {
+ VM::CallContext *c = static_cast<CallContext *>(ctx);
+ FunctionObject *f = c->function;
+ if (f->needsActivation || hasWith || hasCatchScope) {
+ for (unsigned int i = 0; i < f->varCount; ++i)
+ if (f->varList[i]->isEqualTo(name))
+ return c->locals[i];
+ for (int i = (int)f->formalParameterCount - 1; i >= 0; --i)
+ if (f->formalParameterList[i]->isEqualTo(name))
+ return c->arguments[i];
+ }
+ if (c->activation) {
+ bool hasProperty = false;
+ Value v = c->activation->get(c, name, &hasProperty);
+ if (hasProperty)
+ return v;
+ }
+ if (f->function && f->function->isNamedExpression
+ && name->isEqualTo(f->function->name))
+ return Value::fromObject(c->function);
+ }
+
+ else if (ctx->type == Type_GlobalContext) {
+ GlobalContext *g = static_cast<GlobalContext *>(ctx);
+ bool hasProperty = false;
+ Value v = g->global->get(g, name, &hasProperty);
+ if (hasProperty)
+ return v;
+ }
+ }
+ return Value::undefinedValue();
+}
+
+Value ExecutionContext::getPropertyAndBase(String *name, Object **base)
+{
+ *base = 0;
+ name->makeIdentifier(this);
+
+ if (name->isEqualTo(engine->id_this))
+ return thisObject;
+
+ bool hasWith = false;
+ bool hasCatchScope = false;
+ for (ExecutionContext *ctx = this; ctx; ctx = ctx->outer) {
+ if (ctx->type == Type_WithContext) {
+ Object *w = static_cast<WithContext *>(ctx)->withObject;
+ hasWith = true;
+ bool hasProperty = false;
+ Value v = w->get(ctx, name, &hasProperty);
+ if (hasProperty) {
+ *base = w;
+ return v;
+ }
+ continue;
+ }
+
+ else if (ctx->type == Type_CatchContext) {
+ hasCatchScope = true;
+ CatchContext *c = static_cast<CatchContext *>(ctx);
+ if (c->exceptionVarName->isEqualTo(name))
+ return c->exceptionValue;
+ }
+
+ else if (ctx->type >= Type_CallContext) {
+ VM::CallContext *c = static_cast<CallContext *>(ctx);
+ FunctionObject *f = c->function;
+ if (f->needsActivation || hasWith || hasCatchScope) {
+ for (unsigned int i = 0; i < f->varCount; ++i)
+ if (f->varList[i]->isEqualTo(name))
+ return c->locals[i];
+ for (int i = (int)f->formalParameterCount - 1; i >= 0; --i)
+ if (f->formalParameterList[i]->isEqualTo(name))
+ return c->arguments[i];
+ }
+ if (c->activation) {
+ bool hasProperty = false;
+ Value v = c->activation->get(c, name, &hasProperty);
+ if (hasProperty)
+ return v;
+ }
+ if (f->function && f->function->isNamedExpression
+ && name->isEqualTo(f->function->name))
+ return Value::fromObject(c->function);
+ }
+
+ else if (ctx->type == Type_GlobalContext) {
+ GlobalContext *g = static_cast<GlobalContext *>(ctx);
+ bool hasProperty = false;
+ Value v = g->global->get(g, name, &hasProperty);
+ if (hasProperty)
+ return v;
+ }
+ }
+ throwReferenceError(Value::fromString(name));
+ return Value::undefinedValue();
+}
+
+
+
+void ExecutionContext::inplaceBitOp(String *name, const Value &value, BinOp op)
+{
+ Value lhs = getProperty(name);
+ Value result;
+ op(this, &result, lhs, value);
+ setProperty(name, result);
+}
+
+void ExecutionContext::throwError(const Value &value)
+{
+ __qmljs_builtin_throw(this, value);
+}
+
+void ExecutionContext::throwError(const QString &message)
+{
+ Value v = Value::fromString(this, message);
+ throwError(Value::fromObject(engine->newErrorObject(v)));
+}
+
+void ExecutionContext::throwSyntaxError(DiagnosticMessage *message)
+{
+ throwError(Value::fromObject(engine->newSyntaxErrorObject(this, message)));
+}
+
+void ExecutionContext::throwTypeError()
+{
+ throwError(Value::fromObject(engine->newTypeErrorObject(this, QStringLiteral("Type error"))));
+}
+
+void ExecutionContext::throwUnimplemented(const QString &message)
+{
+ Value v = Value::fromString(this, QStringLiteral("Unimplemented ") + message);
+ throwError(Value::fromObject(engine->newErrorObject(v)));
+}
+
+void ExecutionContext::throwReferenceError(Value value)
+{
+ String *s = value.toString(this);
+ QString msg = s->toQString() + QStringLiteral(" is not defined");
+ throwError(Value::fromObject(engine->newReferenceErrorObject(this, msg)));
+}
+
+void ExecutionContext::throwRangeError(Value value)
+{
+ String *s = value.toString(this);
+ QString msg = s->toQString() + QStringLiteral(" out of range");
+ throwError(Value::fromObject(engine->newRangeErrorObject(this, msg)));
+}
+
+void ExecutionContext::throwURIError(Value msg)
+{
+ throwError(Value::fromObject(engine->newURIErrorObject(this, msg)));
+}
+
+} // namespace VM
+} // namespace QQmlJS
diff --git a/src/qml/qml/v4vm/qv4context.h b/src/qml/qml/v4vm/qv4context.h
new file mode 100644
index 0000000000..c26cc6bfc5
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4context.h
@@ -0,0 +1,194 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QMLJS_ENVIRONMENT_H
+#define QMLJS_ENVIRONMENT_H
+
+#include "qv4global.h"
+#include <qv4runtime.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct Value;
+struct Object;
+struct ExecutionEngine;
+struct DeclarativeEnvironment;
+struct Lookup;
+
+struct Q_V4_EXPORT DiagnosticMessage
+{
+ enum { Error, Warning };
+
+ QString fileName;
+ quint32 offset;
+ quint32 length;
+ quint32 startLine;
+ unsigned startColumn: 31;
+ unsigned type: 1;
+ QString message;
+ DiagnosticMessage *next;
+
+ DiagnosticMessage();
+ ~DiagnosticMessage();
+ String *buildFullMessage(ExecutionContext *ctx) const;
+};
+
+struct CallContext;
+
+struct ExecutionContext
+{
+ enum Type {
+ Type_GlobalContext = 0x1,
+ Type_CatchContext = 0x2,
+ Type_WithContext = 0x3,
+ Type_SimpleCallContext = 0x4,
+ Type_CallContext = 0x5,
+ Type_QmlContext = 0x6
+ };
+
+ Type type;
+ bool strictMode;
+ bool marked;
+
+ Value thisObject;
+
+ ExecutionEngine *engine;
+ ExecutionContext *parent;
+ ExecutionContext *outer;
+ Lookup *lookups;
+ ExecutionContext *next; // used in the GC
+
+ String * const *formals() const;
+ unsigned int formalCount() const;
+ String * const *variables() const;
+ unsigned int variableCount() const;
+
+ void createMutableBinding(String *name, bool deletable);
+
+ void Q_NORETURN throwError(const Value &value);
+ void Q_NORETURN throwError(const QString &message);
+ void Q_NORETURN throwSyntaxError(DiagnosticMessage *message);
+ void Q_NORETURN throwTypeError();
+ void Q_NORETURN throwReferenceError(Value value);
+ void Q_NORETURN throwRangeError(Value value);
+ void Q_NORETURN throwURIError(Value msg);
+ void Q_NORETURN throwUnimplemented(const QString &message);
+
+ void setProperty(String *name, const Value &value);
+ Value getProperty(String *name);
+ Value getPropertyNoThrow(String *name);
+ Value getPropertyAndBase(String *name, Object **base);
+ void inplaceBitOp(String *name, const QQmlJS::VM::Value &value, BinOp op);
+ bool deleteProperty(String *name);
+
+ inline Value argument(unsigned int index = 0);
+
+ void mark();
+
+ inline CallContext *asCallContext();
+};
+
+struct SimpleCallContext : public ExecutionContext
+{
+ FunctionObject *function;
+ Value *arguments;
+ unsigned int argumentCount;
+};
+
+struct CallContext : public SimpleCallContext
+{
+ void initCallContext(QQmlJS::VM::ExecutionEngine *engine);
+ bool needsOwnArguments() const;
+
+ Value *locals;
+ Object *activation;
+};
+
+struct GlobalContext : public ExecutionContext
+{
+ void init(ExecutionEngine *e);
+
+ Object *global;
+};
+
+struct CatchContext : public ExecutionContext
+{
+ void init(ExecutionContext *p, String *exceptionVarName, const QQmlJS::VM::Value &exceptionValue);
+
+ String *exceptionVarName;
+ Value exceptionValue;
+};
+
+struct WithContext : public ExecutionContext
+{
+ Object *withObject;
+
+ void init(ExecutionContext *p, Object *with);
+};
+
+inline Value ExecutionContext::argument(unsigned int index)
+{
+ if (type >= Type_SimpleCallContext) {
+ CallContext *ctx = static_cast<CallContext *>(this);
+ if (index < ctx->argumentCount)
+ return ctx->arguments[index];
+ }
+ return Value::undefinedValue();
+}
+
+inline CallContext *ExecutionContext::asCallContext()
+{
+ return type >= Type_CallContext ? static_cast<CallContext *>(this) : 0;
+}
+
+/* Function *f, int argc */
+#define requiredMemoryForExecutionContect(f, argc) \
+ sizeof(CallContext) + sizeof(Value) * (f->varCount + qMax((uint)argc, f->formalParameterCount))
+#define stackContextSize (sizeof(CallContext) + 32*sizeof(Value))
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4dateobject.cpp b/src/qml/qml/v4vm/qv4dateobject.cpp
new file mode 100644
index 0000000000..adeb11f862
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4dateobject.cpp
@@ -0,0 +1,1316 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+
+#include "qv4dateobject.h"
+#include "qv4objectproto.h"
+#include "qv4mm.h"
+#include <QtCore/qnumeric.h>
+#include <QtCore/qmath.h>
+#include <QtCore/QDateTime>
+#include <QtCore/QStringList>
+#include <QtCore/QDebug>
+#include <cmath>
+#include <qmath.h>
+#include <qnumeric.h>
+#include <cassert>
+#include <time.h>
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include <qv4isel_masm_p.h>
+
+#include <wtf/MathExtras.h>
+
+#ifdef Q_OS_WIN
+# include <windows.h>
+#else
+# ifndef Q_OS_VXWORKS
+# include <sys/time.h>
+# else
+# include "qplatformdefs.h"
+# endif
+#endif
+
+using namespace QQmlJS::VM;
+
+static const double HoursPerDay = 24.0;
+static const double MinutesPerHour = 60.0;
+static const double SecondsPerMinute = 60.0;
+static const double msPerSecond = 1000.0;
+static const double msPerMinute = 60000.0;
+static const double msPerHour = 3600000.0;
+static const double msPerDay = 86400000.0;
+
+static double LocalTZA = 0.0; // initialized at startup
+
+static inline double TimeWithinDay(double t)
+{
+ double r = ::fmod(t, msPerDay);
+ return (r >= 0) ? r : r + msPerDay;
+}
+
+static inline int HourFromTime(double t)
+{
+ int r = int(::fmod(::floor(t / msPerHour), HoursPerDay));
+ return (r >= 0) ? r : r + int(HoursPerDay);
+}
+
+static inline int MinFromTime(double t)
+{
+ int r = int(::fmod(::floor(t / msPerMinute), MinutesPerHour));
+ return (r >= 0) ? r : r + int(MinutesPerHour);
+}
+
+static inline int SecFromTime(double t)
+{
+ int r = int(::fmod(::floor(t / msPerSecond), SecondsPerMinute));
+ return (r >= 0) ? r : r + int(SecondsPerMinute);
+}
+
+static inline int msFromTime(double t)
+{
+ int r = int(::fmod(t, msPerSecond));
+ return (r >= 0) ? r : r + int(msPerSecond);
+}
+
+static inline double Day(double t)
+{
+ return ::floor(t / msPerDay);
+}
+
+static inline double DaysInYear(double y)
+{
+ if (::fmod(y, 4))
+ return 365;
+
+ else if (::fmod(y, 100))
+ return 366;
+
+ else if (::fmod(y, 400))
+ return 365;
+
+ return 366;
+}
+
+static inline double DayFromYear(double y)
+{
+ return 365 * (y - 1970)
+ + ::floor((y - 1969) / 4)
+ - ::floor((y - 1901) / 100)
+ + ::floor((y - 1601) / 400);
+}
+
+static inline double TimeFromYear(double y)
+{
+ return msPerDay * DayFromYear(y);
+}
+
+static inline double YearFromTime(double t)
+{
+ int y = 1970;
+ y += (int) ::floor(t / (msPerDay * 365.2425));
+
+ double t2 = TimeFromYear(y);
+ return (t2 > t) ? y - 1 : ((t2 + msPerDay * DaysInYear(y)) <= t) ? y + 1 : y;
+}
+
+static inline bool InLeapYear(double t)
+{
+ double x = DaysInYear(YearFromTime(t));
+ if (x == 365)
+ return 0;
+
+ assert(x == 366);
+ return 1;
+}
+
+static inline double DayWithinYear(double t)
+{
+ return Day(t) - DayFromYear(YearFromTime(t));
+}
+
+static inline double MonthFromTime(double t)
+{
+ double d = DayWithinYear(t);
+ double l = InLeapYear(t);
+
+ if (d < 31.0)
+ return 0;
+
+ else if (d < 59.0 + l)
+ return 1;
+
+ else if (d < 90.0 + l)
+ return 2;
+
+ else if (d < 120.0 + l)
+ return 3;
+
+ else if (d < 151.0 + l)
+ return 4;
+
+ else if (d < 181.0 + l)
+ return 5;
+
+ else if (d < 212.0 + l)
+ return 6;
+
+ else if (d < 243.0 + l)
+ return 7;
+
+ else if (d < 273.0 + l)
+ return 8;
+
+ else if (d < 304.0 + l)
+ return 9;
+
+ else if (d < 334.0 + l)
+ return 10;
+
+ else if (d < 365.0 + l)
+ return 11;
+
+ return qSNaN(); // ### assert?
+}
+
+static inline double DateFromTime(double t)
+{
+ int m = (int) Value::toInteger(MonthFromTime(t));
+ double d = DayWithinYear(t);
+ double l = InLeapYear(t);
+
+ switch (m) {
+ case 0: return d + 1.0;
+ case 1: return d - 30.0;
+ case 2: return d - 58.0 - l;
+ case 3: return d - 89.0 - l;
+ case 4: return d - 119.0 - l;
+ case 5: return d - 150.0 - l;
+ case 6: return d - 180.0 - l;
+ case 7: return d - 211.0 - l;
+ case 8: return d - 242.0 - l;
+ case 9: return d - 272.0 - l;
+ case 10: return d - 303.0 - l;
+ case 11: return d - 333.0 - l;
+ }
+
+ return qSNaN(); // ### assert
+}
+
+static inline double WeekDay(double t)
+{
+ double r = ::fmod (Day(t) + 4.0, 7.0);
+ return (r >= 0) ? r : r + 7.0;
+}
+
+
+static inline double MakeTime(double hour, double min, double sec, double ms)
+{
+ return ((hour * MinutesPerHour + min) * SecondsPerMinute + sec) * msPerSecond + ms;
+}
+
+static inline double DayFromMonth(double month, double leap)
+{
+ switch ((int) month) {
+ case 0: return 0;
+ case 1: return 31.0;
+ case 2: return 59.0 + leap;
+ case 3: return 90.0 + leap;
+ case 4: return 120.0 + leap;
+ case 5: return 151.0 + leap;
+ case 6: return 181.0 + leap;
+ case 7: return 212.0 + leap;
+ case 8: return 243.0 + leap;
+ case 9: return 273.0 + leap;
+ case 10: return 304.0 + leap;
+ case 11: return 334.0 + leap;
+ }
+
+ return qSNaN(); // ### assert?
+}
+
+static double MakeDay(double year, double month, double day)
+{
+ year += ::floor(month / 12.0);
+
+ month = ::fmod(month, 12.0);
+ if (month < 0)
+ month += 12.0;
+
+ double d = DayFromYear(year);
+ bool leap = InLeapYear(d*msPerDay);
+
+ d += DayFromMonth(month, leap);
+ d += day - 1;
+
+ return d;
+}
+
+static inline double MakeDate(double day, double time)
+{
+ return day * msPerDay + time;
+}
+
+static inline double DaylightSavingTA(double t)
+{
+ struct tm tmtm;
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+ __time64_t tt = (__time64_t)(t / msPerSecond);
+ if (!_localtime64_s(&tmtm, &tt))
+#else
+ long int tt = (long int)(t / msPerSecond);
+ if (!localtime_r((const time_t*) &tt, &tmtm))
+#endif
+ return 0;
+ return (tmtm.tm_isdst > 0) ? msPerHour : 0;
+}
+
+static inline double LocalTime(double t)
+{
+ return t + LocalTZA + DaylightSavingTA(t);
+}
+
+static inline double UTC(double t)
+{
+ return t - LocalTZA - DaylightSavingTA(t - LocalTZA);
+}
+
+static inline double currentTime()
+{
+#ifndef Q_OS_WIN
+ struct timeval tv;
+
+ gettimeofday(&tv, 0);
+ return ::floor(tv.tv_sec * msPerSecond + (tv.tv_usec / 1000.0));
+#else
+ SYSTEMTIME st;
+ GetSystemTime(&st);
+ FILETIME ft;
+ SystemTimeToFileTime(&st, &ft);
+ LARGE_INTEGER li;
+ li.LowPart = ft.dwLowDateTime;
+ li.HighPart = ft.dwHighDateTime;
+ return double(li.QuadPart - Q_INT64_C(116444736000000000)) / 10000.0;
+#endif
+}
+
+static inline double TimeClip(double t)
+{
+ if (! qIsFinite(t) || fabs(t) > 8.64e15)
+ return qSNaN();
+ return Value::toInteger(t);
+}
+
+static inline double FromDateTime(const QDateTime &dt)
+{
+ if (!dt.isValid())
+ return qSNaN();
+ QDate date = dt.date();
+ QTime taim = dt.time();
+ int year = date.year();
+ int month = date.month() - 1;
+ int day = date.day();
+ int hours = taim.hour();
+ int mins = taim.minute();
+ int secs = taim.second();
+ int ms = taim.msec();
+ double t = MakeDate(MakeDay(year, month, day),
+ MakeTime(hours, mins, secs, ms));
+ if (dt.timeSpec() == Qt::LocalTime)
+ t = UTC(t);
+ return TimeClip(t);
+}
+
+static inline double ParseString(const QString &s)
+{
+ // first try the format defined in 15.9.1.15, only if that fails fall back to
+ // QDateTime for parsing
+
+ // the define string format is YYYY-MM-DDTHH:mm:ss.sssZ
+ // It can be date or time only, and the second and later components
+ // of both fields are optional
+ // and extended syntax for negative and large positive years exists: +/-YYYYYY
+
+ enum Format {
+ Year,
+ Month,
+ Day,
+ Hour,
+ Minute,
+ Second,
+ MilliSecond,
+ TimezoneHour,
+ TimezoneMinute,
+ Done
+ };
+
+ const QChar *ch = s.constData();
+ const QChar *end = ch + s.length();
+
+ uint format = Year;
+ int current = 0;
+ int currentSize = 0;
+ bool extendedYear = false;
+
+ int yearSign = 1;
+ int year = 0;
+ int month = 0;
+ int day = 1;
+ int hour = 0;
+ int minute = 0;
+ int second = 0;
+ int msec = 0;
+ int offsetSign = 1;
+ int offset = 0;
+
+ bool error = false;
+ if (*ch == '+' || *ch == '-') {
+ extendedYear = true;
+ if (*ch == '-')
+ yearSign = -1;
+ ++ch;
+ }
+ while (ch <= end) {
+ if (*ch >= '0' && *ch <= '9') {
+ current *= 10;
+ current += ch->unicode() - '0';
+ ++currentSize;
+ } else { // other char, delimits field
+ switch (format) {
+ case Year:
+ year = current;
+ if (extendedYear)
+ error = (currentSize != 6);
+ else
+ error = (currentSize != 4);
+ break;
+ case Month:
+ month = current - 1;
+ error = (currentSize != 2) || month > 11;
+ break;
+ case Day:
+ day = current;
+ error = (currentSize != 2) || day > 31;
+ break;
+ case Hour:
+ hour = current;
+ error = (currentSize != 2) || hour > 24;
+ break;
+ case Minute:
+ minute = current;
+ error = (currentSize != 2) || minute > 60;
+ break;
+ case Second:
+ second = current;
+ error = (currentSize != 2) || second > 60;
+ break;
+ case MilliSecond:
+ msec = current;
+ error = (currentSize != 3);
+ break;
+ case TimezoneHour:
+ offset = current*60;
+ error = (currentSize != 2) || offset > 23*60;
+ break;
+ case TimezoneMinute:
+ offset += current;
+ error = (currentSize != 2) || current >= 60;
+ break;
+ }
+ if (*ch == 'T') {
+ if (format >= Hour)
+ error = true;
+ format = Hour;
+ } else if (*ch == '-') {
+ if (format < Day)
+ ++format;
+ else if (format < Minute)
+ error = true;
+ else if (format >= TimezoneHour)
+ error = true;
+ else {
+ offsetSign = -1;
+ format = TimezoneHour;
+ }
+ } else if (*ch == ':') {
+ if (format != Hour && format != Minute && format != TimezoneHour)
+ error = true;
+ ++format;
+ } else if (*ch == '.') {
+ if (format != Second)
+ error = true;
+ ++format;
+ } else if (*ch == '+') {
+ if (format < Minute || format >= TimezoneHour)
+ error = true;
+ format = TimezoneHour;
+ } else if (*ch == 'Z' || *ch == 0) {
+ format = Done;
+ }
+ current = 0;
+ currentSize = 0;
+ }
+ if (error || format == Done)
+ break;
+ ++ch;
+ }
+
+ if (!error) {
+ double t = MakeDate(MakeDay(year * yearSign, month, day), MakeTime(hour, minute, second, msec));
+ t += offset * offsetSign * 60 * 1000;
+ return t;
+ }
+
+ QDateTime dt = QDateTime::fromString(s, Qt::TextDate);
+ if (!dt.isValid())
+ dt = QDateTime::fromString(s, Qt::ISODate);
+ if (!dt.isValid()) {
+ QStringList formats;
+ formats << QStringLiteral("M/d/yyyy")
+ << QStringLiteral("M/d/yyyy hh:mm")
+ << QStringLiteral("M/d/yyyy hh:mm A")
+
+ << QStringLiteral("M/d/yyyy, hh:mm")
+ << QStringLiteral("M/d/yyyy, hh:mm A")
+
+ << QStringLiteral("MMM d yyyy")
+ << QStringLiteral("MMM d yyyy hh:mm")
+ << QStringLiteral("MMM d yyyy hh:mm:ss")
+ << QStringLiteral("MMM d yyyy, hh:mm")
+ << QStringLiteral("MMM d yyyy, hh:mm:ss")
+
+ << QStringLiteral("MMMM d yyyy")
+ << QStringLiteral("MMMM d yyyy hh:mm")
+ << QStringLiteral("MMMM d yyyy hh:mm:ss")
+ << QStringLiteral("MMMM d yyyy, hh:mm")
+ << QStringLiteral("MMMM d yyyy, hh:mm:ss")
+
+ << QStringLiteral("MMM d, yyyy")
+ << QStringLiteral("MMM d, yyyy hh:mm")
+ << QStringLiteral("MMM d, yyyy hh:mm:ss")
+
+ << QStringLiteral("MMMM d, yyyy")
+ << QStringLiteral("MMMM d, yyyy hh:mm")
+ << QStringLiteral("MMMM d, yyyy hh:mm:ss")
+
+ << QStringLiteral("d MMM yyyy")
+ << QStringLiteral("d MMM yyyy hh:mm")
+ << QStringLiteral("d MMM yyyy hh:mm:ss")
+ << QStringLiteral("d MMM yyyy, hh:mm")
+ << QStringLiteral("d MMM yyyy, hh:mm:ss")
+
+ << QStringLiteral("d MMMM yyyy")
+ << QStringLiteral("d MMMM yyyy hh:mm")
+ << QStringLiteral("d MMMM yyyy hh:mm:ss")
+ << QStringLiteral("d MMMM yyyy, hh:mm")
+ << QStringLiteral("d MMMM yyyy, hh:mm:ss")
+
+ << QStringLiteral("d MMM, yyyy")
+ << QStringLiteral("d MMM, yyyy hh:mm")
+ << QStringLiteral("d MMM, yyyy hh:mm:ss")
+
+ << QStringLiteral("d MMMM, yyyy")
+ << QStringLiteral("d MMMM, yyyy hh:mm")
+ << QStringLiteral("d MMMM, yyyy hh:mm:ss");
+
+ for (int i = 0; i < formats.size(); ++i) {
+ dt = QDateTime::fromString(s, formats.at(i));
+ if (dt.isValid())
+ break;
+ }
+ }
+ return FromDateTime(dt);
+}
+
+/*!
+ \internal
+
+ Converts the ECMA Date value \tt (in UTC form) to QDateTime
+ according to \a spec.
+*/
+static inline QDateTime ToDateTime(double t, Qt::TimeSpec spec)
+{
+ if (isnan(t))
+ return QDateTime();
+ if (spec == Qt::LocalTime)
+ t = LocalTime(t);
+ int year = int(YearFromTime(t));
+ int month = int(MonthFromTime(t) + 1);
+ int day = int(DateFromTime(t));
+ int hours = HourFromTime(t);
+ int mins = MinFromTime(t);
+ int secs = SecFromTime(t);
+ int ms = msFromTime(t);
+ return QDateTime(QDate(year, month, day), QTime(hours, mins, secs, ms), spec);
+}
+
+static inline QString ToString(double t)
+{
+ if (isnan(t))
+ return QStringLiteral("Invalid Date");
+ QString str = ToDateTime(t, Qt::LocalTime).toString() + QStringLiteral(" GMT");
+ double tzoffset = LocalTZA + DaylightSavingTA(t);
+ if (tzoffset) {
+ int hours = static_cast<int>(::fabs(tzoffset) / 1000 / 60 / 60);
+ int mins = int(::fabs(tzoffset) / 1000 / 60) % 60;
+ str.append(QLatin1Char((tzoffset > 0) ? '+' : '-'));
+ if (hours < 10)
+ str.append(QLatin1Char('0'));
+ str.append(QString::number(hours));
+ if (mins < 10)
+ str.append(QLatin1Char('0'));
+ str.append(QString::number(mins));
+ }
+ return str;
+}
+
+static inline QString ToUTCString(double t)
+{
+ if (isnan(t))
+ return QStringLiteral("Invalid Date");
+ return ToDateTime(t, Qt::UTC).toString() + QStringLiteral(" GMT");
+}
+
+static inline QString ToDateString(double t)
+{
+ return ToDateTime(t, Qt::LocalTime).date().toString();
+}
+
+static inline QString ToTimeString(double t)
+{
+ return ToDateTime(t, Qt::LocalTime).time().toString();
+}
+
+static inline QString ToLocaleString(double t)
+{
+ return ToDateTime(t, Qt::LocalTime).toString(Qt::LocaleDate);
+}
+
+static inline QString ToLocaleDateString(double t)
+{
+ return ToDateTime(t, Qt::LocalTime).date().toString(Qt::LocaleDate);
+}
+
+static inline QString ToLocaleTimeString(double t)
+{
+ return ToDateTime(t, Qt::LocalTime).time().toString(Qt::LocaleDate);
+}
+
+static double getLocalTZA()
+{
+#ifndef Q_OS_WIN
+ struct tm t;
+ time_t curr;
+ time(&curr);
+ localtime_r(&curr, &t);
+ time_t locl = mktime(&t);
+ gmtime_r(&curr, &t);
+ time_t globl = mktime(&t);
+ return double(locl - globl) * 1000.0;
+#else
+ TIME_ZONE_INFORMATION tzInfo;
+ GetTimeZoneInformation(&tzInfo);
+ return -tzInfo.Bias * 60.0 * 1000.0;
+#endif
+}
+
+DEFINE_MANAGED_VTABLE(DateCtor);
+
+DateCtor::DateCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+Value DateCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ double t = 0;
+
+ if (argc == 0)
+ t = currentTime();
+
+ else if (argc == 1) {
+ Value arg = args[0];
+ if (DateObject *d = arg.asDateObject())
+ arg = d->value;
+ else
+ arg = __qmljs_to_primitive(arg, PREFERREDTYPE_HINT);
+
+ if (arg.isString())
+ t = ParseString(arg.stringValue()->toQString());
+ else
+ t = TimeClip(arg.toNumber());
+ }
+
+ else { // argc > 1
+ double year = args[0].toNumber();
+ double month = args[1].toNumber();
+ double day = argc >= 3 ? args[2].toNumber() : 1;
+ double hours = argc >= 4 ? args[3].toNumber() : 0;
+ double mins = argc >= 5 ? args[4].toNumber() : 0;
+ double secs = argc >= 6 ? args[5].toNumber() : 0;
+ double ms = argc >= 7 ? args[6].toNumber() : 0;
+ if (year >= 0 && year <= 99)
+ year += 1900;
+ t = MakeDate(MakeDay(year, month, day), MakeTime(hours, mins, secs, ms));
+ t = TimeClip(UTC(t));
+ }
+
+ Object *d = ctx->engine->newDateObject(Value::fromDouble(t));
+ return Value::fromObject(d);
+}
+
+Value DateCtor::call(Managed *, ExecutionContext *ctx, const Value &, Value *, int)
+{
+ double t = currentTime();
+ return Value::fromString(ctx, ToString(t));
+}
+
+void DatePrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(7));
+ LocalTZA = getLocalTZA();
+
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("parse"), method_parse, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("UTC"), method_UTC, 7);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("now"), method_now, 0);
+
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toDateString"), method_toDateString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toTimeString"), method_toTimeString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleString"), method_toLocaleString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleDateString"), method_toLocaleDateString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleTimeString"), method_toLocaleTimeString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("valueOf"), method_valueOf, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getTime"), method_getTime, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getYear"), method_getYear, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getFullYear"), method_getFullYear, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCFullYear"), method_getUTCFullYear, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getMonth"), method_getMonth, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCMonth"), method_getUTCMonth, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getDate"), method_getDate, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCDate"), method_getUTCDate, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getDay"), method_getDay, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCDay"), method_getUTCDay, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getHours"), method_getHours, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCHours"), method_getUTCHours, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getMinutes"), method_getMinutes, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCMinutes"), method_getUTCMinutes, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getSeconds"), method_getSeconds, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCSeconds"), method_getUTCSeconds, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getMilliseconds"), method_getMilliseconds, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getUTCMilliseconds"), method_getUTCMilliseconds, 0);
+ defineDefaultProperty(ctx, QStringLiteral("getTimezoneOffset"), method_getTimezoneOffset, 0);
+ defineDefaultProperty(ctx, QStringLiteral("setTime"), method_setTime, 1);
+ defineDefaultProperty(ctx, QStringLiteral("setMilliseconds"), method_setMilliseconds, 1);
+ defineDefaultProperty(ctx, QStringLiteral("setUTCMilliseconds"), method_setUTCMilliseconds, 1);
+ defineDefaultProperty(ctx, QStringLiteral("setSeconds"), method_setSeconds, 2);
+ defineDefaultProperty(ctx, QStringLiteral("setUTCSeconds"), method_setUTCSeconds, 2);
+ defineDefaultProperty(ctx, QStringLiteral("setMinutes"), method_setMinutes, 3);
+ defineDefaultProperty(ctx, QStringLiteral("setUTCMinutes"), method_setUTCMinutes, 3);
+ defineDefaultProperty(ctx, QStringLiteral("setHours"), method_setHours, 4);
+ defineDefaultProperty(ctx, QStringLiteral("setUTCHours"), method_setUTCHours, 4);
+ defineDefaultProperty(ctx, QStringLiteral("setDate"), method_setDate, 1);
+ defineDefaultProperty(ctx, QStringLiteral("setUTCDate"), method_setUTCDate, 1);
+ defineDefaultProperty(ctx, QStringLiteral("setMonth"), method_setMonth, 2);
+ defineDefaultProperty(ctx, QStringLiteral("setUTCMonth"), method_setUTCMonth, 2);
+ defineDefaultProperty(ctx, QStringLiteral("setYear"), method_setYear, 1);
+ defineDefaultProperty(ctx, QStringLiteral("setFullYear"), method_setFullYear, 3);
+ defineDefaultProperty(ctx, QStringLiteral("setUTCFullYear"), method_setUTCFullYear, 3);
+ defineDefaultProperty(ctx, QStringLiteral("toUTCString"), method_toUTCString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toGMTString"), method_toUTCString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toISOString"), method_toISOString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toJSON"), method_toJSON, 1);
+}
+
+double DatePrototype::getThisDate(ExecutionContext *ctx)
+{
+ if (DateObject *thisObject = ctx->thisObject.asDateObject())
+ return thisObject->value.asDouble();
+ else {
+ ctx->throwTypeError();
+ return 0;
+ }
+}
+
+Value DatePrototype::method_parse(SimpleCallContext *ctx)
+{
+ return Value::fromDouble(ParseString(ctx->argument(0).toString(ctx)->toQString()));
+}
+
+Value DatePrototype::method_UTC(SimpleCallContext *ctx)
+{
+ const int numArgs = ctx->argumentCount;
+ if (numArgs >= 2) {
+ double year = ctx->argument(0).toNumber();
+ double month = ctx->argument(1).toNumber();
+ double day = numArgs >= 3 ? ctx->argument(2).toNumber() : 1;
+ double hours = numArgs >= 4 ? ctx->argument(3).toNumber() : 0;
+ double mins = numArgs >= 5 ? ctx->argument(4).toNumber() : 0;
+ double secs = numArgs >= 6 ? ctx->argument(5).toNumber() : 0;
+ double ms = numArgs >= 7 ? ctx->argument(6).toNumber() : 0;
+ if (year >= 0 && year <= 99)
+ year += 1900;
+ double t = MakeDate(MakeDay(year, month, day),
+ MakeTime(hours, mins, secs, ms));
+ return Value::fromDouble(TimeClip(t));
+ }
+ return Value::undefinedValue();
+}
+
+Value DatePrototype::method_now(SimpleCallContext *ctx)
+{
+ Q_UNUSED(ctx);
+ double t = currentTime();
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_toString(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromString(ctx, ToString(t));
+}
+
+Value DatePrototype::method_toDateString(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromString(ctx, ToDateString(t));
+}
+
+Value DatePrototype::method_toTimeString(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromString(ctx, ToTimeString(t));
+}
+
+Value DatePrototype::method_toLocaleString(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromString(ctx, ToLocaleString(t));
+}
+
+Value DatePrototype::method_toLocaleDateString(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromString(ctx, ToLocaleDateString(t));
+}
+
+Value DatePrototype::method_toLocaleTimeString(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromString(ctx, ToLocaleTimeString(t));
+}
+
+Value DatePrototype::method_valueOf(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getTime(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getYear(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = YearFromTime(LocalTime(t)) - 1900;
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getFullYear(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = YearFromTime(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCFullYear(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = YearFromTime(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getMonth(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = MonthFromTime(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCMonth(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = MonthFromTime(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getDate(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = DateFromTime(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCDate(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = DateFromTime(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getDay(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = WeekDay(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCDay(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = WeekDay(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getHours(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = HourFromTime(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCHours(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = HourFromTime(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getMinutes(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = MinFromTime(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCMinutes(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = MinFromTime(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getSeconds(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = SecFromTime(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCSeconds(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = SecFromTime(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getMilliseconds(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = msFromTime(LocalTime(t));
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getUTCMilliseconds(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = msFromTime(t);
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_getTimezoneOffset(SimpleCallContext *ctx)
+{
+ double t = getThisDate(ctx);
+ if (! isnan(t))
+ t = (t - LocalTime(t)) / msPerMinute;
+ return Value::fromDouble(t);
+}
+
+Value DatePrototype::method_setTime(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ self->value.setDouble(TimeClip(ctx->argument(0).toNumber()));
+ return self->value;
+}
+
+Value DatePrototype::method_setMilliseconds(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = LocalTime(self->value.asDouble());
+ double ms = ctx->argument(0).toNumber();
+ self->value.setDouble(TimeClip(UTC(MakeDate(Day(t), MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms)))));
+ return self->value;
+}
+
+Value DatePrototype::method_setUTCMilliseconds(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ double ms = ctx->argument(0).toNumber();
+ self->value.setDouble(TimeClip(UTC(MakeDate(Day(t), MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms)))));
+ return self->value;
+}
+
+Value DatePrototype::method_setSeconds(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = LocalTime(self->value.asDouble());
+ double sec = ctx->argument(0).toNumber();
+ double ms = (ctx->argumentCount < 2) ? msFromTime(t) : ctx->argument(1).toNumber();
+ t = TimeClip(UTC(MakeDate(Day(t), MakeTime(HourFromTime(t), MinFromTime(t), sec, ms))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setUTCSeconds(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ double sec = ctx->argument(0).toNumber();
+ double ms = (ctx->argumentCount < 2) ? msFromTime(t) : ctx->argument(1).toNumber();
+ t = TimeClip(UTC(MakeDate(Day(t), MakeTime(HourFromTime(t), MinFromTime(t), sec, ms))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setMinutes(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = LocalTime(self->value.asDouble());
+ double min = ctx->argument(0).toNumber();
+ double sec = (ctx->argumentCount < 2) ? SecFromTime(t) : ctx->argument(1).toNumber();
+ double ms = (ctx->argumentCount < 3) ? msFromTime(t) : ctx->argument(2).toNumber();
+ t = TimeClip(UTC(MakeDate(Day(t), MakeTime(HourFromTime(t), min, sec, ms))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setUTCMinutes(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ double min = ctx->argument(0).toNumber();
+ double sec = (ctx->argumentCount < 2) ? SecFromTime(t) : ctx->argument(1).toNumber();
+ double ms = (ctx->argumentCount < 3) ? msFromTime(t) : ctx->argument(2).toNumber();
+ t = TimeClip(UTC(MakeDate(Day(t), MakeTime(HourFromTime(t), min, sec, ms))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setHours(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = LocalTime(self->value.asDouble());
+ double hour = ctx->argument(0).toNumber();
+ double min = (ctx->argumentCount < 2) ? MinFromTime(t) : ctx->argument(1).toNumber();
+ double sec = (ctx->argumentCount < 3) ? SecFromTime(t) : ctx->argument(2).toNumber();
+ double ms = (ctx->argumentCount < 4) ? msFromTime(t) : ctx->argument(3).toNumber();
+ t = TimeClip(UTC(MakeDate(Day(t), MakeTime(hour, min, sec, ms))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setUTCHours(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ double hour = ctx->argument(0).toNumber();
+ double min = (ctx->argumentCount < 2) ? MinFromTime(t) : ctx->argument(1).toNumber();
+ double sec = (ctx->argumentCount < 3) ? SecFromTime(t) : ctx->argument(2).toNumber();
+ double ms = (ctx->argumentCount < 4) ? msFromTime(t) : ctx->argument(3).toNumber();
+ t = TimeClip(UTC(MakeDate(Day(t), MakeTime(hour, min, sec, ms))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setDate(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = LocalTime(self->value.asDouble());
+ double date = ctx->argument(0).toNumber();
+ t = TimeClip(UTC(MakeDate(MakeDay(YearFromTime(t), MonthFromTime(t), date), TimeWithinDay(t))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setUTCDate(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ double date = ctx->argument(0).toNumber();
+ t = TimeClip(UTC(MakeDate(MakeDay(YearFromTime(t), MonthFromTime(t), date), TimeWithinDay(t))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setMonth(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = LocalTime(self->value.asDouble());
+ double month = ctx->argument(0).toNumber();
+ double date = (ctx->argumentCount < 2) ? DateFromTime(t) : ctx->argument(1).toNumber();
+ t = TimeClip(UTC(MakeDate(MakeDay(YearFromTime(t), month, date), TimeWithinDay(t))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setUTCMonth(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ double month = ctx->argument(0).toNumber();
+ double date = (ctx->argumentCount < 2) ? DateFromTime(t) : ctx->argument(1).toNumber();
+ t = TimeClip(UTC(MakeDate(MakeDay(YearFromTime(t), month, date), TimeWithinDay(t))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
// Date.prototype.setYear (legacy, ES5 B.2.5): like setFullYear, but years
// 0..99 are interpreted as 1900..1999, and a NaN year invalidates the date.
Value DatePrototype::method_setYear(SimpleCallContext *ctx)
{
    DateObject *self = ctx->thisObject.asDateObject();
    if (!self)
        ctx->throwTypeError();

    // Per B.2.5: use local time, but substitute +0 when the current time
    // value is NaN (checked *before* the LocalTime conversion).
    double t = self->value.asDouble();
    if (isnan(t))
        t = 0;
    else
        t = LocalTime(t);
    double year = ctx->argument(0).toNumber();
    double r;
    if (isnan(year)) {
        // NaN year -> the date becomes invalid.
        r = qSNaN();
    } else {
        // Two-digit years map into the 20th century.
        if ((Value::toInteger(year) >= 0) && (Value::toInteger(year) <= 99))
            year += 1900;
        r = MakeDay(year, MonthFromTime(t), DateFromTime(t));
        // t is local here, so convert the rebuilt date back to UTC.
        r = UTC(MakeDate(r, TimeWithinDay(t)));
        r = TimeClip(r);
    }
    self->value.setDouble(r);
    return self->value;
}
+
+Value DatePrototype::method_setUTCFullYear(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ double year = ctx->argument(0).toNumber();
+ double month = (ctx->argumentCount < 2) ? MonthFromTime(t) : ctx->argument(1).toNumber();
+ double date = (ctx->argumentCount < 3) ? DateFromTime(t) : ctx->argument(2).toNumber();
+ t = TimeClip(UTC(MakeDate(MakeDay(year, month, date), TimeWithinDay(t))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_setFullYear(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = LocalTime(self->value.asDouble());
+ if (isnan(t))
+ t = 0;
+ double year = ctx->argument(0).toNumber();
+ double month = (ctx->argumentCount < 2) ? MonthFromTime(t) : ctx->argument(1).toNumber();
+ double date = (ctx->argumentCount < 3) ? DateFromTime(t) : ctx->argument(2).toNumber();
+ t = TimeClip(UTC(MakeDate(MakeDay(year, month, date), TimeWithinDay(t))));
+ self->value.setDouble(t);
+ return self->value;
+}
+
+Value DatePrototype::method_toUTCString(SimpleCallContext *ctx)
+{
+ DateObject *self = ctx->thisObject.asDateObject();
+ if (!self)
+ ctx->throwTypeError();
+
+ double t = self->value.asDouble();
+ return Value::fromString(ctx, ToUTCString(t));
+}
+
+static void addZeroPrefixedInt(QString &str, int num, int nDigits)
+{
+ str.resize(str.size() + nDigits);
+
+ QChar *c = str.data() + str.size() - 1;
+ while (nDigits) {
+ *c = QChar(num % 10 + '0');
+ num /= 10;
+ --c;
+ --nDigits;
+ }
+}
+
// Date.prototype.toISOString (ES5 15.9.5.43): formats the time value as
// ISO 8601 in UTC, e.g. "1970-01-01T00:00:00.000Z". Extended 6-digit years
// get a +/- sign prefix. Throws RangeError for non-finite time values.
Value DatePrototype::method_toISOString(SimpleCallContext *ctx)
{
    DateObject *self = ctx->thisObject.asDateObject();
    if (!self)
        ctx->throwTypeError();

    double t = self->value.asDouble();
    // Per spec: RangeError, not "Invalid Date", for NaN/Infinity.
    if (!std::isfinite(t))
        ctx->throwRangeError(ctx->thisObject);

    QString result;
    int year = (int)YearFromTime(t);
    if (year < 0 || year > 9999) {
        // Years outside 0..9999 use the signed 6-digit extended format;
        // anything with a million or more years cannot be represented.
        if (qAbs(year) >= 1000000)
            return Value::fromString(ctx, QStringLiteral("Invalid Date"));
        result += year < 0 ? '-' : '+';
        year = qAbs(year);
        addZeroPrefixedInt(result, year, 6);
    } else {
        addZeroPrefixedInt(result, year, 4);
    }
    // All fields below use the UTC time value t directly (no LocalTime).
    result += '-';
    addZeroPrefixedInt(result, (int)MonthFromTime(t) + 1, 2);
    result += '-';
    addZeroPrefixedInt(result, (int)DateFromTime(t), 2);
    result += 'T';
    addZeroPrefixedInt(result, HourFromTime(t), 2);
    result += ':';
    addZeroPrefixedInt(result, MinFromTime(t), 2);
    result += ':';
    addZeroPrefixedInt(result, SecFromTime(t), 2);
    result += '.';
    addZeroPrefixedInt(result, msFromTime(t), 3);
    result += 'Z';

    return Value::fromString(ctx, result);
}
+
// Date.prototype.toJSON (ES5 15.9.5.44): serialization hook used by
// JSON.stringify -- returns null for non-finite dates, otherwise calls
// this object's own toISOString (which may be user-overridden).
Value DatePrototype::method_toJSON(SimpleCallContext *ctx)
{
    Value O = __qmljs_to_object(ctx, ctx->thisObject);
    Value tv = __qmljs_to_primitive(O, NUMBER_HINT);

    // Non-finite time values serialize as null rather than throwing.
    if (tv.isNumber() && !std::isfinite(tv.toNumber()))
        return Value::nullValue();

    // Look up "toISOString" dynamically so overrides are honoured.
    FunctionObject *toIso = O.objectValue()->get(ctx, ctx->engine->newString(QStringLiteral("toISOString"))).asFunctionObject();

    // Per spec: TypeError if toISOString is not callable.
    if (!toIso)
        ctx->throwTypeError();

    return toIso->call(ctx, ctx->thisObject, 0, 0);
}
diff --git a/src/qml/qml/v4vm/qv4dateobject.h b/src/qml/qml/v4vm/qv4dateobject.h
new file mode 100644
index 0000000000..49a879e809
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4dateobject.h
@@ -0,0 +1,132 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4DATEOBJECT_P_H
+#define QV4DATEOBJECT_P_H
+
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <QtCore/qnumeric.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
// A JS Date instance: an Object tagged Type_DateObject whose `value` holds
// the time value as a double (ms since the epoch, NaN when invalid).
struct DateObject: Object {
    Value value;  // the [[PrimitiveValue]] time value
    DateObject(ExecutionEngine *engine, const Value &value): Object(engine), value(value) { type = Type_DateObject; }
};
+
// The Date constructor function object: `construct` implements
// `new Date(...)`, `call` implements calling Date(...) as a plain function.
struct DateCtor: FunctionObject
{
    DateCtor(ExecutionContext *scope);

    static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
    static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);

protected:
    // vtable shared by all DateCtor instances (managed-object dispatch).
    static const ManagedVTable static_vtbl;
};
+
// Date.prototype: itself a DateObject whose time value is NaN (per ES5
// 15.9.5). init() installs the ctor link and all prototype methods below;
// each method_* implements the correspondingly-named built-in.
struct DatePrototype: DateObject
{
    DatePrototype(ExecutionEngine *engine): DateObject(engine, Value::fromDouble(qSNaN())) {}
    void init(ExecutionContext *ctx, const Value &ctor);

    // Extracts the time value from ctx->thisObject (TypeError if not a Date).
    static double getThisDate(ExecutionContext *ctx);

    static Value method_parse(SimpleCallContext *ctx);
    static Value method_UTC(SimpleCallContext *ctx);
    static Value method_now(SimpleCallContext *ctx);

    static Value method_toString(SimpleCallContext *ctx);
    static Value method_toDateString(SimpleCallContext *ctx);
    static Value method_toTimeString(SimpleCallContext *ctx);
    static Value method_toLocaleString(SimpleCallContext *ctx);
    static Value method_toLocaleDateString(SimpleCallContext *ctx);
    static Value method_toLocaleTimeString(SimpleCallContext *ctx);
    static Value method_valueOf(SimpleCallContext *ctx);
    static Value method_getTime(SimpleCallContext *ctx);
    static Value method_getYear(SimpleCallContext *ctx);
    static Value method_getFullYear(SimpleCallContext *ctx);
    static Value method_getUTCFullYear(SimpleCallContext *ctx);
    static Value method_getMonth(SimpleCallContext *ctx);
    static Value method_getUTCMonth(SimpleCallContext *ctx);
    static Value method_getDate(SimpleCallContext *ctx);
    static Value method_getUTCDate(SimpleCallContext *ctx);
    static Value method_getDay(SimpleCallContext *ctx);
    static Value method_getUTCDay(SimpleCallContext *ctx);
    static Value method_getHours(SimpleCallContext *ctx);
    static Value method_getUTCHours(SimpleCallContext *ctx);
    static Value method_getMinutes(SimpleCallContext *ctx);
    static Value method_getUTCMinutes(SimpleCallContext *ctx);
    static Value method_getSeconds(SimpleCallContext *ctx);
    static Value method_getUTCSeconds(SimpleCallContext *ctx);
    static Value method_getMilliseconds(SimpleCallContext *ctx);
    static Value method_getUTCMilliseconds(SimpleCallContext *ctx);
    static Value method_getTimezoneOffset(SimpleCallContext *ctx);
    static Value method_setTime(SimpleCallContext *ctx);
    static Value method_setMilliseconds(SimpleCallContext *ctx);
    static Value method_setUTCMilliseconds(SimpleCallContext *ctx);
    static Value method_setSeconds(SimpleCallContext *ctx);
    static Value method_setUTCSeconds(SimpleCallContext *ctx);
    static Value method_setMinutes(SimpleCallContext *ctx);
    static Value method_setUTCMinutes(SimpleCallContext *ctx);
    static Value method_setHours(SimpleCallContext *ctx);
    static Value method_setUTCHours(SimpleCallContext *ctx);
    static Value method_setDate(SimpleCallContext *ctx);
    static Value method_setUTCDate(SimpleCallContext *ctx);
    static Value method_setMonth(SimpleCallContext *ctx);
    static Value method_setUTCMonth(SimpleCallContext *ctx);
    static Value method_setYear(SimpleCallContext *ctx);
    static Value method_setFullYear(SimpleCallContext *ctx);
    static Value method_setUTCFullYear(SimpleCallContext *ctx);
    static Value method_toUTCString(SimpleCallContext *ctx);
    static Value method_toISOString(SimpleCallContext *ctx);
    static Value method_toJSON(SimpleCallContext *ctx);
};
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
#endif // QV4DATEOBJECT_P_H
diff --git a/src/qml/qml/v4vm/qv4engine.cpp b/src/qml/qml/v4vm/qv4engine.cpp
new file mode 100644
index 0000000000..7d5c79f333
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4engine.cpp
@@ -0,0 +1,551 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include <qv4engine.h>
+#include <qv4value.h>
+#include <qv4object.h>
+#include <qv4objectproto.h>
+#include <qv4arrayobject.h>
+#include <qv4booleanobject.h>
+#include <qv4globalobject.h>
+#include <qv4errorobject.h>
+#include <qv4functionobject.h>
+#include <qv4mathobject.h>
+#include <qv4numberobject.h>
+#include <qv4regexpobject.h>
+#include <qv4runtime.h>
+#include "qv4mm.h"
+#include <qv4argumentsobject.h>
+#include <qv4dateobject.h>
+#include <qv4jsonobject.h>
+#include <qv4stringobject.h>
+#include <qv4identifier.h>
+#include <qv4unwindhelper.h>
+#include "qv4isel_masm_p.h"
+#include "debugging.h"
+#include "qv4executableallocator.h"
+
+namespace QQmlJS {
+namespace VM {
+
// Bootstraps a complete JS engine: memory manager, identifier cache, the
// built-in prototypes/constructors, and the global object. GC is blocked
// for the whole constructor so half-initialized objects are never scanned.
// The ordering below matters: prototypes before ctors, ctors before init().
ExecutionEngine::ExecutionEngine(EvalISelFactory *factory)
    : memoryManager(new QQmlJS::VM::MemoryManager)
    , executableAllocator(new QQmlJS::VM::ExecutableAllocator)
    , debugger(0)
    , globalObject(0)
    , globalCode(0)
    , externalResourceComparison(0)
    , regExpCache(0)
{
    MemoryManager::GCBlocker gcBlocker(memoryManager);

    // Default to the assembler (MASM) instruction selection backend.
    if (!factory)
        factory = new MASM::ISelFactory;
    iselFactory.reset(factory);

    memoryManager->setExecutionEngine(this);

    identifierCache = new Identifiers(this);

    // Pre-intern identifiers used on hot paths throughout the engine.
    id_undefined = newIdentifier(QStringLiteral("undefined"));
    id_null = newIdentifier(QStringLiteral("null"));
    id_true = newIdentifier(QStringLiteral("true"));
    id_false = newIdentifier(QStringLiteral("false"));
    id_boolean = newIdentifier(QStringLiteral("boolean"));
    id_number = newIdentifier(QStringLiteral("number"));
    id_string = newIdentifier(QStringLiteral("string"));
    id_object = newIdentifier(QStringLiteral("object"));
    id_function = newIdentifier(QStringLiteral("function"));
    id_length = newIdentifier(QStringLiteral("length"));
    id_prototype = newIdentifier(QStringLiteral("prototype"));
    id_constructor = newIdentifier(QStringLiteral("constructor"));
    id_arguments = newIdentifier(QStringLiteral("arguments"));
    id_caller = newIdentifier(QStringLiteral("caller"));
    id_this = newIdentifier(QStringLiteral("this"));
    id___proto__ = newIdentifier(QStringLiteral("__proto__"));
    id_enumerable = newIdentifier(QStringLiteral("enumerable"));
    id_configurable = newIdentifier(QStringLiteral("configurable"));
    id_writable = newIdentifier(QStringLiteral("writable"));
    id_value = newIdentifier(QStringLiteral("value"));
    id_get = newIdentifier(QStringLiteral("get"));
    id_set = newIdentifier(QStringLiteral("set"));
    id_eval = newIdentifier(QStringLiteral("eval"));

    // Hidden-class roots: arrays start from emptyClass plus a "length" slot.
    emptyClass = new InternalClass(this);
    arrayClass = emptyClass->addMember(id_length, Attr_NotConfigurable|Attr_NotEnumerable);
    initRootContext();

    // 1) Create every built-in prototype object.
    objectPrototype = new (memoryManager) ObjectPrototype(this);
    stringPrototype = new (memoryManager) StringPrototype(rootContext);
    numberPrototype = new (memoryManager) NumberPrototype(this);
    booleanPrototype = new (memoryManager) BooleanPrototype(this);
    arrayPrototype = new (memoryManager) ArrayPrototype(rootContext);
    datePrototype = new (memoryManager) DatePrototype(this);
    functionPrototype = new (memoryManager) FunctionPrototype(rootContext);
    regExpPrototype = new (memoryManager) RegExpPrototype(this);
    errorPrototype = new (memoryManager) ErrorPrototype(rootContext);
    evalErrorPrototype = new (memoryManager) EvalErrorPrototype(rootContext);
    rangeErrorPrototype = new (memoryManager) RangeErrorPrototype(rootContext);
    referenceErrorPrototype = new (memoryManager) ReferenceErrorPrototype(rootContext);
    syntaxErrorPrototype = new (memoryManager) SyntaxErrorPrototype(rootContext);
    typeErrorPrototype = new (memoryManager) TypeErrorPrototype(rootContext);
    uRIErrorPrototype = new (memoryManager) URIErrorPrototype(rootContext);

    // 2) All prototypes inherit from Object.prototype.
    stringPrototype->prototype = objectPrototype;
    numberPrototype->prototype = objectPrototype;
    booleanPrototype->prototype = objectPrototype;
    arrayPrototype->prototype = objectPrototype;
    datePrototype->prototype = objectPrototype;
    functionPrototype->prototype = objectPrototype;
    regExpPrototype->prototype = objectPrototype;
    errorPrototype->prototype = objectPrototype;
    evalErrorPrototype->prototype = objectPrototype;
    rangeErrorPrototype->prototype = objectPrototype;
    referenceErrorPrototype->prototype = objectPrototype;
    syntaxErrorPrototype->prototype = objectPrototype;
    typeErrorPrototype->prototype = objectPrototype;
    uRIErrorPrototype->prototype = objectPrototype;

    // 3) Create the built-in constructor functions.
    objectCtor = Value::fromObject(new (memoryManager) ObjectCtor(rootContext));
    stringCtor = Value::fromObject(new (memoryManager) StringCtor(rootContext));
    numberCtor = Value::fromObject(new (memoryManager) NumberCtor(rootContext));
    booleanCtor = Value::fromObject(new (memoryManager) BooleanCtor(rootContext));
    arrayCtor = Value::fromObject(new (memoryManager) ArrayCtor(rootContext));
    functionCtor = Value::fromObject(new (memoryManager) FunctionCtor(rootContext));
    dateCtor = Value::fromObject(new (memoryManager) DateCtor(rootContext));
    regExpCtor = Value::fromObject(new (memoryManager) RegExpCtor(rootContext));
    errorCtor = Value::fromObject(new (memoryManager) ErrorCtor(rootContext));
    evalErrorCtor = Value::fromObject(new (memoryManager) EvalErrorCtor(rootContext));
    rangeErrorCtor = Value::fromObject(new (memoryManager) RangeErrorCtor(rootContext));
    referenceErrorCtor = Value::fromObject(new (memoryManager) ReferenceErrorCtor(rootContext));
    syntaxErrorCtor = Value::fromObject(new (memoryManager) SyntaxErrorCtor(rootContext));
    typeErrorCtor = Value::fromObject(new (memoryManager) TypeErrorCtor(rootContext));
    uRIErrorCtor = Value::fromObject(new (memoryManager) URIErrorCtor(rootContext));

    // 4) Constructors are functions, so they inherit Function.prototype.
    objectCtor.objectValue()->prototype = functionPrototype;
    stringCtor.objectValue()->prototype = functionPrototype;
    numberCtor.objectValue()->prototype = functionPrototype;
    booleanCtor.objectValue()->prototype = functionPrototype;
    arrayCtor.objectValue()->prototype = functionPrototype;
    functionCtor.objectValue()->prototype = functionPrototype;
    dateCtor.objectValue()->prototype = functionPrototype;
    regExpCtor.objectValue()->prototype = functionPrototype;
    errorCtor.objectValue()->prototype = functionPrototype;
    evalErrorCtor.objectValue()->prototype = functionPrototype;
    rangeErrorCtor.objectValue()->prototype = functionPrototype;
    referenceErrorCtor.objectValue()->prototype = functionPrototype;
    syntaxErrorCtor.objectValue()->prototype = functionPrototype;
    typeErrorCtor.objectValue()->prototype = functionPrototype;
    uRIErrorCtor.objectValue()->prototype = functionPrototype;

    // 5) Wire each prototype to its ctor and install the built-in methods.
    objectPrototype->init(rootContext, objectCtor);
    stringPrototype->init(rootContext, stringCtor);
    numberPrototype->init(rootContext, numberCtor);
    booleanPrototype->init(rootContext, booleanCtor);
    arrayPrototype->init(rootContext, arrayCtor);
    datePrototype->init(rootContext, dateCtor);
    functionPrototype->init(rootContext, functionCtor);
    regExpPrototype->init(rootContext, regExpCtor);
    errorPrototype->init(rootContext, errorCtor);
    evalErrorPrototype->init(rootContext, evalErrorCtor);
    rangeErrorPrototype->init(rootContext, rangeErrorCtor);
    referenceErrorPrototype->init(rootContext, referenceErrorCtor);
    syntaxErrorPrototype->init(rootContext, syntaxErrorCtor);
    typeErrorPrototype->init(rootContext, typeErrorCtor);
    uRIErrorPrototype->init(rootContext, uRIErrorCtor);

    //
    // set up the global object
    //
    globalObject = newObject(/*rootContext*/);
    rootContext->global = globalObject;
    rootContext->thisObject = Value::fromObject(globalObject);

    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Object"), objectCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("String"), stringCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Number"), numberCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Boolean"), booleanCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Array"), arrayCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Function"), functionCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Date"), dateCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("RegExp"), regExpCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Error"), errorCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("EvalError"), evalErrorCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("RangeError"), rangeErrorCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("ReferenceError"), referenceErrorCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("SyntaxError"), syntaxErrorCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("TypeError"), typeErrorCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("URIError"), uRIErrorCtor);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("Math"), Value::fromObject(new (memoryManager) MathObject(rootContext)));
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("JSON"), Value::fromObject(new (memoryManager) JsonObject(rootContext)));

    // ES5 15.1.1: undefined/NaN/Infinity are non-writable globals.
    globalObject->defineReadonlyProperty(this, QStringLiteral("undefined"), Value::undefinedValue());
    globalObject->defineReadonlyProperty(this, QStringLiteral("NaN"), Value::fromDouble(std::numeric_limits<double>::quiet_NaN()));
    globalObject->defineReadonlyProperty(this, QStringLiteral("Infinity"), Value::fromDouble(Q_INFINITY));

    // eval gets a dedicated function object; it is also kept in a member so
    // direct-eval call sites can identify it.
    evalFunction = new (memoryManager) EvalFunction(rootContext);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("eval"), Value::fromObject(evalFunction));

    globalObject->defineDefaultProperty(rootContext, QStringLiteral("parseInt"), GlobalFunctions::method_parseInt, 2);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("parseFloat"), GlobalFunctions::method_parseFloat, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("isNaN"), GlobalFunctions::method_isNaN, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("isFinite"), GlobalFunctions::method_isFinite, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("decodeURI"), GlobalFunctions::method_decodeURI, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("decodeURIComponent"), GlobalFunctions::method_decodeURIComponent, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("encodeURI"), GlobalFunctions::method_encodeURI, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("encodeURIComponent"), GlobalFunctions::method_encodeURIComponent, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("escape"), GlobalFunctions::method_escape, 1);
    globalObject->defineDefaultProperty(rootContext, QStringLiteral("unescape"), GlobalFunctions::method_unescape, 1);
}
+
// Tears the engine down. Compiled functions must be deregistered from the
// unwinder before they are destroyed; the memory manager is deleted before
// the executable allocator that backs JIT code it may reference.
// NOTE(review): identifierCache (allocated in the constructor) is not
// deleted here -- confirm it is owned elsewhere, or this leaks.
ExecutionEngine::~ExecutionEngine()
{
    delete regExpCache;
    UnwindHelper::deregisterFunctions(functions);
    qDeleteAll(functions);
    delete memoryManager;
    delete executableAllocator;
}
+
+void ExecutionEngine::initRootContext()
+{
+ rootContext = static_cast<GlobalContext *>(memoryManager->allocContext(sizeof(GlobalContext)));
+ current = rootContext;
+ current->parent = 0;
+ rootContext->init(this);
+}
+
+WithContext *ExecutionEngine::newWithContext(Object *with)
+{
+ ExecutionContext *p = current;
+ WithContext *w = static_cast<WithContext *>(memoryManager->allocContext(sizeof(WithContext)));
+ w->parent = current;
+ current = w;
+
+ w->init(p, with);
+ return w;
+}
+
+CatchContext *ExecutionEngine::newCatchContext(String *exceptionVarName, const Value &exceptionValue)
+{
+ ExecutionContext *p = current;
+ CatchContext *c = static_cast<CatchContext *>(memoryManager->allocContext(sizeof(CatchContext)));
+ c->parent = current;
+ current = c;
+
+ c->init(p, exceptionVarName, exceptionValue);
+ return c;
+}
+
// Pushes a heap-allocated call context for invoking `f` with the given
// receiver and arguments. The context is sized for the function's locals
// (requiredMemoryForExecutionContect) and becomes `current` before the
// fields are filled in; initCallContext() runs last, once function,
// thisObject and the argument block are all set.
CallContext *ExecutionEngine::newCallContext(FunctionObject *f, const Value &thisObject, Value *args, int argc)
{
    CallContext *c = static_cast<CallContext *>(memoryManager->allocContext(requiredMemoryForExecutionContect(f, argc)));
    c->parent = current;
    current = c;

    c->function = f;
    c->thisObject = thisObject;
    c->arguments = args;
    c->argumentCount = argc;
    c->initCallContext(this);

    return c;
}
+
+CallContext *ExecutionEngine::newCallContext(void *stackSpace, FunctionObject *f, const Value &thisObject, Value *args, int argc)
+{
+ CallContext *c;
+ uint memory = requiredMemoryForExecutionContect(f, argc);
+ if (f->needsActivation || memory > stackContextSize) {
+ c = static_cast<CallContext *>(memoryManager->allocContext(memory));
+ } else {
+ c = (CallContext *)stackSpace;
+#ifndef QT_NO_DEBUG
+ c->next = (CallContext *)0x1;
+#endif
+ }
+ c->parent = current;
+ current = c;
+
+ c->function = f;
+ c->thisObject = thisObject;
+ c->arguments = args;
+ c->argumentCount = argc;
+ c->initCallContext(this);
+
+ return c;
+}
+
+
+ExecutionContext *ExecutionEngine::pushGlobalContext()
+{
+ GlobalContext *g = static_cast<GlobalContext *>(memoryManager->allocContext(sizeof(GlobalContext)));
+ *g = *rootContext;
+ g->parent = current;
+ current = g;
+
+ return current;
+}
+
+Function *ExecutionEngine::newFunction(const QString &name)
+{
+ VM::Function *f = new VM::Function(newIdentifier(name));
+ functions.append(f);
+ return f;
+}
+
+FunctionObject *ExecutionEngine::newBuiltinFunction(ExecutionContext *scope, String *name, Value (*code)(SimpleCallContext *))
+{
+ BuiltinFunctionOld *f = new (memoryManager) BuiltinFunctionOld(scope, name, code);
+ return f;
+}
+
+FunctionObject *ExecutionEngine::newScriptFunction(ExecutionContext *scope, VM::Function *function)
+{
+ assert(function);
+
+ ScriptFunction *f = new (memoryManager) ScriptFunction(scope, function);
+ return f;
+}
+
+BoundFunction *ExecutionEngine::newBoundFunction(ExecutionContext *scope, FunctionObject *target, Value boundThis, const QVector<Value> &boundArgs)
+{
+ assert(target);
+
+ BoundFunction *f = new (memoryManager) BoundFunction(scope, target, boundThis, boundArgs);
+ return f;
+}
+
+
+Object *ExecutionEngine::newObject()
+{
+ Object *object = new (memoryManager) Object(this);
+ object->prototype = objectPrototype;
+ return object;
+}
+
+String *ExecutionEngine::newString(const QString &s)
+{
+ return new (memoryManager) String(s);
+}
+
+String *ExecutionEngine::newIdentifier(const QString &text)
+{
+ return identifierCache->insert(text);
+}
+
+Object *ExecutionEngine::newStringObject(ExecutionContext *ctx, const Value &value)
+{
+ StringObject *object = new (memoryManager) StringObject(ctx, value);
+ object->prototype = stringPrototype;
+ return object;
+}
+
+Object *ExecutionEngine::newNumberObject(const Value &value)
+{
+ NumberObject *object = new (memoryManager) NumberObject(this, value);
+ object->prototype = numberPrototype;
+ return object;
+}
+
+Object *ExecutionEngine::newBooleanObject(const Value &value)
+{
+ Object *object = new (memoryManager) BooleanObject(this, value);
+ object->prototype = booleanPrototype;
+ return object;
+}
+
+Object *ExecutionEngine::newFunctionObject(ExecutionContext *ctx)
+{
+ Object *object = new (memoryManager) FunctionObject(ctx);
+ object->prototype = functionPrototype;
+ return object;
+}
+
+ArrayObject *ExecutionEngine::newArrayObject(ExecutionContext *ctx)
+{
+ ArrayObject *object = new (memoryManager) ArrayObject(ctx);
+ object->prototype = arrayPrototype;
+ return object;
+}
+
+Object *ExecutionEngine::newDateObject(const Value &value)
+{
+ Object *object = new (memoryManager) DateObject(this, value);
+ object->prototype = datePrototype;
+ return object;
+}
+
+RegExpObject *ExecutionEngine::newRegExpObject(const QString &pattern, int flags)
+{
+ bool global = (flags & V4IR::RegExp::RegExp_Global);
+ bool ignoreCase = false;
+ bool multiline = false;
+ if (flags & V4IR::RegExp::RegExp_IgnoreCase)
+ ignoreCase = true;
+ if (flags & V4IR::RegExp::RegExp_Multiline)
+ multiline = true;
+
+ return newRegExpObject(RegExp::create(this, pattern, ignoreCase, multiline), global);
+}
+
+RegExpObject *ExecutionEngine::newRegExpObject(RegExp* re, bool global)
+{
+ RegExpObject *object = new (memoryManager) RegExpObject(this, re, global);
+ object->prototype = regExpPrototype;
+ return object;
+}
+
+Object *ExecutionEngine::newErrorObject(const Value &value)
+{
+ ErrorObject *object = new (memoryManager) ErrorObject(rootContext, value);
+ object->prototype = errorPrototype;
+ return object;
+}
+
+Object *ExecutionEngine::newSyntaxErrorObject(ExecutionContext *ctx, DiagnosticMessage *message)
+{
+ return new (memoryManager) SyntaxErrorObject(ctx, message);
+}
+
+Object *ExecutionEngine::newReferenceErrorObject(ExecutionContext *ctx, const QString &message)
+{
+ return new (memoryManager) ReferenceErrorObject(ctx, message);
+}
+
+Object *ExecutionEngine::newTypeErrorObject(ExecutionContext *ctx, const QString &message)
+{
+ return new (memoryManager) TypeErrorObject(ctx, message);
+}
+
+Object *ExecutionEngine::newRangeErrorObject(ExecutionContext *ctx, const QString &message)
+{
+ return new (memoryManager) RangeErrorObject(ctx, message);
+}
+
+Object *ExecutionEngine::newURIErrorObject(ExecutionContext *ctx, Value message)
+{
+ return new (memoryManager) URIErrorObject(ctx, message);
+}
+
+Object *ExecutionEngine::newForEachIteratorObject(ExecutionContext *ctx, Object *o)
+{
+ return new (memoryManager) ForEachIteratorObject(ctx, o);
+}
+
+void ExecutionEngine::requireArgumentsAccessors(int n)
+{
+ if (n <= argumentsAccessors.size())
+ return;
+
+ // QVector::size() returns int; keep the index type signed to match
+ // both the container and the loop variable (avoids -Wsign-compare).
+ int oldSize = argumentsAccessors.size();
+ argumentsAccessors.resize(n);
+ for (int i = oldSize; i < n; ++i) {
+ FunctionObject *get = new (memoryManager) ArgumentsGetterFunction(rootContext, i);
+ get->prototype = functionPrototype;
+ FunctionObject *set = new (memoryManager) ArgumentsSetterFunction(rootContext, i);
+ set->prototype = functionPrototype;
+ Property pd = Property::fromAccessor(get, set);
+ argumentsAccessors[i] = pd;
+ }
+}
+
+void ExecutionEngine::markObjects()
+{
+ identifierCache->mark();
+
+ globalObject->mark();
+
+ if (globalCode)
+ globalCode->mark();
+
+ for (int i = 0; i < argumentsAccessors.size(); ++i) {
+ const Property &pd = argumentsAccessors.at(i);
+ pd.getter()->mark();
+ pd.setter()->mark();
+ }
+
+ ExecutionContext *c = current;
+ while (c) {
+ c->mark();
+ c = c->parent;
+ }
+
+ for (int i = 0; i < functions.size(); ++i)
+ functions.at(i)->mark();
+
+ id_length->mark();
+ id_prototype->mark();
+ id_constructor->mark();
+ id_arguments->mark();
+ id_caller->mark();
+ id_this->mark();
+ id___proto__->mark();
+ id_enumerable->mark();
+ id_configurable->mark();
+ id_writable->mark();
+ id_value->mark();
+ id_get->mark();
+ id_set->mark();
+ id_eval->mark();
+}
+
+Value ExecutionEngine::run(Function *function, ExecutionContext *ctx)
+{
+ if (!ctx)
+ ctx = rootContext;
+
+ // The guard must be a *named* object: an unnamed temporary would be
+ // destroyed at the end of this full expression, restoring globalCode
+ // before function->code() even runs.
+ TemporaryAssignment<Function*> globalCodeGuard(globalCode, function);
+ // ### strictMode/lookups are not restored if an exception escapes;
+ // a SavedExecutionState RAII object would cover that case too.
+ ctx->strictMode = function->isStrict;
+ ctx->lookups = function->lookups;
+
+ if (debugger)
+ debugger->aboutToCall(0, ctx);
+ QQmlJS::VM::Value result = function->code(ctx, function->codeData);
+ if (debugger)
+ debugger->justLeft(ctx);
+ return result;
+}
+
+} // namespace VM
+} // namespace QQmlJS
diff --git a/src/qml/qml/v4vm/qv4engine.h b/src/qml/qml/v4vm/qv4engine.h
new file mode 100644
index 0000000000..538796eefa
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4engine.h
@@ -0,0 +1,271 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4ENGINE_H
+#define QV4ENGINE_H
+
+#include "qv4global.h"
+#include "qv4isel_p.h"
+#include "qv4object.h"
+#include "qv4util.h"
+#include "qv4context.h"
+#include "qv4property.h"
+#include <setjmp.h>
+
+#include <wtf/BumpPointerAllocator.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+
+namespace Debugging {
+class Debugger;
+} // namespace Debugging
+
+namespace VM {
+
+struct Value;
+struct Function;
+struct Object;
+struct BooleanObject;
+struct NumberObject;
+struct StringObject;
+struct ArrayObject;
+struct DateObject;
+struct FunctionObject;
+struct BoundFunction;
+struct RegExpObject;
+struct ErrorObject;
+struct ArgumentsObject;
+struct ExecutionContext;
+struct ExecutionEngine;
+class MemoryManager;
+class UnwindHelper;
+class ExecutableAllocator;
+
+struct ObjectPrototype;
+struct StringPrototype;
+struct NumberPrototype;
+struct BooleanPrototype;
+struct ArrayPrototype;
+struct FunctionPrototype;
+struct DatePrototype;
+struct RegExpPrototype;
+struct ErrorPrototype;
+struct EvalErrorPrototype;
+struct RangeErrorPrototype;
+struct ReferenceErrorPrototype;
+struct SyntaxErrorPrototype;
+struct TypeErrorPrototype;
+struct URIErrorPrototype;
+struct EvalFunction;
+struct Identifiers;
+struct InternalClass;
+
+class RegExp;
+class RegExpCache;
+
+typedef bool (*ExternalResourceComparison)(const VM::Value &a, const VM::Value &b);
+
+struct Q_V4_EXPORT ExecutionEngine
+{
+ MemoryManager *memoryManager;
+ ExecutableAllocator *executableAllocator;
+ QScopedPointer<EvalISelFactory> iselFactory;
+
+ ExecutionContext *current;
+ GlobalContext *rootContext;
+
+ WTF::BumpPointerAllocator bumperPointerAllocator; // Used by Yarr Regex engine.
+
+ Identifiers *identifierCache;
+
+ Debugging::Debugger *debugger;
+
+ Object *globalObject;
+
+ VM::Function *globalCode;
+
+ Value objectCtor;
+ Value stringCtor;
+ Value numberCtor;
+ Value booleanCtor;
+ Value arrayCtor;
+ Value functionCtor;
+ Value dateCtor;
+ Value regExpCtor;
+ Value errorCtor;
+ Value evalErrorCtor;
+ Value rangeErrorCtor;
+ Value referenceErrorCtor;
+ Value syntaxErrorCtor;
+ Value typeErrorCtor;
+ Value uRIErrorCtor;
+
+ ObjectPrototype *objectPrototype;
+ StringPrototype *stringPrototype;
+ NumberPrototype *numberPrototype;
+ BooleanPrototype *booleanPrototype;
+ ArrayPrototype *arrayPrototype;
+ FunctionPrototype *functionPrototype;
+ DatePrototype *datePrototype;
+ RegExpPrototype *regExpPrototype;
+ ErrorPrototype *errorPrototype;
+ EvalErrorPrototype *evalErrorPrototype;
+ RangeErrorPrototype *rangeErrorPrototype;
+ ReferenceErrorPrototype *referenceErrorPrototype;
+ SyntaxErrorPrototype *syntaxErrorPrototype;
+ TypeErrorPrototype *typeErrorPrototype;
+ URIErrorPrototype *uRIErrorPrototype;
+
+ InternalClass *emptyClass;
+ InternalClass *arrayClass;
+
+ EvalFunction *evalFunction;
+
+ QVector<Property> argumentsAccessors;
+
+ String *id_undefined;
+ String *id_null;
+ String *id_true;
+ String *id_false;
+ String *id_boolean;
+ String *id_number;
+ String *id_string;
+ String *id_object;
+ String *id_function;
+ String *id_length;
+ String *id_prototype;
+ String *id_constructor;
+ String *id_arguments;
+ String *id_caller;
+ String *id_this;
+ String *id___proto__;
+ String *id_enumerable;
+ String *id_configurable;
+ String *id_writable;
+ String *id_value;
+ String *id_get;
+ String *id_set;
+ String *id_eval;
+
+ QVector<Function *> functions;
+
+ ExternalResourceComparison externalResourceComparison;
+
+ RegExpCache *regExpCache;
+
+ ExecutionEngine(EvalISelFactory *iselFactory = 0);
+ ~ExecutionEngine();
+
+ WithContext *newWithContext(Object *with);
+ CatchContext *newCatchContext(String* exceptionVarName, const QQmlJS::VM::Value &exceptionValue);
+ CallContext *newCallContext(FunctionObject *f, const QQmlJS::VM::Value &thisObject, QQmlJS::VM::Value *args, int argc);
+ CallContext *newCallContext(void *stackSpace, FunctionObject *f, const QQmlJS::VM::Value &thisObject, QQmlJS::VM::Value *args, int argc);
+ ExecutionContext *pushGlobalContext();
+ void pushContext(SimpleCallContext *context);
+ ExecutionContext *popContext();
+
+ VM::Function *newFunction(const QString &name);
+
+ FunctionObject *newBuiltinFunction(ExecutionContext *scope, String *name, Value (*code)(SimpleCallContext *));
+ FunctionObject *newScriptFunction(ExecutionContext *scope, VM::Function *function);
+ BoundFunction *newBoundFunction(ExecutionContext *scope, FunctionObject *target, Value boundThis, const QVector<Value> &boundArgs);
+
+ Object *newObject();
+
+ String *newString(const QString &s);
+ String *newIdentifier(const QString &text);
+
+ Object *newStringObject(ExecutionContext *ctx, const Value &value);
+ Object *newNumberObject(const Value &value);
+ Object *newBooleanObject(const Value &value);
+ Object *newFunctionObject(ExecutionContext *ctx);
+
+ ArrayObject *newArrayObject(ExecutionContext *ctx);
+
+ Object *newDateObject(const Value &value);
+
+ RegExpObject *newRegExpObject(const QString &pattern, int flags);
+ RegExpObject *newRegExpObject(RegExp* re, bool global);
+
+ Object *newErrorObject(const Value &value);
+ Object *newSyntaxErrorObject(ExecutionContext *ctx, DiagnosticMessage *message);
+ Object *newReferenceErrorObject(ExecutionContext *ctx, const QString &message);
+ Object *newTypeErrorObject(ExecutionContext *ctx, const QString &message);
+ Object *newRangeErrorObject(ExecutionContext *ctx, const QString &message);
+ Object *newURIErrorObject(ExecutionContext *ctx, Value message);
+
+ Object *newForEachIteratorObject(ExecutionContext *ctx, Object *o);
+
+ void requireArgumentsAccessors(int n);
+
+ void markObjects();
+
+ Value run(VM::Function *function, ExecutionContext *ctx = 0);
+
+ void initRootContext();
+};
+
+inline void ExecutionEngine::pushContext(SimpleCallContext *context)
+{
+ context->parent = current;
+ current = context;
+}
+
+inline ExecutionContext *ExecutionEngine::popContext()
+{
+ CallContext *c = current->asCallContext();
+ if (c && !c->needsOwnArguments()) {
+ c->arguments = 0;
+ c->argumentCount = 0;
+ }
+
+ current = current->parent;
+ return current;
+}
+
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4ENGINE_H
diff --git a/src/qml/qml/v4vm/qv4errorobject.cpp b/src/qml/qml/v4vm/qv4errorobject.cpp
new file mode 100644
index 0000000000..b69262e77c
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4errorobject.cpp
@@ -0,0 +1,238 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+
+#include "qv4errorobject.h"
+#include "qv4mm.h"
+#include <QtCore/qnumeric.h>
+#include <QtCore/qmath.h>
+#include <QtCore/QDateTime>
+#include <QtCore/QStringList>
+#include <QtCore/QDebug>
+#include <cmath>
+#include <qmath.h>
+#include <qnumeric.h>
+#include <cassert>
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include <qv4isel_masm_p.h>
+
+#ifndef Q_OS_WIN
+# include <time.h>
+# ifndef Q_OS_VXWORKS
+# include <sys/time.h>
+# else
+# include "qplatformdefs.h"
+# endif
+#else
+# include <windows.h>
+#endif
+
+using namespace QQmlJS::VM;
+
+ErrorObject::ErrorObject(ExecutionContext *context, const Value &message, ErrorType t)
+ : Object(context->engine)
+{
+ type = Type_ErrorObject;
+ subtype = t;
+
+ if (!message.isUndefined())
+ defineDefaultProperty(context->engine->newString(QStringLiteral("message")), message);
+ defineDefaultProperty(context, QLatin1String("name"), Value::fromString(context, className()));
+}
+
+DEFINE_MANAGED_VTABLE(SyntaxErrorObject);
+
+SyntaxErrorObject::SyntaxErrorObject(ExecutionContext *ctx, DiagnosticMessage *message)
+ : ErrorObject(ctx, message ? Value::fromString(message->buildFullMessage(ctx)) : ctx->argument(0), SyntaxError)
+ , msg(message)
+{
+ vtbl = &static_vtbl;
+ prototype = ctx->engine->syntaxErrorPrototype;
+}
+
+
+
+EvalErrorObject::EvalErrorObject(ExecutionContext *ctx, const Value &message)
+ : ErrorObject(ctx, message, EvalError)
+{
+ prototype = ctx->engine->evalErrorPrototype;
+}
+
+RangeErrorObject::RangeErrorObject(ExecutionContext *ctx, const Value &message)
+ : ErrorObject(ctx, message, RangeError)
+{
+ prototype = ctx->engine->rangeErrorPrototype;
+}
+
+RangeErrorObject::RangeErrorObject(ExecutionContext *ctx, const QString &message)
+ : ErrorObject(ctx, Value::fromString(ctx,message), RangeError)
+{
+ prototype = ctx->engine->rangeErrorPrototype;
+}
+
+ReferenceErrorObject::ReferenceErrorObject(ExecutionContext *ctx, const Value &message)
+ : ErrorObject(ctx, message, ReferenceError)
+{
+ prototype = ctx->engine->referenceErrorPrototype;
+}
+
+ReferenceErrorObject::ReferenceErrorObject(ExecutionContext *ctx, const QString &message)
+ : ErrorObject(ctx, Value::fromString(ctx,message), ReferenceError)
+{
+ prototype = ctx->engine->referenceErrorPrototype;
+}
+
+TypeErrorObject::TypeErrorObject(ExecutionContext *ctx, const Value &message)
+ : ErrorObject(ctx, message, TypeError)
+{
+ prototype = ctx->engine->typeErrorPrototype;
+}
+
+TypeErrorObject::TypeErrorObject(ExecutionContext *ctx, const QString &message)
+ : ErrorObject(ctx, Value::fromString(ctx,message), TypeError)
+{
+ prototype = ctx->engine->typeErrorPrototype;
+}
+
+URIErrorObject::URIErrorObject(ExecutionContext *ctx, const Value &message)
+ : ErrorObject(ctx, message, URIError)
+{
+ prototype = ctx->engine->uRIErrorPrototype;
+}
+
+DEFINE_MANAGED_VTABLE(ErrorCtor);
+DEFINE_MANAGED_VTABLE(EvalErrorCtor);
+DEFINE_MANAGED_VTABLE(RangeErrorCtor);
+DEFINE_MANAGED_VTABLE(ReferenceErrorCtor);
+DEFINE_MANAGED_VTABLE(SyntaxErrorCtor);
+DEFINE_MANAGED_VTABLE(TypeErrorCtor);
+DEFINE_MANAGED_VTABLE(URIErrorCtor);
+
+ErrorCtor::ErrorCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+Value ErrorCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ return Value::fromObject(ctx->engine->newErrorObject(argc ? args[0] : Value::undefinedValue()));
+}
+
+Value ErrorCtor::call(Managed *that, ExecutionContext *ctx, const Value &, Value *args, int argc)
+{
+ return that->construct(ctx, args, argc);
+}
+
+Value EvalErrorCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ return Value::fromObject(new (ctx->engine->memoryManager) EvalErrorObject(ctx, argc ? args[0] : Value::undefinedValue()));
+}
+
+Value RangeErrorCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ return Value::fromObject(new (ctx->engine->memoryManager) RangeErrorObject(ctx, argc ? args[0] : Value::undefinedValue()));
+}
+
+Value ReferenceErrorCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ return Value::fromObject(new (ctx->engine->memoryManager) ReferenceErrorObject(ctx, argc ? args[0] : Value::undefinedValue()));
+}
+
+Value SyntaxErrorCtor::construct(Managed *, ExecutionContext *ctx, Value *, int)
+{
+ return Value::fromObject(new (ctx->engine->memoryManager) SyntaxErrorObject(ctx, 0));
+}
+
+Value TypeErrorCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ return Value::fromObject(new (ctx->engine->memoryManager) TypeErrorObject(ctx, argc ? args[0] : Value::undefinedValue()));
+}
+
+Value URIErrorCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ return Value::fromObject(new (ctx->engine->memoryManager) URIErrorObject(ctx, argc ? args[0] : Value::undefinedValue()));
+}
+
+void ErrorPrototype::init(ExecutionContext *ctx, const Value &ctor, Object *obj)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(obj));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(1));
+ obj->defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ obj->defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString, 0);
+ obj->defineDefaultProperty(ctx, QStringLiteral("message"), Value::fromString(ctx, QString()));
+}
+
+Value ErrorPrototype::method_toString(SimpleCallContext *ctx)
+{
+ Object *o = ctx->thisObject.asObject();
+ if (!o)
+ ctx->throwTypeError();
+
+ Value name = o->get(ctx, ctx->engine->newString(QString::fromLatin1("name")));
+ QString qname;
+ if (name.isUndefined())
+ qname = QString::fromLatin1("Error");
+ else
+ qname = __qmljs_to_string(name, ctx).stringValue()->toQString();
+
+ Value message = o->get(ctx, ctx->engine->newString(QString::fromLatin1("message")));
+ QString qmessage;
+ if (!message.isUndefined())
+ qmessage = __qmljs_to_string(message, ctx).stringValue()->toQString();
+
+ QString str;
+ if (qname.isEmpty()) {
+ str = qmessage;
+ } else if (qmessage.isEmpty()) {
+ str = qname;
+ } else {
+ str = qname + QLatin1String(": ") + qmessage;
+ }
+
+ return Value::fromString(ctx, str);
+}
diff --git a/src/qml/qml/v4vm/qv4errorobject.h b/src/qml/qml/v4vm/qv4errorobject.h
new file mode 100644
index 0000000000..20a20ad980
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4errorobject.h
@@ -0,0 +1,235 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4ERROROBJECT_H
+#define QV4ERROROBJECT_H
+
+#include "qv4object.h"
+#include "qv4functionobject.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct SyntaxErrorObject;
+
+struct ErrorObject: Object {
+ enum ErrorType {
+ Error,
+ EvalError,
+ RangeError,
+ ReferenceError,
+ SyntaxError,
+ TypeError,
+ URIError
+ };
+
+ ErrorObject(ExecutionContext *context, const Value &message, ErrorType t = Error);
+
+ SyntaxErrorObject *asSyntaxError();
+};
+
+struct EvalErrorObject: ErrorObject {
+ EvalErrorObject(ExecutionContext *ctx, const Value &message);
+};
+
+struct RangeErrorObject: ErrorObject {
+ RangeErrorObject(ExecutionContext *ctx, const Value &message);
+ RangeErrorObject(ExecutionContext *ctx, const QString &msg);
+};
+
+struct ReferenceErrorObject: ErrorObject {
+ ReferenceErrorObject(ExecutionContext *ctx, const Value &message);
+ ReferenceErrorObject(ExecutionContext *ctx, const QString &msg);
+};
+
+struct SyntaxErrorObject: ErrorObject {
+ SyntaxErrorObject(ExecutionContext *ctx, DiagnosticMessage *msg);
+ ~SyntaxErrorObject() { delete msg; }
+ static void destroy(Managed *that) { static_cast<SyntaxErrorObject *>(that)->~SyntaxErrorObject(); }
+
+ DiagnosticMessage *message() { return msg; }
+
+private:
+ DiagnosticMessage *msg;
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct TypeErrorObject: ErrorObject {
+ TypeErrorObject(ExecutionContext *ctx, const Value &message);
+ TypeErrorObject(ExecutionContext *ctx, const QString &msg);
+};
+
+struct URIErrorObject: ErrorObject {
+ URIErrorObject(ExecutionContext *ctx, const Value &message);
+};
+
+struct ErrorCtor: FunctionObject
+{
+ ErrorCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct EvalErrorCtor: ErrorCtor
+{
+ EvalErrorCtor(ExecutionContext *scope): ErrorCtor(scope) { vtbl = &static_vtbl; }
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct RangeErrorCtor: ErrorCtor
+{
+ RangeErrorCtor(ExecutionContext *scope): ErrorCtor(scope) { vtbl = &static_vtbl; }
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct ReferenceErrorCtor: ErrorCtor
+{
+ ReferenceErrorCtor(ExecutionContext *scope): ErrorCtor(scope) { vtbl = &static_vtbl; }
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct SyntaxErrorCtor: ErrorCtor
+{
+ SyntaxErrorCtor(ExecutionContext *scope): ErrorCtor(scope) { vtbl = &static_vtbl; }
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct TypeErrorCtor: ErrorCtor
+{
+ TypeErrorCtor(ExecutionContext *scope): ErrorCtor(scope) { vtbl = &static_vtbl; }
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct URIErrorCtor: ErrorCtor
+{
+ URIErrorCtor(ExecutionContext *scope): ErrorCtor(scope) { vtbl = &static_vtbl; }
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+
+struct ErrorPrototype: ErrorObject
+{
+ // ### shouldn't be undefined
+ ErrorPrototype(ExecutionContext *context): ErrorObject(context, Value::undefinedValue()) {}
+ void init(ExecutionContext *ctx, const Value &ctor) { init(ctx, ctor, this); }
+
+ static void init(ExecutionContext *ctx, const Value &ctor, Object *obj);
+ static Value method_toString(SimpleCallContext *ctx);
+};
+
+struct EvalErrorPrototype: EvalErrorObject
+{
+ EvalErrorPrototype(ExecutionContext *ctx): EvalErrorObject(ctx, Value::undefinedValue()) { vtbl = &static_vtbl; }
+ void init(ExecutionContext *ctx, const Value &ctor) { ErrorPrototype::init(ctx, ctor, this); }
+};
+
+struct RangeErrorPrototype: RangeErrorObject
+{
+ RangeErrorPrototype(ExecutionContext *ctx): RangeErrorObject(ctx, Value::undefinedValue()) { vtbl = &static_vtbl; }
+ void init(ExecutionContext *ctx, const Value &ctor) { ErrorPrototype::init(ctx, ctor, this); }
+};
+
+struct ReferenceErrorPrototype: ReferenceErrorObject
+{
+ ReferenceErrorPrototype(ExecutionContext *ctx): ReferenceErrorObject(ctx, Value::undefinedValue()) { vtbl = &static_vtbl; }
+ void init(ExecutionContext *ctx, const Value &ctor) { ErrorPrototype::init(ctx, ctor, this); }
+};
+
+struct SyntaxErrorPrototype: SyntaxErrorObject
+{
+ SyntaxErrorPrototype(ExecutionContext *ctx): SyntaxErrorObject(ctx, 0) { vtbl = &static_vtbl; }
+ void init(ExecutionContext *ctx, const Value &ctor) { ErrorPrototype::init(ctx, ctor, this); }
+};
+
+struct TypeErrorPrototype: TypeErrorObject
+{
+ TypeErrorPrototype(ExecutionContext *ctx): TypeErrorObject(ctx, Value::undefinedValue()) { vtbl = &static_vtbl; }
+ void init(ExecutionContext *ctx, const Value &ctor) { ErrorPrototype::init(ctx, ctor, this); }
+};
+
+struct URIErrorPrototype: URIErrorObject
+{
+ URIErrorPrototype(ExecutionContext *ctx): URIErrorObject(ctx, Value::undefinedValue()) { vtbl = &static_vtbl; }
+ void init(ExecutionContext *ctx, const Value &ctor) { ErrorPrototype::init(ctx, ctor, this); }
+};
+
+
+inline SyntaxErrorObject *ErrorObject::asSyntaxError()
+{
+ return subtype == SyntaxError ? static_cast<SyntaxErrorObject *>(this) : 0;
+}
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4ERROROBJECT_H
diff --git a/src/qml/qml/v4vm/qv4executableallocator.cpp b/src/qml/qml/v4vm/qv4executableallocator.cpp
new file mode 100644
index 0000000000..2ad300a74b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4executableallocator.cpp
@@ -0,0 +1,208 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4executableallocator.h"
+
+#include <assert.h>
+#include <wtf/StdLibExtras.h>
+#include <wtf/PageAllocation.h>
+
+using namespace QQmlJS::VM;
+
+// Start address of this allocation, as a raw pointer.
+void *ExecutableAllocator::Allocation::start() const
+{
+ return reinterpret_cast<void*>(addr);
+}
+
+// Splits this allocation in two: the first dividingSize bytes stay in this
+// node; the rest moves into a freshly created node that is linked in right
+// after this one and inherits the free flag. Returns the remainder node.
+// The caller is responsible for registering the remainder in the free map.
+ExecutableAllocator::Allocation *ExecutableAllocator::Allocation::split(size_t dividingSize)
+{
+ Allocation *remainder = new Allocation;
+ // Link the remainder between this node and the old successor.
+ if (next)
+ next->prev = remainder;
+
+ remainder->next = next;
+ next = remainder;
+
+ remainder->prev = this;
+
+ remainder->size = size - dividingSize;
+ remainder->free = free;
+ remainder->addr = addr + dividingSize;
+ size = dividingSize;
+
+ return remainder;
+}
+
+// Coalesces this free allocation with its successor when that successor is
+// also free: both nodes are removed from the allocator's free map, the sizes
+// are combined, the successor node is deleted, and the merged node is
+// re-inserted under its new size. Returns false when no merge is possible.
+bool ExecutableAllocator::Allocation::mergeNext(ExecutableAllocator *allocator)
+{
+ assert(free);
+ if (!next || !next->free)
+ return false;
+
+ // Take both nodes out of the free map before mutating their sizes,
+ // since the map is keyed by size.
+ allocator->freeAllocations.remove(size, this);
+ allocator->freeAllocations.remove(next->size, next);
+
+ size += next->size;
+ Allocation *newNext = next->next;
+ delete next;
+ next = newNext;
+ if (next)
+ next->prev = this;
+
+ allocator->freeAllocations.insert(size, this);
+ return true;
+}
+
+// Coalesces this free allocation into its predecessor when that predecessor
+// is also free. On success this node is deleted (`delete this`), so the
+// caller must not touch the object after a true return.
+bool ExecutableAllocator::Allocation::mergePrevious(ExecutableAllocator *allocator)
+{
+ assert(free);
+ if (!prev || !prev->free)
+ return false;
+
+ // Take both nodes out of the size-keyed free map before resizing.
+ allocator->freeAllocations.remove(size, this);
+ allocator->freeAllocations.remove(prev->size, prev);
+
+ prev->size += size;
+ if (next)
+ next->prev = prev;
+ prev->next = next;
+
+ allocator->freeAllocations.insert(prev->size, prev);
+
+ delete this;
+ return true;
+}
+
+// Destroys a chunk: frees every Allocation node in its linked list, then
+// returns the underlying page range to the OS and deletes the PageAllocation.
+ExecutableAllocator::ChunkOfPages::~ChunkOfPages()
+{
+ Allocation *alloc = firstAllocation;
+ while (alloc) {
+ Allocation *next = alloc->next;
+ delete alloc;
+ alloc = next;
+ }
+ pages->deallocate();
+ delete pages;
+}
+
+// Linear scan of this chunk's allocation list; true when alloc is one of
+// its nodes. Used only from assertions in ExecutableAllocator::free().
+bool ExecutableAllocator::ChunkOfPages::contains(Allocation *alloc) const
+{
+ Allocation *it = firstAllocation;
+ while (it) {
+ if (it == alloc)
+ return true;
+ it = it->next;
+ }
+ return false;
+}
+
+// Releases every chunk (and with it every allocation and its pages).
+ExecutableAllocator::~ExecutableAllocator()
+{
+ qDeleteAll(chunks);
+}
+
+// Hands out an executable-memory allocation of at least `size` bytes.
+// Strategy: round the request up to 16 bytes, take the smallest free block
+// that fits (lowerBound on the size-keyed multimap), or map a fresh chunk of
+// whole pages when nothing fits. Oversized blocks are split and the
+// remainder is returned to the free map.
+ExecutableAllocator::Allocation *ExecutableAllocator::allocate(size_t size)
+{
+ Allocation *allocation = 0;
+
+ // Code is best aligned to 16-byte boundaries.
+ size = WTF::roundUpToMultipleOf(16, size);
+
+ // Best-fit lookup: first free block whose size is >= the request.
+ QMultiMap<size_t, Allocation*>::Iterator it = freeAllocations.lowerBound(size);
+ if (it != freeAllocations.end()) {
+ allocation = *it;
+ freeAllocations.erase(it);
+ }
+
+ if (!allocation) {
+ // No free block fits: map a new chunk of JIT-code pages.
+ ChunkOfPages *chunk = new ChunkOfPages;
+ size_t allocSize = WTF::roundUpToMultipleOf(WTF::pageSize(), size);
+ chunk->pages = new WTF::PageAllocation(WTF::PageAllocation::allocate(allocSize, OSAllocator::JSJITCodePages));
+ // Keyed at base - 1 so that free()'s lowerBound(addr) lands on or
+ // after the owning chunk and can step back to it.
+ chunks.insert(reinterpret_cast<quintptr>(chunk->pages->base()) - 1, chunk);
+ allocation = new Allocation;
+ allocation->addr = reinterpret_cast<quintptr>(chunk->pages->base());
+ allocation->size = allocSize;
+ allocation->free = true;
+ chunk->firstAllocation = allocation;
+ }
+
+ assert(allocation);
+ assert(allocation->free);
+
+ allocation->free = false;
+
+ // Give back whatever the request does not need.
+ if (allocation->size > size) {
+ Allocation *remainder = allocation->split(size);
+ remainder->free = true;
+ if (!remainder->mergeNext(this))
+ freeAllocations.insert(remainder->size, remainder);
+ }
+
+ return allocation;
+}
+
+// Returns an allocation to the allocator: marks it free, coalesces it with
+// free neighbours, and unmaps the whole chunk once it consists of a single
+// (free) allocation again.
+void ExecutableAllocator::free(Allocation *allocation)
+{
+ assert(allocation);
+
+ allocation->free = true;
+
+ // Locate the chunk owning this allocation. Chunks are keyed at
+ // base - 1, so lowerBound(addr) never lands before the owner.
+ QMap<quintptr, ChunkOfPages*>::Iterator it = chunks.lowerBound(allocation->addr);
+ if (it != chunks.begin())
+ --it;
+ assert(it != chunks.end());
+ ChunkOfPages *chunk = *it;
+ assert(chunk->contains(allocation));
+
+ // NOTE: mergePrevious may delete `allocation` itself; it is only
+ // re-inserted into the free map when neither merge happened.
+ bool merged = allocation->mergeNext(this);
+ merged |= allocation->mergePrevious(this);
+ if (!merged)
+ freeAllocations.insert(allocation->size, allocation);
+
+ // Defensive: the pointer may dangle after mergePrevious.
+ allocation = 0;
+
+ // A single node means the chunk is entirely free: release its pages.
+ if (!chunk->firstAllocation->next) {
+ freeAllocations.remove(chunk->firstAllocation->size, chunk->firstAllocation);
+ chunks.erase(it);
+ delete chunk;
+ return;
+ }
+}
diff --git a/src/qml/qml/v4vm/qv4executableallocator.h b/src/qml/qml/v4vm/qv4executableallocator.h
new file mode 100644
index 0000000000..8e45288c54
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4executableallocator.h
@@ -0,0 +1,121 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4EXECUTABLEALLOCATOR_H
+#define QV4EXECUTABLEALLOCATOR_H
+
+#include "qv4global.h"
+
+#include <QMultiMap>
+#include <QHash>
+#include <QVector>
+
+namespace WTF {
+struct PageAllocation;
+};
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+// Allocator for executable (JIT code) memory. Memory is obtained from the
+// OS in page-sized chunks; each chunk is carved into a doubly linked list
+// of Allocation nodes, and free nodes are additionally indexed by size in
+// a multimap for best-fit reuse.
+class Q_AUTOTEST_EXPORT ExecutableAllocator
+{
+ struct ChunkOfPages;
+public:
+ struct Allocation;
+
+ ~ExecutableAllocator();
+
+ // Returns an allocation of at least `size` bytes of executable memory.
+ Allocation *allocate(size_t size);
+ // Gives an allocation back; may coalesce neighbours and unmap pages.
+ void free(Allocation *allocation);
+
+ // One slice of a chunk; nodes form a doubly linked list ordered by
+ // address within their chunk.
+ struct Allocation
+ {
+ Allocation()
+ : addr(0)
+ , size(0)
+ , free(true)
+ , next(0)
+ , prev(0)
+ {}
+
+ void *start() const;
+
+ private:
+ friend class ExecutableAllocator;
+
+ Allocation *split(size_t dividingSize);
+ bool mergeNext(ExecutableAllocator *allocator);
+ bool mergePrevious(ExecutableAllocator *allocator);
+
+ quintptr addr;
+ uint size : 31; // More than 2GB of function code? nah :)
+ uint free : 1;
+ Allocation *next;
+ Allocation *prev;
+ };
+
+ // for debugging / unit-testing
+ int freeAllocationCount() const { return freeAllocations.count(); }
+ int chunkCount() const { return chunks.count(); }
+
+private:
+ // A page-aligned range mapped from the OS plus its allocation list.
+ struct ChunkOfPages
+ {
+ ~ChunkOfPages();
+
+ WTF::PageAllocation *pages;
+ Allocation *firstAllocation;
+
+ bool contains(Allocation *alloc) const;
+ };
+
+ // Free nodes indexed by size, for best-fit lookup in allocate().
+ QMultiMap<size_t, Allocation*> freeAllocations;
+ // Chunks ordered by address (keyed just below their base; see .cpp).
+ QMap<quintptr, ChunkOfPages*> chunks;
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4EXECUTABLEALLOCATOR_H
diff --git a/src/qml/qml/v4vm/qv4functionobject.cpp b/src/qml/qml/v4vm/qv4functionobject.cpp
new file mode 100644
index 0000000000..2380c1a994
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4functionobject.cpp
@@ -0,0 +1,522 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4object.h"
+#include "qv4jsir_p.h"
+#include "qv4isel_p.h"
+#include "qv4objectproto.h"
+#include "qv4stringobject.h"
+#include "qv4mm.h"
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include "private/qlocale_tools_p.h"
+
+#include <QtCore/qmath.h>
+#include <QtCore/QDebug>
+#include <cassert>
+#include <typeinfo>
+#include <iostream>
+#include "qv4alloca_p.h"
+
+using namespace QQmlJS::VM;
+
+
+DEFINE_MANAGED_VTABLE(FunctionObject);
+
+// Frees the bytecode/data buffer owned by this compiled function.
+Function::~Function()
+{
+ delete[] codeData;
+}
+
+// GC marking: marks every heap object this compiled function keeps alive —
+// its name, formal parameter and local names, compile-time generated
+// values, and identifiers.
+void Function::mark()
+{
+ if (name)
+ name->mark();
+ for (int i = 0; i < formals.size(); ++i)
+ formals.at(i)->mark();
+ for (int i = 0; i < locals.size(); ++i)
+ locals.at(i)->mark();
+ for (int i = 0; i < generatedValues.size(); ++i)
+ if (Managed *m = generatedValues.at(i).asManaged())
+ m->mark();
+ for (int i = 0; i < identifiers.size(); ++i)
+ identifiers.at(i)->mark();
+}
+
+// Base constructor for all function objects: installs the function vtable,
+// sets Function.prototype as the object's prototype, and defaults to a
+// non-strict function that needs an activation record.
+FunctionObject::FunctionObject(ExecutionContext *scope)
+ : Object(scope->engine)
+ , scope(scope)
+ , name(0)
+ , formalParameterList(0)
+ , varList(0)
+ , formalParameterCount(0)
+ , varCount(0)
+ , function(0)
+{
+ vtbl = &static_vtbl;
+ prototype = scope->engine->functionPrototype;
+
+ type = Type_FunctionObject;
+ needsActivation = true;
+ usesArgumentsObject = false;
+ strictMode = false;
+#ifndef QT_NO_DEBUG
+ // Sanity check against a poisoned context pointer — presumably 0x1 is
+ // a poison value used elsewhere; TODO confirm.
+ assert(scope->next != (ExecutionContext *)0x1);
+#endif
+}
+
+// Implements `value instanceof f`: walks value's prototype chain and
+// returns true when f's "prototype" property is found on it. Non-objects
+// are never instances; a non-object "prototype" property is a TypeError.
+bool FunctionObject::hasInstance(Managed *that, ExecutionContext *ctx, const Value &value)
+{
+ FunctionObject *f = static_cast<FunctionObject *>(that);
+
+ Object *v = value.asObject();
+ if (!v)
+ return false;
+
+ Object *o = f->get(ctx, ctx->engine->id_prototype).asObject();
+ if (!o)
+ ctx->throwTypeError();
+
+ while (v) {
+ v = v->prototype;
+
+ if (! v)
+ break;
+ else if (o == v)
+ return true;
+ }
+
+ return false;
+}
+
+// Default [[Construct]]: creates a fresh object whose prototype is the
+// function's "prototype" property (when that property is an object) and
+// returns it. Arguments are ignored at this level.
+Value FunctionObject::construct(Managed *that, ExecutionContext *context, Value *, int)
+{
+ FunctionObject *f = static_cast<FunctionObject *>(that);
+
+ Object *obj = context->engine->newObject();
+ Value proto = f->get(context, context->engine->id_prototype);
+ if (proto.isObject())
+ obj->prototype = proto.objectValue();
+ return Value::fromObject(obj);
+}
+
+// Default [[Call]]: the base function object does nothing and yields undefined.
+Value FunctionObject::call(Managed *, ExecutionContext *, const Value &, Value *, int)
+{
+ return Value::undefinedValue();
+}
+
+// GC marking for function objects: marks the name, the defining scope, and
+// the compiled function, then delegates to Object for the common members.
+void FunctionObject::markObjects(Managed *that)
+{
+ FunctionObject *o = static_cast<FunctionObject *>(that);
+ if (o->name)
+ o->name->mark();
+ // these are marked in VM::Function:
+// for (uint i = 0; i < formalParameterCount; ++i)
+// formalParameterList[i]->mark();
+// for (uint i = 0; i < varCount; ++i)
+// varList[i]->mark();
+ o->scope->mark();
+ if (o->function)
+ o->function->mark();
+
+ Object::markObjects(that);
+}
+
+
+DEFINE_MANAGED_VTABLE(FunctionCtor);
+
+// The global Function constructor object; only swaps in its own vtable.
+FunctionCtor::FunctionCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+// 15.3.2
+Value FunctionCtor::construct(Managed *that, ExecutionContext *ctx, Value *args, int argc)
+{
+ FunctionCtor *f = static_cast<FunctionCtor *>(that);
+ MemoryManager::GCBlocker gcBlocker(ctx->engine->memoryManager);
+
+ QString arguments;
+ QString body;
+ if (argc > 0) {
+ for (uint i = 0; i < argc - 1; ++i) {
+ if (i)
+ arguments += QLatin1String(", ");
+ arguments += args[i].toString(ctx)->toQString();
+ }
+ body = args[argc - 1].toString(ctx)->toQString();
+ }
+
+ QString function = QLatin1String("function(") + arguments + QLatin1String("){") + body + QLatin1String("}");
+
+ QQmlJS::Engine ee, *engine = &ee;
+ Lexer lexer(engine);
+ lexer.setCode(function, 1, false);
+ Parser parser(engine);
+
+ const bool parsed = parser.parseExpression();
+
+ if (!parsed)
+ ctx->throwSyntaxError(0);
+
+ using namespace AST;
+ FunctionExpression *fe = AST::cast<FunctionExpression *>(parser.rootNode());
+ if (!fe)
+ ctx->throwSyntaxError(0);
+
+ V4IR::Module module;
+
+ Codegen cg(ctx, f->strictMode);
+ V4IR::Function *irf = cg(QString(), function, fe, &module);
+
+ QScopedPointer<EvalInstructionSelection> isel(ctx->engine->iselFactory->create(ctx->engine, &module));
+ VM::Function *vmf = isel->vmFunction(irf);
+
+ return Value::fromObject(ctx->engine->newScriptFunction(ctx->engine->rootContext, vmf));
+}
+
+// 15.3.1: This is equivalent to new Function(...)
+Value FunctionCtor::call(Managed *that, ExecutionContext *context, const Value &thisObject, Value *args, int argc)
+{
+ return construct(that, context, args, argc);
+}
+
+// Sets up Function.prototype: links constructor <-> prototype and installs
+// length plus the standard toString/apply/call/bind methods.
+void FunctionPrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(1));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+
+ defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(0));
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("apply"), method_apply, 2);
+ defineDefaultProperty(ctx, QStringLiteral("call"), method_call, 1);
+ defineDefaultProperty(ctx, QStringLiteral("bind"), method_bind, 1);
+
+}
+
+// Function.prototype.toString: returns a fixed placeholder source string;
+// throws a TypeError when `this` is not a function.
+Value FunctionPrototype::method_toString(SimpleCallContext *ctx)
+{
+ FunctionObject *fun = ctx->thisObject.asFunctionObject();
+ if (!fun)
+ ctx->throwTypeError();
+
+ return Value::fromString(ctx, QStringLiteral("function() { [code] }"));
+}
+
+// Function.prototype.apply(thisArg, argArray): spreads an array-like
+// argument (via its "length" property) into a call of `this`. A non-object
+// argArray that is not null/undefined, or a non-callable `this`, throws a
+// TypeError.
+Value FunctionPrototype::method_apply(SimpleCallContext *ctx)
+{
+ Value thisArg = ctx->argument(0);
+
+ Value arg = ctx->argument(1);
+ QVector<Value> args;
+
+ if (Object *arr = arg.asObject()) {
+ quint32 len = arr->get(ctx, ctx->engine->id_length).toUInt32();
+ for (quint32 i = 0; i < len; ++i) {
+ Value a = arr->getIndexed(ctx, i);
+ args.append(a);
+ }
+ } else if (!(arg.isUndefined() || arg.isNull())) {
+ ctx->throwTypeError();
+ return Value::undefinedValue();
+ }
+
+ FunctionObject *o = ctx->thisObject.asFunctionObject();
+ if (!o)
+ ctx->throwTypeError();
+
+ return o->call(ctx, thisArg, args.data(), args.size());
+}
+
+// Function.prototype.call(thisArg, ...args): invokes `this` with the first
+// argument as the this-value and the remaining arguments forwarded.
+Value FunctionPrototype::method_call(SimpleCallContext *ctx)
+{
+ Value thisArg = ctx->argument(0);
+
+ // Copy every argument after thisArg into a contiguous vector.
+ QVector<Value> args(ctx->argumentCount ? ctx->argumentCount - 1 : 0);
+ if (ctx->argumentCount)
+ qCopy(ctx->arguments + 1,
+ ctx->arguments + ctx->argumentCount, args.begin());
+
+ FunctionObject *o = ctx->thisObject.asFunctionObject();
+ if (!o)
+ ctx->throwTypeError();
+
+ return o->call(ctx, thisArg, args.data(), args.size());
+}
+
+// Function.prototype.bind(thisArg, ...args): wraps `this` in a BoundFunction
+// carrying the bound this-value and the pre-supplied arguments.
+Value FunctionPrototype::method_bind(SimpleCallContext *ctx)
+{
+ FunctionObject *target = ctx->thisObject.asFunctionObject();
+ if (!target)
+ ctx->throwTypeError();
+
+ Value boundThis = ctx->argument(0);
+ QVector<Value> boundArgs;
+ for (uint i = 1; i < ctx->argumentCount; ++i)
+ boundArgs += ctx->argument(i);
+
+
+ BoundFunction *f = ctx->engine->newBoundFunction(ctx->engine->rootContext, target, boundThis, boundArgs);
+ return Value::fromObject(f);
+}
+
+
+// Poison accessor used for the "caller"/"arguments" properties of strict
+// and bound functions: any access throws a TypeError.
+static Value throwTypeError(SimpleCallContext *ctx)
+{
+ ctx->throwTypeError();
+ return Value::undefinedValue();
+}
+
+DEFINE_MANAGED_VTABLE(ScriptFunction);
+
+// Wraps a compiled VM::Function in a callable script-function object:
+// copies the metadata (name, strictness, formals, locals), installs the
+// read-only "length", creates the default "prototype" object, and — for
+// strict functions — poisons "caller"/"arguments" with throwing accessors.
+ScriptFunction::ScriptFunction(ExecutionContext *scope, Function *function)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+ this->function = function;
+ assert(function);
+ assert(function->code);
+
+ // global function
+ if (!scope)
+ return;
+
+ MemoryManager::GCBlocker gcBlocker(scope->engine->memoryManager);
+
+ name = function->name;
+ needsActivation = function->needsActivation();
+ usesArgumentsObject = function->usesArgumentsObject;
+ strictMode = function->isStrict;
+ formalParameterCount = function->formals.size();
+ formalParameterList = function->formals.constData();
+ defineReadonlyProperty(scope->engine->id_length, Value::fromInt32(formalParameterCount));
+
+ varCount = function->locals.size();
+ varList = function->locals.constData();
+
+ Object *proto = scope->engine->newObject();
+ proto->defineDefaultProperty(scope->engine->id_constructor, Value::fromObject(this));
+ Property *pd = insertMember(scope->engine->id_prototype, Attr_NotEnumerable|Attr_NotConfigurable);
+ pd->value = Value::fromObject(proto);
+
+ if (scope->strictMode) {
+ FunctionObject *thrower = scope->engine->newBuiltinFunction(scope, 0, throwTypeError);
+ // Renamed from `pd` to avoid shadowing the Property* above.
+ Property throwerPd = Property::fromAccessor(thrower, thrower);
+ __defineOwnProperty__(scope, QStringLiteral("caller"), throwerPd, Attr_Accessor|Attr_NotEnumerable|Attr_NotConfigurable);
+ __defineOwnProperty__(scope, QStringLiteral("arguments"), throwerPd, Attr_Accessor|Attr_NotEnumerable|Attr_NotConfigurable);
+ }
+}
+
+// [[Construct]] for script functions: allocates the new object, runs the
+// function body in a fresh (stack-embedded) call context with the object as
+// `this`, and returns the body's result when it is an object, else the new
+// object. Exceptions partially unwind the context chain and rethrow.
+Value ScriptFunction::construct(Managed *that, ExecutionContext *context, Value *args, int argc)
+{
+ ScriptFunction *f = static_cast<ScriptFunction *>(that);
+ assert(f->function->code);
+ Object *obj = context->engine->newObject();
+ Value proto = f->get(context, context->engine->id_prototype);
+ if (proto.isObject())
+ obj->prototype = proto.objectValue();
+
+ // The call context lives on the native stack, not the heap.
+ quintptr stackSpace[stackContextSize/sizeof(quintptr)];
+ ExecutionContext *ctx = context->engine->newCallContext(stackSpace, f, Value::fromObject(obj), args, argc);
+
+ Value result;
+ try {
+ result = f->function->code(ctx, f->function->codeData);
+ } catch (Exception &ex) {
+ ex.partiallyUnwindContext(context);
+ throw;
+ }
+ ctx->engine->popContext();
+
+ if (result.isObject())
+ return result;
+ return Value::fromObject(obj);
+}
+
+// [[Call]] for script functions: runs the body in a fresh stack-embedded
+// call context. In non-strict mode a null/undefined this-value is replaced
+// by the global object, any other primitive is boxed via toObject().
+Value ScriptFunction::call(Managed *that, ExecutionContext *context, const Value &thisObject, Value *args, int argc)
+{
+ ScriptFunction *f = static_cast<ScriptFunction *>(that);
+ assert(f->function->code);
+ quintptr stackSpace[stackContextSize/sizeof(quintptr)];
+ ExecutionContext *ctx = context->engine->newCallContext(stackSpace, f, thisObject, args, argc);
+
+ if (!f->strictMode && !thisObject.isObject()) {
+ if (thisObject.isUndefined() || thisObject.isNull()) {
+ ctx->thisObject = Value::fromObject(context->engine->globalObject);
+ } else {
+ ctx->thisObject = Value::fromObject(thisObject.toObject(context));
+ }
+ }
+
+ Value result;
+ try {
+ result = f->function->code(ctx, f->function->codeData);
+ } catch (Exception &ex) {
+ // Unwind only down to the caller's context, then propagate.
+ ex.partiallyUnwindContext(context);
+ throw;
+ }
+ ctx->engine->popContext();
+ return result;
+}
+
+
+
+DEFINE_MANAGED_VTABLE(BuiltinFunctionOld);
+
+// Wraps a native C++ function pointer as a callable engine builtin.
+BuiltinFunctionOld::BuiltinFunctionOld(ExecutionContext *scope, String *name, Value (*code)(SimpleCallContext *))
+ : FunctionObject(scope)
+ , code(code)
+{
+ vtbl = &static_vtbl;
+ this->name = name;
+ isBuiltinFunction = true;
+}
+
+// Builtins are not constructors: `new` on one always throws a TypeError.
+Value BuiltinFunctionOld::construct(Managed *, ExecutionContext *ctx, Value *, int)
+{
+ ctx->throwTypeError();
+ return Value::undefinedValue();
+}
+
+// [[Call]] for builtins: sets up a SimpleCallContext on the native stack,
+// pushes it, runs the native code pointer, and pops on the way out (also on
+// exceptions, after partially unwinding).
+Value BuiltinFunctionOld::call(Managed *that, ExecutionContext *context, const Value &thisObject, Value *args, int argc)
+{
+ BuiltinFunctionOld *f = static_cast<BuiltinFunctionOld *>(that);
+ SimpleCallContext ctx;
+ ctx.type = ExecutionContext::Type_SimpleCallContext;
+ ctx.strictMode = f->scope->strictMode; // ### needed? scope or parent context?
+ ctx.marked = false;
+ ctx.thisObject = thisObject;
+ ctx.engine = f->scope->engine;
+ ctx.arguments = args;
+ ctx.argumentCount = argc;
+ context->engine->pushContext(&ctx);
+
+ if (!f->strictMode && !thisObject.isObject()) {
+ // Built-in functions allow for the this object to be null or undefined. This overrides
+ // the behaviour of changing thisObject to the global object if null/undefined and allows
+ // the built-in functions for example to throw a type error if null is passed.
+ if (!thisObject.isUndefined() && !thisObject.isNull())
+ ctx.thisObject = Value::fromObject(thisObject.toObject(context));
+ }
+
+ Value result = Value::undefinedValue();
+ try {
+ result = f->code(&ctx);
+ } catch (Exception &ex) {
+ ex.partiallyUnwindContext(context);
+ throw;
+ }
+
+ context->engine->popContext();
+ return result;
+}
+
+
+DEFINE_MANAGED_VTABLE(BoundFunction);
+
+// Result of Function.prototype.bind: stores the target, the bound
+// this-value and the pre-supplied arguments. "length" is the target's
+// length minus the bound argument count (clamped at 0); "caller" and
+// "arguments" are poisoned with throwing accessors per the spec.
+BoundFunction::BoundFunction(ExecutionContext *scope, FunctionObject *target, Value boundThis, const QVector<Value> &boundArgs)
+ : FunctionObject(scope)
+ , target(target)
+ , boundThis(boundThis)
+ , boundArgs(boundArgs)
+{
+ vtbl = &static_vtbl;
+ int len = target->get(scope, scope->engine->id_length).toUInt32();
+ len -= boundArgs.size();
+ if (len < 0)
+ len = 0;
+ defineReadonlyProperty(scope->engine->id_length, Value::fromInt32(len));
+
+ FunctionObject *thrower = scope->engine->newBuiltinFunction(scope, 0, throwTypeError);
+ Property pd = Property::fromAccessor(thrower, thrower);
+ *insertMember(scope->engine->id_arguments, Attr_Accessor|Attr_NotConfigurable|Attr_NotEnumerable) = pd;
+ *insertMember(scope->engine->id_caller, Attr_Accessor|Attr_NotConfigurable|Attr_NotEnumerable) = pd;
+}
+
+// GC finalizer hook: runs the destructor (needed for the QVector member).
+void BoundFunction::destroy(Managed *that)
+{
+ static_cast<BoundFunction *>(that)->~BoundFunction();
+}
+
+// [[Call]] for bound functions: concatenates the bound arguments with the
+// call-site arguments in stack memory and invokes the target with the
+// stored boundThis. The incoming this-value is ignored, per the spec.
+Value BoundFunction::call(Managed *that, ExecutionContext *context, const Value &, Value *args, int argc)
+{
+ BoundFunction *f = static_cast<BoundFunction *>(that);
+ Value *newArgs = static_cast<Value *>(alloca(sizeof(Value)*(f->boundArgs.size() + argc)));
+ memcpy(newArgs, f->boundArgs.constData(), f->boundArgs.size()*sizeof(Value));
+ memcpy(newArgs + f->boundArgs.size(), args, argc*sizeof(Value));
+
+ return f->target->call(context, f->boundThis, newArgs, f->boundArgs.size() + argc);
+}
+
+// [[Construct]] for bound functions: prepends the bound arguments and
+// forwards construction to the target function.
+Value BoundFunction::construct(Managed *that, ExecutionContext *context, Value *args, int argc)
+{
+ BoundFunction *f = static_cast<BoundFunction *>(that);
+ Value *newArgs = static_cast<Value *>(alloca(sizeof(Value)*(f->boundArgs.size() + argc)));
+ memcpy(newArgs, f->boundArgs.constData(), f->boundArgs.size()*sizeof(Value));
+ memcpy(newArgs + f->boundArgs.size(), args, argc*sizeof(Value));
+
+ return f->target->construct(context, newArgs, f->boundArgs.size() + argc);
+}
+
+// instanceof against a bound function delegates to the target function.
+bool BoundFunction::hasInstance(Managed *that, ExecutionContext *ctx, const Value &value)
+{
+ BoundFunction *f = static_cast<BoundFunction *>(that);
+ return FunctionObject::hasInstance(f->target, ctx, value);
+}
+
+// GC marking: marks the target, the bound this-value and every bound
+// argument, then delegates to FunctionObject for the common members.
+void BoundFunction::markObjects(Managed *that)
+{
+ BoundFunction *o = static_cast<BoundFunction *>(that);
+ o->target->mark();
+ if (Managed *m = o->boundThis.asManaged())
+ m->mark();
+ for (int i = 0; i < o->boundArgs.size(); ++i)
+ if (Managed *m = o->boundArgs.at(i).asManaged())
+ m->mark();
+ FunctionObject::markObjects(that);
+}
diff --git a/src/qml/qml/v4vm/qv4functionobject.h b/src/qml/qml/v4vm/qv4functionobject.h
new file mode 100644
index 0000000000..4dddc048f9
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4functionobject.h
@@ -0,0 +1,243 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4FUNCTIONOBJECT_H
+#define QV4FUNCTIONOBJECT_H
+
+#include "qv4global.h"
+#include "qv4runtime.h"
+#include "qv4engine.h"
+#include "qv4context.h"
+#include "qv4object.h"
+#include "qv4string.h"
+#include "qv4codegen_p.h"
+#include "qv4isel_p.h"
+#include "qv4managed.h"
+#include "qv4property.h"
+#include "qv4objectiterator.h"
+#include "qv4regexp.h"
+
+#include <QtCore/QString>
+#include <QtCore/QHash>
+#include <QtCore/QScopedPointer>
+#include <cstdio>
+#include <cassert>
+
+#include <config.h>
+#include <assembler/MacroAssemblerCodeRef.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct Value;
+struct Function;
+struct Object;
+struct ObjectIterator;
+struct BooleanObject;
+struct NumberObject;
+struct StringObject;
+struct ArrayObject;
+struct DateObject;
+struct FunctionObject;
+struct RegExpObject;
+struct ErrorObject;
+struct ArgumentsObject;
+struct ExecutionContext;
+struct ExecutionEngine;
+class MemoryManager;
+
+struct ObjectPrototype;
+struct StringPrototype;
+struct NumberPrototype;
+struct BooleanPrototype;
+struct ArrayPrototype;
+struct FunctionPrototype;
+struct DatePrototype;
+struct RegExpPrototype;
+struct ErrorPrototype;
+struct EvalErrorPrototype;
+struct RangeErrorPrototype;
+struct ReferenceErrorPrototype;
+struct SyntaxErrorPrototype;
+struct TypeErrorPrototype;
+struct URIErrorPrototype;
+struct InternalClass;
+struct Lookup;
+
+// A compiled function: the executable entry point plus all compile-time
+// metadata (names, nesting, strictness) the runtime needs to build callable
+// FunctionObjects and activation records from it.
+struct Function {
+ String *name;
+
+ // Entry point and the data blob it executes over.
+ VM::Value (*code)(VM::ExecutionContext *, const uchar *);
+ const uchar *codeData;
+ JSC::MacroAssemblerCodeRef codeRef;
+ quint32 codeSize;
+ QByteArray unwindInfo; // CIE+FDE on x86/x86-64. Stored directly in code on ARM.
+
+ QVector<String *> formals;
+ QVector<String *> locals;
+ QVector<Value> generatedValues;
+ QVector<String *> identifiers;
+ QVector<Function *> nestedFunctions;
+ Function *outer;
+
+ Lookup *lookups;
+
+ bool hasNestedFunctions;
+ bool hasDirectEval;
+ bool usesArgumentsObject;
+ bool isStrict;
+ bool isNamedExpression;
+
+ Function(String *name)
+ : name(name)
+ , code(0)
+ , codeData(0)
+ , codeSize(0)
+ , outer(0)
+ , lookups(0)
+ , hasNestedFunctions(false) // was 0; use false like the other bools
+ , hasDirectEval(false)
+ , usesArgumentsObject(false)
+ , isStrict(false)
+ , isNamedExpression(false)
+ {}
+ ~Function();
+
+ // An activation record is needed whenever locals can be observed from
+ // outside the frame: closures, direct eval, or the arguments object.
+ inline bool needsActivation() const { return hasNestedFunctions || hasDirectEval || usesArgumentsObject; }
+
+ void mark();
+};
+
+// Base class of every callable JS object. Dispatches [[Call]] and
+// [[Construct]] through the managed vtable so subclasses override behaviour
+// by swapping static_vtbl, not via virtual functions.
+struct Q_V4_EXPORT FunctionObject: Object {
+ ExecutionContext *scope; // context the function closes over
+ String *name;
+ String * const *formalParameterList;
+ String * const *varList;
+ unsigned int formalParameterCount;
+ unsigned int varCount;
+ VM::Function *function; // compiled code, 0 for pure native functions
+
+ FunctionObject(ExecutionContext *scope);
+
+ static Value construct(Managed *that, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+ // Convenience wrappers dispatching through the vtable.
+ inline Value construct(ExecutionContext *context, Value *args, int argc) {
+ return vtbl->construct(this, context, args, argc);
+ }
+ inline Value call(ExecutionContext *context, const Value &thisObject, Value *args, int argc) {
+ return vtbl->call(this, context, thisObject, args, argc);
+ }
+
+protected:
+ static const ManagedVTable static_vtbl;
+ static void markObjects(Managed *that);
+ static bool hasInstance(Managed *that, ExecutionContext *ctx, const Value &value);
+};
+
+// The global `Function` constructor (ECMA 15.3): compiles source text
+// passed as arguments into a new function.
+struct FunctionCtor: FunctionObject
+{
+ FunctionCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *that, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+// Function.prototype: carries the standard toString/apply/call/bind
+// methods, installed by init().
+struct FunctionPrototype: FunctionObject
+{
+ FunctionPrototype(ExecutionContext *ctx): FunctionObject(ctx) {}
+ void init(ExecutionContext *ctx, const Value &ctor);
+
+ static Value method_toString(SimpleCallContext *ctx);
+ static Value method_apply(SimpleCallContext *ctx);
+ static Value method_call(SimpleCallContext *ctx);
+ static Value method_bind(SimpleCallContext *ctx);
+};
+
+// A native builtin backed by a plain C++ function pointer; calling it runs
+// the pointer, constructing it throws a TypeError.
+struct BuiltinFunctionOld: FunctionObject {
+ Value (*code)(SimpleCallContext *);
+
+ BuiltinFunctionOld(ExecutionContext *scope, String *name, Value (*code)(SimpleCallContext *));
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+// A function compiled from JS source; wraps a VM::Function and executes it
+// in a fresh call context on [[Call]]/[[Construct]].
+struct ScriptFunction: FunctionObject {
+ ScriptFunction(ExecutionContext *scope, VM::Function *function);
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+// Result of Function.prototype.bind: forwards calls/constructions to
+// `target` with `boundThis` and `boundArgs` prepended. Needs a destroy()
+// hook because boundArgs (a QVector) requires a real destructor run.
+struct BoundFunction: FunctionObject {
+ FunctionObject *target;
+ Value boundThis;
+ QVector<Value> boundArgs;
+
+ BoundFunction(ExecutionContext *scope, FunctionObject *target, Value boundThis, const QVector<Value> &boundArgs);
+ ~BoundFunction() {}
+
+
+ static Value construct(Managed *, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+ static const ManagedVTable static_vtbl;
+ static void destroy(Managed *);
+ static void markObjects(Managed *that);
+ static bool hasInstance(Managed *that, ExecutionContext *ctx, const Value &value);
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4FUNCTIONOBJECT_H
diff --git a/src/qml/qml/v4vm/qv4global.h b/src/qml/qml/v4vm/qv4global.h
new file mode 100644
index 0000000000..5f3f1b6d17
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4global.h
@@ -0,0 +1,169 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4GLOBAL_H
+#define QV4GLOBAL_H
+
+#include <QtCore/qglobal.h>
+
+QT_BEGIN_NAMESPACE
+
+#ifndef QT_STATIC
+# if defined(QT_BUILD_V4_LIB)
+# define Q_V4_EXPORT Q_DECL_EXPORT
+# else
+# define Q_V4_EXPORT Q_DECL_IMPORT
+# endif
+#else
+# define Q_V4_EXPORT
+#endif
+
+namespace QQmlJS {
+namespace VM {
+
+
+// Property attribute flags; combined into PropertyFlags and converted
+// to the packed PropertyAttributes representation below.
+enum PropertyFlag {
+ Attr_Data = 0,
+ Attr_Accessor = 0x1,
+ Attr_NotWritable = 0x2,
+ Attr_NotEnumerable = 0x4,
+ Attr_NotConfigurable = 0x8,
+ Attr_ReadOnly = Attr_NotWritable|Attr_NotEnumerable|Attr_NotConfigurable,
+ Attr_Invalid = 0xff // sentinel: no attributes specified
+};
+
+Q_DECLARE_FLAGS(PropertyFlags, PropertyFlag);
+Q_DECLARE_OPERATORS_FOR_FLAGS(PropertyFlags);
+
+// One-byte packed representation of an ES5 property descriptor's attributes.
+// Three overlapping views of the same byte:
+//  - m_all: all eight bits at once (fast copy/compare/clear)
+//  - m_flags / m_mask: the four value bits and the four "explicitly set" bits
+//  - individual bit-fields: one value bit plus one *_set bit per attribute,
+//    since a descriptor may leave attributes unspecified.
+struct PropertyAttributes
+{
+ union {
+ uchar m_all;
+ struct {
+ uchar m_flags : 4;
+ uchar m_mask : 4;
+ };
+ struct {
+ uchar m_type : 1;
+ uchar m_writable : 1;
+ uchar m_enumerable : 1;
+ uchar m_configurable : 1;
+ uchar type_set : 1;
+ uchar writable_set : 1;
+ uchar enumerable_set : 1;
+ uchar configurable_set : 1;
+ };
+ };
+
+ enum Type {
+ Data = 0,
+ Accessor = 1,
+ Generic = 2 // never stored in the 1-bit m_type; reported when type_set is false
+ };
+
+ PropertyAttributes() : m_all(0) {}
+ // Converts a single flag; accessor properties carry no writable bit.
+ PropertyAttributes(PropertyFlag f) : m_all(0) {
+ if (f != Attr_Invalid) {
+ setType(f & Attr_Accessor ? Accessor : Data);
+ if (!(f & Attr_Accessor))
+ setWritable(!(f & Attr_NotWritable));
+ setEnumerable(!(f & Attr_NotEnumerable));
+ setConfigurable(!(f & Attr_NotConfigurable));
+ }
+ }
+ // Same conversion for a flag combination (kept in sync with the overload above).
+ PropertyAttributes(PropertyFlags f) : m_all(0) {
+ if (f != Attr_Invalid) {
+ setType(f & Attr_Accessor ? Accessor : Data);
+ if (!(f & Attr_Accessor))
+ setWritable(!(f & Attr_NotWritable));
+ setEnumerable(!(f & Attr_NotEnumerable));
+ setConfigurable(!(f & Attr_NotConfigurable));
+ }
+ }
+ PropertyAttributes(const PropertyAttributes &other) : m_all(other.m_all) {}
+ PropertyAttributes & operator=(const PropertyAttributes &other) { m_all = other.m_all; return *this; }
+
+ void setType(Type t) { m_type = t; type_set = true; }
+ Type type() const { return type_set ? (Type)m_type : Generic; }
+
+ bool isData() const { return type() == PropertyAttributes::Data || writable_set; }
+ bool isAccessor() const { return type() == PropertyAttributes::Accessor; }
+ bool isGeneric() const { return type() == PropertyAttributes::Generic && !writable_set; }
+
+ bool hasType() const { return type_set; }
+ bool hasWritable() const { return writable_set; }
+ bool hasConfigurable() const { return configurable_set; }
+ bool hasEnumerable() const { return enumerable_set; }
+
+ void setWritable(bool b) { m_writable = b; writable_set = true; }
+ void setConfigurable(bool b) { m_configurable = b; configurable_set = true; }
+ void setEnumerable(bool b) { m_enumerable = b; enumerable_set = true; }
+
+ // Mark every attribute as set; accessors drop the (meaningless) writable bit.
+ void resolve() { m_mask = 0xf; if (m_type == Accessor) { m_writable = false; writable_set = false; } }
+
+ // NOTE(review): returns true for any non-data type regardless of m_writable --
+ // confirm callers only use this on resolved data properties.
+ bool isWritable() const { return m_type != Data || m_writable; }
+ bool isEnumerable() const { return m_enumerable; }
+ bool isConfigurable() const { return m_configurable; }
+
+ void clearType() { m_type = Data; type_set = false; }
+ void clearWritable() { m_writable = false; writable_set = false; }
+ void clearEnumerable() { m_enumerable = false; enumerable_set = false; }
+ void clearConfigurable() { m_configurable = false; configurable_set = false; }
+
+ void clear() { m_all = 0; }
+ bool isEmpty() const { return !m_all; }
+
+ uint flags() const { return m_flags; }
+
+ bool operator==(PropertyAttributes other) {
+ return m_all == other.m_all;
+ }
+ bool operator!=(PropertyAttributes other) {
+ return m_all != other.m_all;
+ }
+};
+
+}
+}
+
+QT_END_NAMESPACE
+
+#endif // QV4GLOBAL_H
diff --git a/src/qml/qml/v4vm/qv4globalobject.cpp b/src/qml/qml/v4vm/qv4globalobject.cpp
new file mode 100644
index 0000000000..75da51a9b6
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4globalobject.cpp
@@ -0,0 +1,731 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4globalobject.h"
+#include "qv4mm.h"
+#include "qv4value.h"
+#include "qv4context.h"
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include "private/qlocale_tools_p.h"
+
+#include <QtCore/QDebug>
+#include <QtCore/QString>
+#include <iostream>
+#include "qv4alloca_p.h"
+
+#include <wtf/MathExtras.h>
+
+using namespace QQmlJS::VM;
+
+// Returns the uppercase hex digit for the low nibble of c.
+static inline char toHex(char c)
+{
+ static const char hexnumbers[] = "0123456789ABCDEF";
+ return hexnumbers[c & 0xf];
+}
+
+// Returns the numeric value (0-15) of a hex digit, or -1 if ch is not
+// a valid hexadecimal digit.
+static int fromHex(QChar ch)
+{
+ ushort c = ch.unicode();
+ if ((c >= '0') && (c <= '9'))
+ return c - '0';
+ if ((c >= 'A') && (c <= 'F'))
+ return c - 'A' + 10;
+ if ((c >= 'a') && (c <= 'f'))
+ return c - 'a' + 10;
+ return -1;
+}
+
+// Global escape() [ES5.1 B.2.1]: characters in [a-z @A-Z -./0-9 * + _]
+// pass through; other Latin-1 characters become %XX, everything else %uXXXX.
+static QString escape(const QString &input)
+{
+ QString output;
+ output.reserve(input.size() * 3);
+ const int length = input.length();
+ for (int i = 0; i < length; ++i) {
+ ushort uc = input.at(i).unicode();
+ if (uc < 0x100) {
+ // ranges: a-z, @ plus A-Z, and - . / 0-9, plus * + _
+ if ( (uc > 0x60 && uc < 0x7B)
+ || (uc > 0x3F && uc < 0x5B)
+ || (uc > 0x2C && uc < 0x3A)
+ || (uc == 0x2A)
+ || (uc == 0x2B)
+ || (uc == 0x5F)) {
+ output.append(QChar(uc));
+ } else {
+ output.append('%');
+ output.append(QChar(toHex(uc >> 4)));
+ output.append(QChar(toHex(uc)));
+ }
+ } else {
+ // non-Latin-1 code unit: %uXXXX form
+ output.append('%');
+ output.append('u');
+ output.append(QChar(toHex(uc >> 12)));
+ output.append(QChar(toHex(uc >> 8)));
+ output.append(QChar(toHex(uc >> 4)));
+ output.append(QChar(toHex(uc)));
+ }
+ }
+ return output;
+}
+
+// Global unescape() [ES5.1 B.2.2]: reverses escape(), decoding %uXXXX and
+// %XX sequences; malformed sequences are copied through unchanged.
+static QString unescape(const QString &input)
+{
+ QString result;
+ result.reserve(input.length());
+ int i = 0;
+ const int length = input.length();
+ while (i < length) {
+ QChar c = input.at(i++);
+ if ((c == '%') && (i + 1 < length)) {
+ QChar a = input.at(i);
+ if ((a == 'u') && (i + 4 < length)) {
+ // %uXXXX: four hex digits encode one UTF-16 code unit
+ int d3 = fromHex(input.at(i+1));
+ int d2 = fromHex(input.at(i+2));
+ int d1 = fromHex(input.at(i+3));
+ int d0 = fromHex(input.at(i+4));
+ if ((d3 != -1) && (d2 != -1) && (d1 != -1) && (d0 != -1)) {
+ ushort uc = ushort((d3 << 12) | (d2 << 8) | (d1 << 4) | d0);
+ result.append(QChar(uc));
+ i += 5;
+ } else {
+ // not a valid %u sequence: emit the '%' literally
+ result.append(c);
+ }
+ } else {
+ // %XX: two hex digits encode one Latin-1 character
+ int d1 = fromHex(a);
+ int d0 = fromHex(input.at(i+1));
+ if ((d1 != -1) && (d0 != -1)) {
+ c = (d1 << 4) | d0;
+ i += 2;
+ }
+ result.append(c);
+ }
+ } else {
+ result.append(c);
+ }
+ }
+ return result;
+}
+
+// Character sets from ES5.1 15.1.3: URI reserved characters, and the
+// characters never escaped by encodeURIComponent / encodeURI respectively.
+static const char uriReserved[] = ";/?:@&=+$,#";
+static const char uriUnescaped[] = "-_.!~*'()";
+static const char uriUnescapedReserved[] = "-_.!~*'();/?:@&=+$,#";
+
+// Appends "%XY" (uppercase hex) for the byte ch to output.
+static void addEscapeSequence(QString &output, uchar ch)
+{
+ output.append(QLatin1Char('%'));
+ output.append(QLatin1Char(toHex(ch >> 4)));
+ output.append(QLatin1Char(toHex(ch & 0xf)));
+}
+
+// Encode abstract operation [ES5.1 15.1.3]: UTF-8 encodes every character
+// not in unescapedSet and percent-escapes each byte. Sets *ok to false on
+// a lone/misordered surrogate (the caller then throws URIError).
+static QString encode(const QString &input, const char *unescapedSet, bool *ok)
+{
+ *ok = true;
+ QString output;
+ const int length = input.length();
+ int i = 0;
+ while (i < length) {
+ const QChar c = input.at(i);
+ bool escape = true;
+ // alphanumerics and members of unescapedSet pass through unescaped
+ if ((c.unicode() >= 'a' && c.unicode() <= 'z') ||
+ (c.unicode() >= 'A' && c.unicode() <= 'Z') ||
+ (c.unicode() >= '0' && c.unicode() <= '9')) {
+ escape = false;
+ } else {
+ const char *r = unescapedSet;
+ while (*r) {
+ if (*r == c.unicode()) {
+ escape = false;
+ break;
+ }
+ ++r;
+ }
+ }
+ if (escape) {
+ uint uc = c.unicode();
+ // an unpaired low surrogate is malformed
+ if ((uc >= 0xDC00) && (uc <= 0xDFFF)) {
+ *ok = false;
+ break;
+ }
+ // high surrogate: must be followed by a low surrogate; combine them
+ if (!((uc < 0xD800) || (uc > 0xDBFF))) {
+ ++i;
+ if (i == length) {
+ *ok = false;
+ break;
+ }
+ const uint uc2 = input.at(i).unicode();
+ if ((uc2 < 0xDC00) || (uc2 > 0xDFFF)) {
+ *ok = false;
+ break;
+ }
+ uc = ((uc - 0xD800) * 0x400) + (uc2 - 0xDC00) + 0x10000;
+ }
+ // emit the UTF-8 bytes of uc as %XX sequences (1-4 bytes)
+ if (uc < 0x80) {
+ addEscapeSequence(output, (uchar)uc);
+ } else {
+ if (uc < 0x0800) {
+ addEscapeSequence(output, 0xc0 | ((uchar) (uc >> 6)));
+ } else {
+
+ if (QChar::requiresSurrogates(uc)) {
+ addEscapeSequence(output, 0xf0 | ((uchar) (uc >> 18)));
+ addEscapeSequence(output, 0x80 | (((uchar) (uc >> 12)) & 0x3f));
+ } else {
+ addEscapeSequence(output, 0xe0 | (((uchar) (uc >> 12)) & 0x3f));
+ }
+ addEscapeSequence(output, 0x80 | (((uchar) (uc >> 6)) & 0x3f));
+ }
+ addEscapeSequence(output, 0x80 | ((uchar) (uc&0x3f)));
+ }
+ } else {
+ output.append(c);
+ }
+ ++i;
+ }
+ if (i != length)
+ *ok = false;
+ return output;
+}
+
+// DecodeNonReserved keeps percent-escapes of URI-reserved characters intact
+// (decodeURI); DecodeAll decodes everything (decodeURIComponent).
+enum DecodeMode {
+ DecodeAll,
+ DecodeNonReserved
+};
+
+// Decode abstract operation [ES5.1 15.1.3]: percent-decodes input and
+// UTF-8-decodes multi-byte sequences back to UTF-16. Sets *ok to false
+// (and returns a null string) on any malformed escape or UTF-8 sequence.
+static QString decode(const QString &input, DecodeMode decodeMode, bool *ok)
+{
+ *ok = true;
+ QString output;
+ output.reserve(input.length());
+ const int length = input.length();
+ int i = 0;
+ const QChar percent = QLatin1Char('%');
+ while (i < length) {
+ const QChar ch = input.at(i);
+ if (ch == percent) {
+ int start = i; // remember escape start for DecodeNonReserved copy-through
+ if (i + 2 >= length)
+ goto error;
+
+ int d1 = fromHex(input.at(i+1));
+ int d0 = fromHex(input.at(i+2));
+ if ((d1 == -1) || (d0 == -1))
+ goto error;
+
+ int b = (d1 << 4) | d0;
+ i += 2;
+ if (b & 0x80) {
+ // multi-byte UTF-8 sequence: determine length and minimum code point
+ // (min_uc rejects overlong encodings)
+ int uc;
+ int min_uc;
+ int need;
+ if ((b & 0xe0) == 0xc0) {
+ uc = b & 0x1f;
+ need = 1;
+ min_uc = 0x80;
+ } else if ((b & 0xf0) == 0xe0) {
+ uc = b & 0x0f;
+ need = 2;
+ min_uc = 0x800;
+ } else if ((b & 0xf8) == 0xf0) {
+ uc = b & 0x07;
+ need = 3;
+ min_uc = 0x10000;
+ } else {
+ goto error;
+ }
+
+ if (i + (3 * need) >= length)
+ goto error;
+
+ // consume "need" continuation bytes, each written as %XX
+ for (int j = 0; j < need; ++j) {
+ ++i;
+ if (input.at(i) != percent)
+ goto error;
+
+ d1 = fromHex(input.at(i+1));
+ d0 = fromHex(input.at(i+2));
+ if ((d1 == -1) || (d0 == -1))
+ goto error;
+
+ b = (d1 << 4) | d0;
+ if ((b & 0xC0) != 0x80)
+ goto error;
+
+ i += 2;
+ uc = (uc << 6) + (b & 0x3f);
+ }
+ if (uc < min_uc)
+ goto error;
+
+ if (uc < 0x10000) {
+ output.append(QChar(uc));
+ } else {
+ if (uc > 0x10FFFF)
+ goto error;
+
+ // supplementary plane: emit as a surrogate pair
+ ushort l = ushort(((uc - 0x10000) & 0x3FF) + 0xDC00);
+ ushort h = ushort((((uc - 0x10000) >> 10) & 0x3FF) + 0xD800);
+ output.append(QChar(h));
+ output.append(QChar(l));
+ }
+ } else {
+ // single-byte sequence; reserved characters stay escaped in
+ // DecodeNonReserved mode
+ if (decodeMode == DecodeNonReserved && b <= 0x40) {
+ const char *r = uriReserved;
+ while (*r) {
+ if (*r == b)
+ break;
+ ++r;
+ }
+ if (*r)
+ output.append(input.mid(start, i - start + 1));
+ else
+ output.append(QChar(b));
+ } else {
+ output.append(QChar(b));
+ }
+ }
+ } else {
+ output.append(ch);
+ }
+ ++i;
+ }
+ if (i != length)
+ *ok = false;
+ return output;
+ error:
+ *ok = false;
+ return QString();
+}
+
+DEFINE_MANAGED_VTABLE(EvalFunction);
+
+// Plain eval(): named "eval", length property 1 per ES5.1 15.1.2.1.
+EvalFunction::EvalFunction(ExecutionContext *scope)
+ : FunctionObject(scope)
+ , qmlActivation(0)
+{
+ vtbl = &static_vtbl;
+ name = scope->engine->id_eval;
+ defineReadonlyProperty(scope->engine->id_length, Value::fromInt32(1));
+}
+
+// QML variant: qmlActivation is installed as the activation object of the
+// call context created in evalCall().
+EvalFunction::EvalFunction(ExecutionContext *scope, Object *qmlActivation)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+ name = scope->engine->id_eval;
+ defineReadonlyProperty(scope->engine->id_length, Value::fromInt32(1));
+ this->qmlActivation = qmlActivation;
+}
+
+// Core of eval() [ES5.1 15.1.2.1]. directCall is true for a syntactic
+// eval(...) call, which evaluates in the caller's scope; an indirect call
+// always evaluates in the global scope.
+Value EvalFunction::evalCall(ExecutionContext *parentContext, Value /*thisObject*/, Value *args, int argc, bool directCall)
+{
+ if (argc < 1)
+ return Value::undefinedValue();
+
+ ExecutionEngine *engine = parentContext->engine;
+ ExecutionContext *ctx = parentContext;
+
+ if (!directCall) {
+ // the context for eval should be the global scope, so we fake a root
+ // context
+ ctx = engine->pushGlobalContext();
+ }
+
+ // A non-string argument is returned unchanged (15.1.2.1 step 1).
+ if (!args[0].isString())
+ return args[0];
+
+ const QString code = args[0].stringValue()->toQString();
+ // Non-strict eval code may resolve names from the calling context.
+ bool inheritContext = !ctx->strictMode;
+
+ QQmlJS::VM::Function *f = parseSource(ctx, QStringLiteral("eval code"),
+ code, QQmlJS::Codegen::EvalCode,
+ (directCall && parentContext->strictMode), inheritContext);
+
+ if (!f)
+ return Value::undefinedValue();
+
+ // NOTE(review): these assignments mutate the shared EvalFunction object
+ // itself for the duration of the call -- assumes single-threaded execution.
+ strictMode = f->isStrict || (ctx->strictMode);
+ if (qmlActivation)
+ strictMode = true;
+
+ usesArgumentsObject = f->usesArgumentsObject;
+ needsActivation = f->needsActivation();
+
+ // Strict eval code gets its own variable environment (ES5.1 10.4.2 step 3);
+ // for QML the supplied activation object is installed on that context.
+ if (strictMode) {
+ CallContext *k = ctx->engine->newCallContext(this, ctx->thisObject, 0, 0);
+ if (qmlActivation) {
+ k->activation = qmlActivation;
+ k->type = ExecutionContext::Type_QmlContext;
+ }
+ ctx = k;
+ }
+
+ // set the correct strict mode flag on the context
+ bool cstrict = ctx->strictMode;
+ ctx->strictMode = strictMode;
+
+ Value result = Value::undefinedValue();
+ try {
+ result = f->code(ctx, f->codeData);
+ } catch (Exception &ex) {
+ // restore the caller's strict flag and unwind the contexts we pushed
+ // before letting the JS exception propagate
+ ctx->strictMode = cstrict;
+ if (strictMode)
+ ex.partiallyUnwindContext(parentContext);
+ throw;
+ }
+
+ ctx->strictMode = cstrict;
+
+ // pop the fake global context and/or the strict-mode call context again
+ while (engine->current != parentContext)
+ engine->popContext();
+
+ return result;
+}
+
+
+// Managed vtable call(): invocation of eval through a reference, i.e. an
+// indirect eval -- always evaluates in the global scope (directCall=false).
+Value EvalFunction::call(Managed *that, ExecutionContext *context, const Value &thisObject, Value *args, int argc)
+{
+ // indirect call
+ return static_cast<EvalFunction *>(that)->evalCall(context, thisObject, args, argc, false);
+}
+
+//Value EvalFunction::construct(ExecutionContext *ctx, Value *, int)
+//{
+// ctx->throwTypeError();
+// return Value::undefinedValue();
+//}
+
+// Parses and compiles |source| into a VM::Function. Parse errors are raised
+// as a JS SyntaxError on |ctx|; returns 0 for an empty program. When
+// inheritContext is set, fast lookups are disabled so eval code can resolve
+// the calling context's locals dynamically.
+QQmlJS::VM::Function *EvalFunction::parseSource(QQmlJS::VM::ExecutionContext *ctx,
+ const QString &fileName, const QString &source,
+ QQmlJS::Codegen::Mode mode,
+ bool strictMode, bool inheritContext)
+{
+ using namespace QQmlJS;
+
+ // block GC while the IR module and half-built function are live
+ MemoryManager::GCBlocker gcBlocker(ctx->engine->memoryManager);
+
+ VM::ExecutionEngine *vm = ctx->engine;
+ V4IR::Module module;
+ VM::Function *globalCode = 0;
+
+ {
+ QQmlJS::Engine ee, *engine = &ee;
+ Lexer lexer(engine);
+ lexer.setCode(source, 1, false);
+ Parser parser(engine);
+
+ const bool parsed = parser.parseProgram();
+
+ // collect errors into a singly-linked DiagnosticMessage list;
+ // warnings are only printed to stderr
+ VM::DiagnosticMessage *error = 0, **errIt = &error;
+ foreach (const QQmlJS::DiagnosticMessage &m, parser.diagnosticMessages()) {
+ if (m.isError()) {
+ *errIt = new VM::DiagnosticMessage;
+ (*errIt)->fileName = fileName;
+ (*errIt)->offset = m.loc.offset;
+ (*errIt)->length = m.loc.length;
+ (*errIt)->startLine = m.loc.startLine;
+ (*errIt)->startColumn = m.loc.startColumn;
+ (*errIt)->type = VM::DiagnosticMessage::Error;
+ (*errIt)->message = m.message;
+ errIt = &(*errIt)->next;
+ } else {
+ std::cerr << qPrintable(fileName) << ':' << m.loc.startLine << ':' << m.loc.startColumn
+ << ": warning: " << qPrintable(m.message) << std::endl;
+ }
+ }
+ if (error)
+ ctx->throwSyntaxError(error);
+
+ if (parsed) {
+ using namespace AST;
+ Program *program = AST::cast<Program *>(parser.rootNode());
+ if (!program) {
+ // if parsing was successful, and we have no program, then
+ // we're done...:
+ return 0;
+ }
+
+ // names of the calling context's locals, made visible to eval code
+ QStringList inheritedLocals;
+ if (inheritContext)
+ for (String * const *i = ctx->variables(), * const *ei = i + ctx->variableCount(); i < ei; ++i)
+ inheritedLocals.append(*i ? (*i)->toQString() : QString());
+
+ Codegen cg(ctx, strictMode);
+ V4IR::Function *globalIRCode = cg(fileName, source, program, &module, mode, inheritedLocals);
+ QScopedPointer<EvalInstructionSelection> isel(ctx->engine->iselFactory->create(vm, &module));
+ if (inheritContext)
+ isel->setUseFastLookups(false);
+ if (globalIRCode)
+ globalCode = isel->vmFunction(globalIRCode);
+ }
+
+ if (! globalCode)
+ // ### should be a syntax error
+ ctx->throwTypeError();
+ }
+
+ return globalCode;
+}
+
+// Digit value of qc in radix R (digits then letters, case-insensitive),
+// or -1 if qc is not a valid digit for that radix.
+static inline int toInt(const QChar &qc, int R)
+{
+ ushort c = qc.unicode();
+ int v = -1;
+ if (c >= '0' && c <= '9')
+ v = c - '0';
+ else if (c >= 'A' && c <= 'Z')
+ v = c - 'A' + 10;
+ else if (c >= 'a' && c <= 'z')
+ v = c - 'a' + 10;
+ if (v >= 0 && v < R)
+ return v;
+ else
+ return -1;
+}
+
+// parseInt [15.1.2.2]
+// Parses a leading integer from argument 0 in the radix given by argument 1
+// (0/undefined means auto: 10, or 16 after a 0x/0X prefix). Accumulates in
+// qint64 and switches to floating-point accumulation on overflow.
+Value GlobalFunctions::method_parseInt(SimpleCallContext *context)
+{
+ Value string = context->argument(0);
+ Value radix = context->argument(1);
+ int R = radix.isUndefined() ? 0 : radix.toInt32();
+
+ // [15.1.2.2] step by step:
+ String *inputString = string.toString(context); // 1
+ QString trimmed = inputString->toQString().trimmed(); // 2
+ const QChar *pos = trimmed.constData();
+ const QChar *end = pos + trimmed.length();
+
+ int sign = 1; // 3
+ if (pos != end) {
+ if (*pos == QLatin1Char('-'))
+ sign = -1; // 4
+ if (*pos == QLatin1Char('-') || *pos == QLatin1Char('+'))
+ ++pos; // 5
+ }
+ bool stripPrefix = true; // 7
+ if (R) { // 8
+ if (R < 2 || R > 36)
+ return Value::fromDouble(std::numeric_limits<double>::quiet_NaN()); // 8a
+ if (R != 16)
+ stripPrefix = false; // 8b
+ } else { // 9
+ R = 10; // 9a
+ }
+ if (stripPrefix) { // 10
+ if ((end - pos >= 2)
+ && (pos[0] == QLatin1Char('0'))
+ && (pos[1] == QLatin1Char('x') || pos[1] == QLatin1Char('X'))) { // 10a
+ pos += 2;
+ R = 16;
+ }
+ }
+ // 11: Z is progressively built below
+ // 13: this is handled by the toInt function
+ if (pos == end) // 12
+ return Value::fromDouble(std::numeric_limits<double>::quiet_NaN());
+ bool overflow = false;
+ qint64 v_overflow; // integer part accumulated before overflow
+ unsigned overflow_digit_count = 0; // digits consumed after overflow
+ int d = toInt(*pos++, R);
+ if (d == -1)
+ return Value::fromDouble(std::numeric_limits<double>::quiet_NaN());
+ qint64 v = d;
+ while (pos != end) {
+ d = toInt(*pos++, R);
+ if (d == -1)
+ break;
+ if (overflow) {
+ if (overflow_digit_count == 0) {
+ v_overflow = v;
+ v = 0;
+ }
+ ++overflow_digit_count;
+ v = v * R + d;
+ } else {
+ // detect qint64 wrap-around; re-read this digit in overflow mode
+ qint64 vNew = v * R + d;
+ if (vNew < v) {
+ overflow = true;
+ --pos;
+ } else {
+ v = vNew;
+ }
+ }
+ }
+
+ if (overflow) {
+ // recombine: shift the pre-overflow part left by the extra digits
+ double result = (double) v_overflow * pow(R, overflow_digit_count);
+ result += v;
+ return Value::fromDouble(sign * result);
+ } else {
+ return Value::fromDouble(sign * (double) v); // 15
+ }
+}
+
+// parseFloat [15.1.2.3]
+// Parses a leading decimal number from argument 0; the numeric conversion
++// itself is delegated to qstrtod on the Latin-1 form of the string.
+Value GlobalFunctions::method_parseFloat(SimpleCallContext *context)
+{
+ Value string = context->argument(0);
+
+ // [15.1.2.3] step by step:
+ String *inputString = string.toString(context); // 1
+ QString trimmed = inputString->toQString().trimmed(); // 2
+
+ // 4: explicit Infinity forms are handled before qstrtod
+ if (trimmed.startsWith(QLatin1String("Infinity"))
+ || trimmed.startsWith(QLatin1String("+Infinity")))
+ return Value::fromDouble(Q_INFINITY);
+ if (trimmed.startsWith("-Infinity"))
+ return Value::fromDouble(-Q_INFINITY);
+ QByteArray ba = trimmed.toLatin1();
+ bool ok;
+ const char *begin = ba.constData();
+ const char *end = 0;
+ double d = qstrtod(begin, &end, &ok);
+ if (end - begin == 0)
+ return Value::fromDouble(std::numeric_limits<double>::quiet_NaN()); // 3
+ else
+ return Value::fromDouble(d);
+}
+
+/// isNaN [15.1.2.4]
+/// Integer-compatible values can never be NaN; otherwise test ToNumber(v).
+Value GlobalFunctions::method_isNaN(SimpleCallContext *context)
+{
+ const Value &v = context->argument(0);
+ if (v.integerCompatible())
+ return Value::fromBoolean(false);
+
+ double d = v.toNumber();
+ return Value::fromBoolean(isnan(d));
+}
+
+/// isFinite [15.1.2.5]
+/// Integer-compatible values are always finite; otherwise test ToNumber(v).
+Value GlobalFunctions::method_isFinite(SimpleCallContext *context)
+{
+ const Value &v = context->argument(0);
+ if (v.integerCompatible())
+ return Value::fromBoolean(true);
+
+ double d = v.toNumber();
+ return Value::fromBoolean(std::isfinite(d));
+}
+
+/// decodeURI [15.1.3.1]
+/// Decodes escapes but leaves URI-reserved characters escaped; throws
+/// URIError on malformed input.
+Value GlobalFunctions::method_decodeURI(SimpleCallContext *context)
+{
+ if (context->argumentCount == 0)
+ return Value::undefinedValue();
+
+ QString uriString = context->arguments[0].toString(context)->toQString();
+ bool ok;
+ QString out = decode(uriString, DecodeNonReserved, &ok);
+ if (!ok)
+ context->throwURIError(Value::fromString(context, QStringLiteral("malformed URI sequence")));
+
+ return Value::fromString(context, out);
+}
+
+/// decodeURIComponent [15.1.3.2]
+/// Decodes every escape sequence; throws URIError on malformed input.
+Value GlobalFunctions::method_decodeURIComponent(SimpleCallContext *context)
+{
+ if (context->argumentCount == 0)
+ return Value::undefinedValue();
+
+ QString uriString = context->arguments[0].toString(context)->toQString();
+ bool ok;
+ QString out = decode(uriString, DecodeAll, &ok);
+ if (!ok)
+ context->throwURIError(Value::fromString(context, QStringLiteral("malformed URI sequence")));
+
+ return Value::fromString(context, out);
+}
+
+/// encodeURI [15.1.3.3]
+/// Escapes everything outside the unescaped+reserved set; throws URIError
+/// on unpaired surrogates.
+Value GlobalFunctions::method_encodeURI(SimpleCallContext *context)
+{
+ if (context->argumentCount == 0)
+ return Value::undefinedValue();
+
+ QString uriString = context->arguments[0].toString(context)->toQString();
+ bool ok;
+ QString out = encode(uriString, uriUnescapedReserved, &ok);
+ if (!ok)
+ context->throwURIError(Value::fromString(context, QStringLiteral("malformed URI sequence")));
+
+ return Value::fromString(context, out);
+}
+
+/// encodeURIComponent [15.1.3.4]
+/// Escapes everything outside the unescaped set (reserved characters are
+/// escaped too); throws URIError on unpaired surrogates.
+Value GlobalFunctions::method_encodeURIComponent(SimpleCallContext *context)
+{
+ if (context->argumentCount == 0)
+ return Value::undefinedValue();
+
+ QString uriString = context->arguments[0].toString(context)->toQString();
+ bool ok;
+ QString out = encode(uriString, uriUnescaped, &ok);
+ if (!ok)
+ context->throwURIError(Value::fromString(context, QStringLiteral("malformed URI sequence")));
+
+ return Value::fromString(context, out);
+}
+
+// escape [ES5.1 B.2.1]: no argument yields the string "undefined".
+Value GlobalFunctions::method_escape(SimpleCallContext *context)
+{
+ if (!context->argumentCount)
+ return Value::fromString(context, QStringLiteral("undefined"));
+
+ QString str = context->argument(0).toString(context)->toQString();
+ return Value::fromString(context, escape(str));
+}
+
+// unescape [ES5.1 B.2.2]: no argument yields the string "undefined".
+Value GlobalFunctions::method_unescape(SimpleCallContext *context)
+{
+ if (!context->argumentCount)
+ return Value::fromString(context, QStringLiteral("undefined"));
+
+ QString str = context->argument(0).toString(context)->toQString();
+ return Value::fromString(context, unescape(str));
+}
diff --git a/src/qml/qml/v4vm/qv4globalobject.h b/src/qml/qml/v4vm/qv4globalobject.h
new file mode 100644
index 0000000000..f9776f95cb
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4globalobject.h
@@ -0,0 +1,93 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4GLOBALOBJECT_H
+#define QV4GLOBALOBJECT_H
+
+#include "qv4global.h"
+#include "qv4functionobject.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+// The global eval() function object. An optional QML activation object is
+// installed as the activation of eval'ed code (see evalCall in the .cpp).
+struct Q_V4_EXPORT EvalFunction : FunctionObject
+{
+ EvalFunction(ExecutionContext *scope);
+ EvalFunction(ExecutionContext *scope, Object *qmlActivation);
+
+ // Parses and compiles source into a VM::Function; raises a JS SyntaxError
+ // on the context for parse errors.
+ static QQmlJS::VM::Function *parseSource(QQmlJS::VM::ExecutionContext *ctx,
+ const QString &fileName,
+ const QString &source,
+ QQmlJS::Codegen::Mode mode, bool strictMode,
+ bool inheritContext);
+
+ // directCall selects caller-scope (direct) vs global-scope (indirect) eval.
+ Value evalCall(ExecutionContext *context, Value thisObject, Value *args, int argc, bool directCall);
+
+ using Managed::construct;
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+ Object *qmlActivation;
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+// Holder for the global-object builtins (ES5.1 15.1.2, 15.1.3 and Annex B),
+// each implemented as a SimpleCallContext builtin in qv4globalobject.cpp.
+struct GlobalFunctions
+{
+ static Value method_parseInt(SimpleCallContext *context);
+ static Value method_parseFloat(SimpleCallContext *context);
+ static Value method_isNaN(SimpleCallContext *context);
+ static Value method_isFinite(SimpleCallContext *context);
+ static Value method_decodeURI(SimpleCallContext *context);
+ static Value method_decodeURIComponent(SimpleCallContext *context);
+ static Value method_encodeURI(SimpleCallContext *context);
+ static Value method_encodeURIComponent(SimpleCallContext *context);
+ static Value method_escape(SimpleCallContext *context);
+ static Value method_unescape(SimpleCallContext *context);
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4GLOBALOBJECT_H
diff --git a/src/qml/qml/v4vm/qv4identifier.h b/src/qml/qml/v4vm/qv4identifier.h
new file mode 100644
index 0000000000..1467e80986
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4identifier.h
@@ -0,0 +1,111 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4IDENTIFIER_H
+#define QV4IDENTIFIER_H
+
+#include <qv4string.h>
+#include <qv4engine.h>
+#include <limits.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+// Identifier table: interns strings used as identifiers so each gets a small
+// numeric id (for fast lookups). Array-index-like strings are never interned.
+struct Identifiers
+{
+ ExecutionEngine *engine;
+ uint currentIndex; // next id to hand out; table stops growing past USHRT_MAX
+ QHash<QString, String *> identifiers;
+public:
+
+ Identifiers(ExecutionEngine *engine) : engine(engine), currentIndex(0) {}
+
+ // Returns an interned String for s, creating and registering one on demand.
+ String *insert(const QString &s)
+ {
+ QHash<QString, String*>::const_iterator it = identifiers.find(s);
+ if (it != identifiers.constEnd())
+ return it.value();
+
+ String *str = engine->newString(s);
+ str->createHashValue();
+ if (str->subtype == String::StringType_ArrayIndex)
+ return str;
+
+ // NOTE(review): the id is assigned even when the table is full
+ // (currentIndex > USHRT_MAX) although the string is not stored -- confirm.
+ str->identifier = currentIndex;
+ if (currentIndex <= USHRT_MAX) {
+ identifiers.insert(s, str);
+ ++currentIndex;
+ }
+ return str;
+ }
+
+ // Gives s an identifier id, reusing the id of an equal interned string.
+ void toIdentifier(String *s) {
+ if (s->identifier != UINT_MAX)
+ return;
+ if (s->subtype == String::StringType_Unknown)
+ s->createHashValue();
+ if (s->subtype == String::StringType_ArrayIndex)
+ return;
+ QHash<QString, String*>::const_iterator it = identifiers.find(s->toQString());
+ if (it != identifiers.constEnd()) {
+ s->identifier = (*it)->identifier;
+ return;
+ }
+ s->identifier = currentIndex;
+ if (currentIndex <= USHRT_MAX) {
+ identifiers.insert(s->toQString(), s);
+ ++currentIndex;
+ }
+ }
+
+ // GC: keep all interned strings alive.
+ void mark() {
+ for (QHash<QString, String *>::const_iterator it = identifiers.constBegin(); it != identifiers.constEnd(); ++it)
+ (*it)->mark();
+ }
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4internalclass.cpp b/src/qml/qml/v4vm/qv4internalclass.cpp
new file mode 100644
index 0000000000..4d9bb93d0d
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4internalclass.cpp
@@ -0,0 +1,188 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <qv4internalclass.h>
+#include <qv4string.h>
+#include <qv4engine.h>
+#include <qv4identifier.h>
+
+namespace QQmlJS {
+namespace VM {
+
+
+InternalClass::InternalClass(const QQmlJS::VM::InternalClass &other)
+ : engine(other.engine)
+ , propertyTable(other.propertyTable)
+ , nameMap(other.nameMap)
+ , propertyData(other.propertyData)
+ , transitions()
+ , m_sealed(0)
+ , m_frozen(0)
+ , size(other.size)
+{
+}
+
+// ### Should we build this up from the empty class to avoid duplication?
+InternalClass *InternalClass::changeMember(String *string, PropertyAttributes data, uint *index)
+{
+// qDebug() << "InternalClass::changeMember()" << string->toQString() << hex << (uint)data.m_all;
+ data.resolve();
+ uint idx = find(string);
+ if (index)
+ *index = idx;
+
+ assert(idx != UINT_MAX);
+
+ if (data == propertyData[idx])
+ return this;
+
+ uint tid = string->identifier | (data.flags() << 27);
+
+ QHash<int, InternalClass *>::const_iterator tit = transitions.constFind(tid);
+ if (tit != transitions.constEnd())
+ return tit.value();
+
+ // create a new class and add it to the tree
+ InternalClass *newClass = new InternalClass(*this);
+ newClass->propertyData[idx] = data;
+ return newClass;
+
+}
+
+InternalClass *InternalClass::addMember(String *string, PropertyAttributes data, uint *index)
+{
+// qDebug() << "InternalClass::addMember()" << string->toQString() << size << hex << (uint)data.m_all << data.type();
+ data.resolve();
+ engine->identifierCache->toIdentifier(string);
+ uint id = string->identifier | (data.flags() << 27);
+
+ assert(propertyTable.constFind(id) == propertyTable.constEnd());
+
+ QHash<int, InternalClass *>::const_iterator tit = transitions.constFind(id);
+
+ if (index)
+ *index = size;
+ if (tit != transitions.constEnd())
+ return tit.value();
+
+ // create a new class and add it to the tree
+ InternalClass *newClass = new InternalClass(*this);
+ newClass->propertyTable.insert(string->identifier, size);
+ newClass->nameMap.append(string);
+ newClass->propertyData.append(data);
+ ++newClass->size;
+ transitions.insert(id, newClass);
+ return newClass;
+}
+
+void InternalClass::removeMember(Object *object, uint id)
+{
+ assert (propertyTable.constFind(id) != propertyTable.constEnd());
+ int propIdx = propertyTable.constFind(id).value();
+ assert(propIdx < size);
+
+ int toRemove = - (int)id;
+ QHash<int, InternalClass *>::const_iterator tit = transitions.constFind(toRemove);
+
+ if (tit != transitions.constEnd()) {
+ object->internalClass = tit.value();
+ return;
+ }
+
+ // create a new class and add it to the tree
+ object->internalClass = engine->emptyClass;
+ for (int i = 0; i < nameMap.size(); ++i) {
+ if (i == propIdx)
+ continue;
+ object->internalClass = object->internalClass->addMember(nameMap.at(i), propertyData.at(i));
+ }
+
+ transitions.insert(toRemove, object->internalClass);
+}
+
+uint InternalClass::find(String *string)
+{
+ engine->identifierCache->toIdentifier(string);
+ uint id = string->identifier;
+
+ QHash<uint, uint>::const_iterator it = propertyTable.constFind(id);
+ if (it != propertyTable.constEnd())
+ return it.value();
+
+ return UINT_MAX;
+}
+
+InternalClass *InternalClass::sealed()
+{
+ if (m_sealed)
+ return m_sealed;
+
+ m_sealed = engine->emptyClass;
+ for (int i = 0; i < nameMap.size(); ++i) {
+ PropertyAttributes attrs = propertyData.at(i);
+ attrs.setConfigurable(false);
+ m_sealed = m_sealed->addMember(nameMap.at(i), attrs);
+ }
+
+ m_sealed->m_sealed = m_sealed;
+ return m_sealed;
+}
+
+InternalClass *InternalClass::frozen()
+{
+ if (m_frozen)
+ return m_frozen;
+
+ m_frozen = engine->emptyClass;
+ for (int i = 0; i < nameMap.size(); ++i) {
+ PropertyAttributes attrs = propertyData.at(i);
+ attrs.setWritable(false);
+ attrs.setConfigurable(false);
+ m_frozen = m_frozen->addMember(nameMap.at(i), attrs);
+ }
+
+ m_frozen->m_frozen = m_frozen;
+ return m_frozen;
+}
+
+
+}
+}
diff --git a/src/qml/qml/v4vm/qv4internalclass.h b/src/qml/qml/v4vm/qv4internalclass.h
new file mode 100644
index 0000000000..cc3b03190b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4internalclass.h
@@ -0,0 +1,91 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4INTERNALCLASS_H
+#define QV4INTERNALCLASS_H
+
+#include <QHash>
+#include <QVector>
+#include <qv4global.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct String;
+struct ExecutionEngine;
+struct Object;
+
+struct InternalClass {
+ ExecutionEngine *engine;
+ QHash<uint, uint> propertyTable; // id to valueIndex
+ QVector<String *> nameMap;
+
+ QVector<PropertyAttributes> propertyData;
+
+ QHash<int, InternalClass *> transitions; // id to next class, positive means add, negative delete
+
+ InternalClass *m_sealed;
+ InternalClass *m_frozen;
+
+ uint size;
+
+ InternalClass(ExecutionEngine *engine) : engine(engine), m_sealed(0), m_frozen(0), size(0) {}
+
+ InternalClass *addMember(String *string, PropertyAttributes data, uint *index = 0);
+ InternalClass *changeMember(String *string, PropertyAttributes data, uint *index = 0);
+ void removeMember(Object *object, uint id);
+ uint find(String *s);
+
+ InternalClass *sealed();
+ InternalClass *frozen();
+
+private:
+ InternalClass(const InternalClass &other);
+};
+
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4isel_llvm.cpp b/src/qml/qml/v4vm/qv4isel_llvm.cpp
new file mode 100644
index 0000000000..27614bae62
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4isel_llvm.cpp
@@ -0,0 +1,1375 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunused-parameter"
+#endif // __clang__
+
+#include <llvm/Analysis/Passes.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Assembly/PrintModulePass.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/ExecutionEngine/ExecutionEngine.h>
+#include <llvm/ExecutionEngine/JIT.h>
+#include <llvm/ExecutionEngine/JITMemoryManager.h>
+#include <llvm/Support/FormattedStream.h>
+#include <llvm/Support/Host.h>
+#include <llvm/Support/MemoryBuffer.h>
+#include <llvm/Support/raw_ostream.h>
+#include <llvm/Support/system_error.h>
+#include <llvm/Support/TargetRegistry.h>
+#include <llvm/Support/TargetSelect.h>
+#include <llvm/Target/TargetMachine.h>
+#include <llvm/Transforms/Scalar.h>
+#include <llvm/Transforms/IPO.h>
+#include <llvm/Linker.h>
+
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif // __clang__
+
+#include <QtCore/QFileInfo>
+#include <QtCore/QLibrary>
+#include <QtCore/QStringList>
+#include <QtCore/QTextStream>
+#include <cstdio>
+#include <iostream>
+
+// These includes have to come last, because WTF/Platform.h defines some macros
+// with very unfriendly names that collide with class fields in LLVM.
+#include "qv4isel_llvm_p.h"
+#include "qv4_llvm_p.h"
+#include "qv4jsir_p.h"
+#include "qv4string.h"
+#include "qv4global.h"
+
+namespace QQmlJS {
+
+Q_V4_EXPORT int compileWithLLVM(IR::Module *module, const QString &fileName, LLVMOutputType outputType, int (*exec)(void *))
+{
+ Q_ASSERT(module);
+ Q_ASSERT(exec || outputType != LLVMOutputJit);
+
+ // TODO: should this be done here?
+ LLVMInitializeX86TargetInfo();
+ LLVMInitializeX86Target();
+ LLVMInitializeX86AsmPrinter();
+ LLVMInitializeX86AsmParser();
+ LLVMInitializeX86Disassembler();
+ LLVMInitializeX86TargetMC();
+
+ //----
+
+ llvm::InitializeNativeTarget();
+ LLVM::InstructionSelection llvmIsel(llvm::getGlobalContext());
+
+ const QString moduleName = QFileInfo(fileName).fileName();
+ llvm::StringRef moduleId(moduleName.toUtf8().constData());
+ llvm::Module *llvmModule = new llvm::Module(moduleId, llvmIsel.getContext());
+
+ if (outputType == LLVMOutputJit) {
+ // The execution engine takes ownership of the model. No need to delete it anymore.
+ std::string errStr;
+ llvm::ExecutionEngine *execEngine = llvm::EngineBuilder(llvmModule)
+// .setUseMCJIT(true)
+ .setErrorStr(&errStr).create();
+ if (!execEngine) {
+ std::cerr << "Could not create LLVM JIT: " << errStr << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ llvm::FunctionPassManager functionPassManager(llvmModule);
+ // Set up the optimizer pipeline. Start with registering info about how the
+ // target lays out data structures.
+ functionPassManager.add(new llvm::DataLayout(*execEngine->getDataLayout()));
+ // Promote allocas to registers.
+ functionPassManager.add(llvm::createPromoteMemoryToRegisterPass());
+ // Provide basic AliasAnalysis support for GVN.
+ functionPassManager.add(llvm::createBasicAliasAnalysisPass());
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ functionPassManager.add(llvm::createInstructionCombiningPass());
+ // Reassociate expressions.
+ functionPassManager.add(llvm::createReassociatePass());
+ // Eliminate Common SubExpressions.
+ functionPassManager.add(llvm::createGVNPass());
+ // Simplify the control flow graph (deleting unreachable blocks, etc).
+ functionPassManager.add(llvm::createCFGSimplificationPass());
+
+ functionPassManager.doInitialization();
+
+ llvmIsel.buildLLVMModule(module, llvmModule, &functionPassManager);
+
+ llvm::Function *entryPoint = llvmModule->getFunction("%entry");
+ Q_ASSERT(entryPoint);
+ void *funcPtr = execEngine->getPointerToFunction(entryPoint);
+ return exec(funcPtr);
+ } else {
+ llvm::FunctionPassManager functionPassManager(llvmModule);
+ // Set up the optimizer pipeline.
+ // Promote allocas to registers.
+ functionPassManager.add(llvm::createPromoteMemoryToRegisterPass());
+ // Provide basic AliasAnalysis support for GVN.
+ functionPassManager.add(llvm::createBasicAliasAnalysisPass());
+ // Do simple "peephole" optimizations and bit-twiddling optzns.
+ functionPassManager.add(llvm::createInstructionCombiningPass());
+ // Reassociate expressions.
+ functionPassManager.add(llvm::createReassociatePass());
+ // Eliminate Common SubExpressions.
+ functionPassManager.add(llvm::createGVNPass());
+ // Simplify the control flow graph (deleting unreachable blocks, etc).
+ functionPassManager.add(llvm::createCFGSimplificationPass());
+
+ functionPassManager.doInitialization();
+
+ llvmIsel.buildLLVMModule(module, llvmModule, &functionPassManager);
+
+ // TODO: if output type is .ll, print the module to file
+
+ const std::string triple = llvm::sys::getDefaultTargetTriple();
+
+ std::string err;
+ const llvm::Target *target = llvm::TargetRegistry::lookupTarget(triple, err);
+ if (! err.empty()) {
+ std::cerr << err << ", triple: " << triple << std::endl;
+ assert(!"cannot create target for the host triple");
+ }
+
+ std::string cpu;
+ std::string features;
+ llvm::TargetOptions options;
+ llvm::TargetMachine *targetMachine = target->createTargetMachine(triple, cpu, features, options, llvm::Reloc::PIC_);
+ assert(targetMachine);
+
+ llvm::TargetMachine::CodeGenFileType ft;
+ QString ofName;
+
+ if (outputType == LLVMOutputObject) {
+ ft = llvm::TargetMachine::CGFT_ObjectFile;
+ ofName = fileName + QLatin1String(".o");
+ } else if (outputType == LLVMOutputAssembler) {
+ ft = llvm::TargetMachine::CGFT_AssemblyFile;
+ ofName = fileName + QLatin1String(".s");
+ } else {
+ // ft is not used.
+ ofName = fileName + QLatin1String(".ll");
+ }
+
+ llvm::raw_fd_ostream dest(ofName.toUtf8().constData(), err, llvm::raw_fd_ostream::F_Binary);
+ llvm::formatted_raw_ostream destf(dest);
+ if (!err.empty()) {
+ std::cerr << err << std::endl;
+ delete llvmModule;
+ }
+
+ llvm::PassManager globalPassManager;
+ globalPassManager.add(llvm::createScalarReplAggregatesPass());
+ globalPassManager.add(llvm::createInstructionCombiningPass());
+ globalPassManager.add(llvm::createGlobalOptimizerPass());
+ globalPassManager.add(llvm::createFunctionInliningPass(25));
+// globalPassManager.add(llvm::createFunctionInliningPass(125));
+
+ if (outputType == LLVMOutputObject || outputType == LLVMOutputAssembler) {
+ if (targetMachine->addPassesToEmitFile(globalPassManager, destf, ft)) {
+ std::cerr << err << " (probably no DataLayout in TargetMachine)" << std::endl;
+ } else {
+ globalPassManager.run(*llvmModule);
+
+ destf.flush();
+ dest.flush();
+ }
+ } else { // .ll
+ globalPassManager.run(*llvmModule);
+ llvmModule->print(destf, 0);
+
+ destf.flush();
+ dest.flush();
+ }
+
+ delete llvmModule;
+ return EXIT_SUCCESS;
+ }
+}
+
+} // QQmlJS
+
+using namespace QQmlJS;
+using namespace QQmlJS::LLVM;
+
+namespace {
+QTextStream qerr(stderr, QIODevice::WriteOnly);
+}
+
+InstructionSelection::InstructionSelection(llvm::LLVMContext &context)
+ : llvm::IRBuilder<>(context)
+ , _llvmModule(0)
+ , _llvmFunction(0)
+ , _llvmValue(0)
+ , _numberTy(0)
+ , _valueTy(0)
+ , _contextPtrTy(0)
+ , _stringPtrTy(0)
+ , _functionTy(0)
+ , _allocaInsertPoint(0)
+ , _function(0)
+ , _block(0)
+ , _fpm(0)
+{
+}
+
+void InstructionSelection::buildLLVMModule(IR::Module *module, llvm::Module *llvmModule, llvm::FunctionPassManager *fpm)
+{
+ qSwap(_llvmModule, llvmModule);
+ qSwap(_fpm, fpm);
+
+ _numberTy = getDoubleTy();
+
+ std::string err;
+
+ llvm::OwningPtr<llvm::MemoryBuffer> buffer;
+ qDebug()<<"llvm runtime:"<<LLVM_RUNTIME;
+ llvm::error_code ec = llvm::MemoryBuffer::getFile(llvm::StringRef(LLVM_RUNTIME), buffer);
+ if (ec) {
+ qWarning() << ec.message().c_str();
+ assert(!"cannot load QML/JS LLVM runtime, you can generate the runtime with the command `make llvm_runtime'");
+ }
+
+ llvm::Module *llvmRuntime = llvm::getLazyBitcodeModule(buffer.get(), getContext(), &err);
+ if (! err.empty()) {
+ qWarning() << err.c_str();
+ assert(!"cannot load QML/JS LLVM runtime");
+ }
+
+ err.clear();
+ llvm::Linker::LinkModules(_llvmModule, llvmRuntime, llvm::Linker::DestroySource, &err);
+ if (! err.empty()) {
+ qWarning() << err.c_str();
+ assert(!"cannot link the QML/JS LLVM runtime");
+ }
+
+ _valueTy = _llvmModule->getTypeByName("struct.QQmlJS::VM::Value");
+ _contextPtrTy = _llvmModule->getTypeByName("struct.QQmlJS::VM::ExecutionContext")->getPointerTo();
+ _stringPtrTy = _llvmModule->getTypeByName("struct.QQmlJS::VM::String")->getPointerTo();
+
+ {
+ llvm::Type *args[] = { _contextPtrTy };
+ _functionTy = llvm::FunctionType::get(getVoidTy(), llvm::makeArrayRef(args), false);
+ }
+
+
+ foreach (IR::Function *function, module->functions)
+ (void) compileLLVMFunction(function);
+ qSwap(_fpm, fpm);
+ qSwap(_llvmModule, llvmModule);
+}
+
+void InstructionSelection::callBuiltinInvalid(IR::Name *func, IR::ExprList *args, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinTypeofMember(IR::Temp *base, const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinTypeofSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinTypeofName(const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinTypeofValue(IR::Temp *value, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinDeleteMember(IR::Temp *base, const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinDeleteSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinDeleteName(const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinDeleteValue(IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostDecrementMember(IR::Temp *base, const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostDecrementSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostDecrementName(const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostDecrementValue(IR::Temp *value, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostIncrementMember(IR::Temp *base, const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostIncrementSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostIncrementName(const QString &name, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPostIncrementValue(IR::Temp *value, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinThrow(IR::Temp *arg)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinCreateExceptionHandler(IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinFinishTry()
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinForeachIteratorObject(IR::Temp *arg, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinForeachNextPropertyname(IR::Temp *arg, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPushWithScope(IR::Temp *arg)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinPopScope()
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinDeclareVar(bool deletable, const QString &name)
+{
+ llvm::ConstantInt *isDeletable = getInt1(deletable != 0);
+ llvm::Value *varName = getIdentifier(name);
+ CreateCall3(getRuntimeFunction("__qmljs_builtin_declare_var"),
+ _llvmFunction->arg_begin(), isDeletable, varName);
+}
+
+void InstructionSelection::callBuiltinDefineGetterSetter(IR::Temp *object, const QString &name, IR::Temp *getter, IR::Temp *setter)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinDefineProperty(IR::Temp *object, const QString &name, IR::Temp *value)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callBuiltinDefineArray(IR::Temp *result, IR::ExprList *args)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callValue(IR::Temp *value, IR::ExprList *args, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callProperty(IR::Temp *base, const QString &name, IR::ExprList *args, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::callSubscript(IR::Temp *base, IR::Temp *index, IR::ExprList *args, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::constructActivationProperty(IR::Name *func,
+ IR::ExprList *args,
+ IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::constructProperty(IR::Temp *base, const QString &name, IR::ExprList *args, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::constructValue(IR::Temp *value, IR::ExprList *args, IR::Temp *result)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::loadThisObject(IR::Temp *temp)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::loadConst(IR::Const *con, IR::Temp *temp)
+{
+ llvm::Value *target = getLLVMTemp(temp);
+ llvm::Value *source = CreateLoad(createValue(con));
+ CreateStore(source, target);
+}
+
+void InstructionSelection::loadString(const QString &str, IR::Temp *targetTemp)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::loadRegexp(IR::RegExp *sourceRegexp, IR::Temp *targetTemp)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::getActivationProperty(const QString &name, IR::Temp *temp)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::setActivationProperty(IR::Temp *source, const QString &targetName)
+{
+ llvm::Value *name = getIdentifier(targetName);
+ llvm::Value *src = toValuePtr(source);
+ CreateCall3(getRuntimeFunction("__qmljs_llvm_set_activation_property"),
+ _llvmFunction->arg_begin(), name, src);
+}
+
+void InstructionSelection::initClosure(IR::Closure *closure, IR::Temp *target)
+{
+ IR::Function *f = closure->value;
+ QString name;
+ if (f->name)
+ name = *f->name;
+
+ llvm::Value *args[] = {
+ _llvmFunction->arg_begin(),
+ getLLVMTemp(target),
+ getIdentifier(name),
+ getInt1(f->hasDirectEval),
+ getInt1(f->usesArgumentsObject),
+ getInt1(f->isStrict),
+ getInt1(!f->nestedFunctions.isEmpty()),
+ genStringList(f->formals, "formals", "formal"),
+ getInt32(f->formals.size()),
+ genStringList(f->locals, "locals", "local"),
+ getInt32(f->locals.size())
+ };
+ llvm::Function *callee = _llvmModule->getFunction("__qmljs_llvm_init_closure");
+ CreateCall(callee, args);
+}
+
+void InstructionSelection::getProperty(IR::Temp *sourceBase, const QString &sourceName, IR::Temp *target)
+{
+ llvm::Value *base = getLLVMTempReference(sourceBase);
+ llvm::Value *name = getIdentifier(sourceName);
+ llvm::Value *t = getLLVMTemp(target);
+ CreateCall4(getRuntimeFunction("__qmljs_llvm_get_property"),
+ _llvmFunction->arg_begin(), t, base, name);
+}
+
+void InstructionSelection::setProperty(IR::Temp *source, IR::Temp *targetBase, const QString &targetName)
+{
+ llvm::Value *base = getLLVMTempReference(targetBase);
+ llvm::Value *name = getIdentifier(targetName);
+ llvm::Value *src = toValuePtr(source);
+ CreateCall4(getRuntimeFunction("__qmljs_llvm_set_property"),
+ _llvmFunction->arg_begin(), base, name, src);
+}
+
+void InstructionSelection::getElement(IR::Temp *sourceBase, IR::Temp *sourceIndex, IR::Temp *target)
+{
+ // TODO
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+
+ llvm::Value *base = getLLVMTempReference(sourceBase);
+ llvm::Value *index = getLLVMTempReference(sourceIndex);
+ llvm::Value *t = getLLVMTemp(target);
+ CreateCall4(getRuntimeFunction("__qmljs_llvm_get_element"),
+ _llvmFunction->arg_begin(), t, base, index);
+}
+
+void InstructionSelection::setElement(IR::Temp *source, IR::Temp *targetBase, IR::Temp *targetIndex)
+{
+ llvm::Value *base = getLLVMTempReference(targetBase);
+ llvm::Value *index = getLLVMTempReference(targetIndex);
+ llvm::Value *src = toValuePtr(source);
+ CreateCall4(getRuntimeFunction("__qmljs_llvm_set_element"),
+ _llvmFunction->arg_begin(), base, index, src);
+}
+
+void InstructionSelection::copyValue(IR::Temp *sourceTemp, IR::Temp *targetTemp)
+{
+ llvm::Value *t = getLLVMTemp(targetTemp);
+ llvm::Value *s = getLLVMTemp(sourceTemp);
+ CreateStore(s, t);
+}
+
+void InstructionSelection::unop(IR::AluOp oper, IR::Temp *sourceTemp, IR::Temp *targetTemp)
+{
+ const char *opName = 0;
+ switch (oper) {
+ case IR::OpNot: opName = "__qmljs_not"; break;
+ case IR::OpUMinus: opName = "__qmljs_uminus"; break;
+ case IR::OpUPlus: opName = "__qmljs_uplus"; break;
+ case IR::OpCompl: opName = "__qmljs_compl"; break;
+ case IR::OpIncrement: opName = "__qmljs_increment"; break;
+ case IR::OpDecrement: opName = "__qmljs_decrement"; break;
+ default: assert(!"unreachable"); break;
+ }
+
+ if (opName) {
+ llvm::Value *t = getLLVMTemp(targetTemp);
+ llvm::Value *s = getLLVMTemp(sourceTemp);
+ CreateCall3(getRuntimeFunction(opName),
+ _llvmFunction->arg_begin(), t, s);
+ }
+}
+
+void InstructionSelection::binop(IR::AluOp oper, IR::Temp *leftSource, IR::Temp *rightSource, IR::Temp *target)
+{
+ const char *opName = 0;
+ switch (oper) {
+ case IR::OpBitAnd: opName = "__qmljs_llvm_bit_and"; break;
+ case IR::OpBitOr: opName = "__qmljs_llvm_bit_or"; break;
+ case IR::OpBitXor: opName = "__qmljs_llvm_bit_xor"; break;
+ case IR::OpAdd: opName = "__qmljs_llvm_add"; break;
+ case IR::OpSub: opName = "__qmljs_llvm_sub"; break;
+ case IR::OpMul: opName = "__qmljs_llvm_mul"; break;
+ case IR::OpDiv: opName = "__qmljs_llvm_div"; break;
+ case IR::OpMod: opName = "__qmljs_llvm_mod"; break;
+ case IR::OpLShift: opName = "__qmljs_llvm_shl"; break;
+ case IR::OpRShift: opName = "__qmljs_llvm_shr"; break;
+ case IR::OpURShift: opName = "__qmljs_llvm_ushr"; break;
+ default:
+ Q_UNREACHABLE();
+ break;
+ }
+
+ if (opName) {
+ llvm::Value *t = getLLVMTemp(target);
+ llvm::Value *s1 = toValuePtr(leftSource);
+ llvm::Value *s2 = toValuePtr(rightSource);
+ CreateCall4(getRuntimeFunction(opName),
+ _llvmFunction->arg_begin(), t, s1, s2);
+ return;
+ }
+}
+
+// Emit a runtime call that applies 'oper' in place to the activation
+// property named 'targetName', using 'rightSource' as the right-hand side.
+void InstructionSelection::inplaceNameOp(IR::AluOp oper, IR::Temp *rightSource, const QString &targetName)
+{
+    // Map the ALU operation onto its __qmljs_llvm_inplace_*_name runtime
+    // entry point; any other operation is a codegen error.
+    const char *runtimeName = 0;
+    switch (oper) {
+    case IR::OpBitAnd: runtimeName = "__qmljs_llvm_inplace_bit_and_name"; break;
+    case IR::OpBitOr: runtimeName = "__qmljs_llvm_inplace_bit_or_name"; break;
+    case IR::OpBitXor: runtimeName = "__qmljs_llvm_inplace_bit_xor_name"; break;
+    case IR::OpAdd: runtimeName = "__qmljs_llvm_inplace_add_name"; break;
+    case IR::OpSub: runtimeName = "__qmljs_llvm_inplace_sub_name"; break;
+    case IR::OpMul: runtimeName = "__qmljs_llvm_inplace_mul_name"; break;
+    case IR::OpDiv: runtimeName = "__qmljs_llvm_inplace_div_name"; break;
+    case IR::OpMod: runtimeName = "__qmljs_llvm_inplace_mod_name"; break;
+    case IR::OpLShift: runtimeName = "__qmljs_llvm_inplace_shl_name"; break;
+    case IR::OpRShift: runtimeName = "__qmljs_llvm_inplace_shr_name"; break;
+    case IR::OpURShift: runtimeName = "__qmljs_llvm_inplace_ushr_name"; break;
+    default:
+        Q_UNREACHABLE();
+        break;
+    }
+
+    if (!runtimeName)
+        return;
+
+    llvm::Value *targetIdentifier = getIdentifier(targetName);
+    llvm::Value *sourceValue = toValuePtr(rightSource);
+    CreateCall3(getRuntimeFunction(runtimeName),
+                _llvmFunction->arg_begin(), targetIdentifier, sourceValue);
+}
+
+// Emit a runtime call that applies 'oper' in place to the array element
+// targetBaseTemp[targetIndexTemp], with 'source' as the right-hand side.
+void InstructionSelection::inplaceElementOp(IR::AluOp oper, IR::Temp *source, IR::Temp *targetBaseTemp, IR::Temp *targetIndexTemp)
+{
+    // Map the ALU operation onto its __qmljs_llvm_inplace_*_element runtime
+    // entry point; any other operation is a codegen error.
+    const char *runtimeName = 0;
+    switch (oper) {
+    case IR::OpBitAnd: runtimeName = "__qmljs_llvm_inplace_bit_and_element"; break;
+    case IR::OpBitOr: runtimeName = "__qmljs_llvm_inplace_bit_or_element"; break;
+    case IR::OpBitXor: runtimeName = "__qmljs_llvm_inplace_bit_xor_element"; break;
+    case IR::OpAdd: runtimeName = "__qmljs_llvm_inplace_add_element"; break;
+    case IR::OpSub: runtimeName = "__qmljs_llvm_inplace_sub_element"; break;
+    case IR::OpMul: runtimeName = "__qmljs_llvm_inplace_mul_element"; break;
+    case IR::OpDiv: runtimeName = "__qmljs_llvm_inplace_div_element"; break;
+    case IR::OpMod: runtimeName = "__qmljs_llvm_inplace_mod_element"; break;
+    case IR::OpLShift: runtimeName = "__qmljs_llvm_inplace_shl_element"; break;
+    case IR::OpRShift: runtimeName = "__qmljs_llvm_inplace_shr_element"; break;
+    case IR::OpURShift: runtimeName = "__qmljs_llvm_inplace_ushr_element"; break;
+    default:
+        Q_UNREACHABLE();
+        break;
+    }
+
+    if (!runtimeName)
+        return;
+
+    llvm::Value *targetBase = getLLVMTemp(targetBaseTemp);
+    llvm::Value *targetIndex = getLLVMTemp(targetIndexTemp);
+    llvm::Value *sourceValue = toValuePtr(source);
+    CreateCall4(getRuntimeFunction(runtimeName),
+                _llvmFunction->arg_begin(), targetBase, targetIndex, sourceValue);
+}
+
+// Emit a runtime call that applies 'oper' in place to the property
+// 'targetName' of the object held in 'targetBase', using 'source' as the
+// right-hand side operand.
+void InstructionSelection::inplaceMemberOp(IR::AluOp oper, IR::Temp *source, IR::Temp *targetBase, const QString &targetName)
+{
+    // Map the ALU operation onto its __qmljs_llvm_inplace_*_member runtime
+    // entry point; any other operation is a codegen error.
+    const char *opName = 0;
+    switch (oper) {
+    case IR::OpBitAnd: opName = "__qmljs_llvm_inplace_bit_and_member"; break;
+    case IR::OpBitOr: opName = "__qmljs_llvm_inplace_bit_or_member"; break;
+    case IR::OpBitXor: opName = "__qmljs_llvm_inplace_bit_xor_member"; break;
+    case IR::OpAdd: opName = "__qmljs_llvm_inplace_add_member"; break;
+    case IR::OpSub: opName = "__qmljs_llvm_inplace_sub_member"; break;
+    case IR::OpMul: opName = "__qmljs_llvm_inplace_mul_member"; break;
+    case IR::OpDiv: opName = "__qmljs_llvm_inplace_div_member"; break;
+    case IR::OpMod: opName = "__qmljs_llvm_inplace_mod_member"; break;
+    case IR::OpLShift: opName = "__qmljs_llvm_inplace_shl_member"; break;
+    case IR::OpRShift: opName = "__qmljs_llvm_inplace_shr_member"; break;
+    case IR::OpURShift: opName = "__qmljs_llvm_inplace_ushr_member"; break;
+    default:
+        Q_UNREACHABLE();
+        break;
+    }
+
+    if (opName) {
+        llvm::Value *base = getLLVMTemp(targetBase);
+        llvm::Value *member = getIdentifier(targetName);
+        llvm::Value *value = toValuePtr(source);
+        // NOTE(review): the runtime arguments are passed as (value, base,
+        // member), whereas inplaceElementOp passes the target (base, index)
+        // before the value — confirm this matches the runtime signature.
+        CreateCall4(getRuntimeFunction(opName),
+                    _llvmFunction->arg_begin(), value, base, member);
+    }
+}
+
+// Return the llvm::Function declaration for 'function', creating and caching
+// it on first use. The symbol is "__qmljs_native_" + the IR name, except the
+// special "%entry" function, which keeps its name verbatim.
+llvm::Function *InstructionSelection::getLLVMFunction(IR::Function *function)
+{
+    llvm::Function *&cached = _functionMap[function];
+    if (cached)
+        return cached;
+
+    QString symbolName = QStringLiteral("__qmljs_native_");
+    if (function->name) {
+        if (*function->name == QStringLiteral("%entry"))
+            symbolName = *function->name;
+        else
+            symbolName += *function->name;
+    }
+    cached = llvm::Function::Create(_functionTy, llvm::Function::ExternalLinkage, // ### make it internal
+                                    qPrintable(symbolName), _llvmModule);
+    return cached;
+}
+
+// Translate one IR::Function into LLVM IR. All per-function state members
+// (_llvmFunction, _function, _tempMap, _blockMap, _allocaInsertPoint, _block)
+// are saved with qSwap on entry and restored symmetrically on exit, so this
+// method is safe to use even if functions are compiled recursively.
+llvm::Function *InstructionSelection::compileLLVMFunction(IR::Function *function)
+{
+    llvm::Function *llvmFunction = getLLVMFunction(function);
+
+    QHash<IR::BasicBlock *, llvm::BasicBlock *> blockMap;
+    QVector<llvm::Value *> tempMap;
+
+    // Swap in fresh per-function state; the old state lives in the locals.
+    qSwap(_llvmFunction, llvmFunction);
+    qSwap(_function, function);
+    qSwap(_tempMap, tempMap);
+    qSwap(_blockMap, blockMap);
+
+    // create the LLVM blocks
+    foreach (IR::BasicBlock *block, _function->basicBlocks)
+        (void) getLLVMBasicBlock(block);
+
+    // entry block
+    SetInsertPoint(getLLVMBasicBlock(_function->basicBlocks.first()));
+
+    // Dummy no-op instruction used only as an anchor so newLLVMTemp() can
+    // insert allocas into the entry block; it is erased again below.
+    llvm::Instruction *allocaInsertPoint = new llvm::BitCastInst(llvm::UndefValue::get(getInt32Ty()),
+                                                                 getInt32Ty(), "", GetInsertBlock());
+    qSwap(_allocaInsertPoint, allocaInsertPoint);
+
+    // One stack slot per IR temp ...
+    for (int i = 0; i < _function->tempCount; ++i) {
+        llvm::AllocaInst *t = newLLVMTemp(_valueTy);
+        _tempMap.append(t);
+    }
+
+    // ... each zero-initialized before any statement runs.
+    foreach (llvm::Value *t, _tempMap) {
+        CreateStore(llvm::Constant::getNullValue(_valueTy), t);
+    }
+
+//    CreateCall(getRuntimeFunction("__qmljs_llvm_init_this_object"),
+//               _llvmFunction->arg_begin());
+
+    // Lower every statement of every basic block via the StmtVisitor methods.
+    foreach (IR::BasicBlock *block, _function->basicBlocks) {
+        qSwap(_block, block);
+        SetInsertPoint(getLLVMBasicBlock(_block));
+        foreach (IR::Stmt *s, _block->statements)
+            s->accept(this);
+        qSwap(_block, block);
+    }
+
+    qSwap(_allocaInsertPoint, allocaInsertPoint);
+
+    // Remove the alloca anchor now that all allocas have been placed.
+    allocaInsertPoint->eraseFromParent();
+
+    // Restore the caller's per-function state (reverse order of the swaps).
+    qSwap(_blockMap, blockMap);
+    qSwap(_tempMap, tempMap);
+    qSwap(_function, function);
+    qSwap(_llvmFunction, llvmFunction);
+
+    // Validate the generated code, checking for consistency.
+    llvm::verifyFunction(*llvmFunction);
+    // Optimize the function.
+    if (_fpm)
+        _fpm->run(*llvmFunction);
+
+    return llvmFunction;
+}
+
+// Return the llvm::BasicBlock mirroring the IR block, creating and caching
+// an unnamed block in the current function on first use.
+llvm::BasicBlock *InstructionSelection::getLLVMBasicBlock(IR::BasicBlock *block)
+{
+    llvm::BasicBlock *&cached = _blockMap[block];
+    if (cached)
+        return cached;
+    cached = llvm::BasicBlock::Create(getContext(), llvm::Twine(), _llvmFunction);
+    return cached;
+}
+
+// Return the stack slot holding the value of 'expr'. Only IR::Temp
+// expressions are supported so far; anything else trips the assert and
+// falls through to a placeholder slot (dead code, kept for the TODO).
+llvm::Value *InstructionSelection::getLLVMTempReference(IR::Expr *expr)
+{
+    if (IR::Temp *t = expr->asTemp())
+        return getLLVMTemp(t);
+
+    assert(!"TODO!");
+    llvm::Value *addr = newLLVMTemp(_valueTy);
+//    CreateStore(getLLVMValue(expr), addr);
+    return addr;
+}
+
+// Lower 'expr' to an i1 condition by calling the runtime's ToBoolean
+// conversion on the temp holding its value. Non-temp expressions are not
+// implemented yet (assert + unreachable; the #if 0 block sketches the plan).
+llvm::Value *InstructionSelection::getLLVMCondition(IR::Expr *expr)
+{
+    llvm::Value *value = 0;
+    if (IR::Temp *t = expr->asTemp()) {
+        value = getLLVMTemp(t);
+    } else {
+        assert(!"TODO!");
+        Q_UNREACHABLE();
+
+#if 0
+        value = getLLVMValue(expr);
+        if (! value) {
+            Q_UNIMPLEMENTED();
+            return getInt1(false);
+        }
+
+        llvm::Value *tmp = newLLVMTemp(_valueTy);
+        CreateStore(value, tmp);
+        value = tmp;
+#endif
+    }
+
+    // Delegate JS truthiness rules to the runtime.
+    return CreateCall2(getRuntimeFunction("__qmljs_llvm_to_boolean"),
+                       _llvmFunction->arg_begin(),
+                       value);
+}
+
+// Return the storage for an IR temp. Non-negative indices map directly to
+// entry-block stack slots; negative indices denote formal parameters
+// (-1 -> argument 0, -2 -> argument 1, ...), fetched through the runtime.
+llvm::Value *InstructionSelection::getLLVMTemp(IR::Temp *temp)
+{
+    if (temp->index >= 0)
+        return _tempMap[temp->index];
+
+    const int argumentIndex = -temp->index - 1;
+    return CreateCall2(getRuntimeFunction("__qmljs_llvm_get_argument"),
+                       _llvmFunction->arg_begin(), getInt32(argumentIndex));
+}
+
+// Return a pointer to a global UTF-8 string constant for 's', creating and
+// caching it on first use.
+llvm::Value *InstructionSelection::getStringPtr(const QString &s)
+{
+    llvm::Value *&value = _stringMap[s];
+    if (! value) {
+        const QByteArray bytes = s.toUtf8();
+        // 'value' is a reference into _stringMap, so this assignment already
+        // updates the cache — no separate map store is needed.
+        value = CreateGlobalStringPtr(llvm::StringRef(bytes.constData(), bytes.size()));
+    }
+    return value;
+}
+
+// Convert 's' into a runtime identifier by passing its cached UTF-8 global
+// through __qmljs_identifier_from_utf8.
+llvm::Value *InstructionSelection::getIdentifier(const QString &s)
+{
+    llvm::Value *utf8Ptr = getStringPtr(s);
+    return CreateCall2(getRuntimeFunction("__qmljs_identifier_from_utf8"),
+                       _llvmFunction->arg_begin(), utf8Ptr);
+}
+
+// Unconditional jump: a plain branch to the target's LLVM block.
+void InstructionSelection::visitJump(IR::Jump *s)
+{
+    llvm::BasicBlock *targetBlock = getLLVMBasicBlock(s->target);
+    CreateBr(targetBlock);
+}
+
+// Conditional jump: evaluate the condition to i1, then branch to the
+// iftrue/iffalse blocks.
+void InstructionSelection::visitCJump(IR::CJump *s)
+{
+    llvm::Value *condition = getLLVMCondition(s->cond);
+    llvm::BasicBlock *thenBlock = getLLVMBasicBlock(s->iftrue);
+    llvm::BasicBlock *elseBlock = getLLVMBasicBlock(s->iffalse);
+    CreateCondBr(condition, thenBlock, elseBlock);
+}
+
+// Return statement: hand the result temp to the runtime (which records the
+// JS return value in the context), then emit a void LLVM return.
+void InstructionSelection::visitRet(IR::Ret *s)
+{
+    IR::Temp *returnTemp = s->expr->asTemp();
+    assert(returnTemp != 0); // only temps can be returned at this point
+    llvm::Value *returnValue = getLLVMTemp(returnTemp);
+    CreateCall2(getRuntimeFunction("__qmljs_llvm_return"),
+                _llvmFunction->arg_begin(), returnValue);
+    CreateRetVoid();
+}
+
+// Exception handling is not implemented in the LLVM backend yet; reaching
+// this visitor is a hard error in debug builds.
+void InstructionSelection::visitTry(IR::Try *)
+{
+    // TODO
+    assert(!"TODO!");
+    Q_UNREACHABLE();
+}
+
+#if 0
+// Disabled: old expression-visitor path. Initializes a fresh temp with a
+// string constant via the runtime and leaves the loaded value in _llvmValue.
+void InstructionSelection::visitString(IR::String *e)
+{
+    llvm::Value *tmp = newLLVMTemp(_valueTy);
+    CreateCall3(getRuntimeFunction("__qmljs_llvm_init_string"),
+                _llvmFunction->arg_begin(), tmp,
+                getStringPtr(*e->value));
+    _llvmValue = CreateLoad(tmp);
+}
+#endif
+
+// Allocate stack storage of 'type' ('size' elements when non-null). The
+// alloca is inserted before _allocaInsertPoint, which compileLLVMFunction()
+// placed in the entry block, so allocas never land in loop bodies.
+llvm::AllocaInst *InstructionSelection::newLLVMTemp(llvm::Type *type, llvm::Value *size)
+{
+    return new llvm::AllocaInst(type, size, llvm::Twine(), _allocaInsertPoint);
+}
+
+// Count the actual arguments in 'exprs' (returned through 'argc') and
+// allocate stack space for them; returns a null Value pointer when the
+// call has no arguments.
+llvm::Value * InstructionSelection::genArguments(IR::ExprList *exprs, int &argc)
+{
+    llvm::Value *args = 0;
+
+    argc = 0;
+    for (IR::ExprList *it = exprs; it; it = it->next)
+        ++argc;
+
+    if (argc)
+        args = newLLVMTemp(_valueTy, getInt32(argc));
+    else
+        args = llvm::Constant::getNullValue(_valueTy->getPointerTo());
+
+    // NOTE(review): the store loop below is commented out, so the allocated
+    // argument slots are handed to the runtime uninitialized and 'i' is
+    // unused — presumably an unfinished port; confirm before relying on
+    // calls that take arguments.
+    int i = 0;
+    for (IR::ExprList *it = exprs; it; it = it->next) {
+//        llvm::Value *arg = getLLVMValue(it->expr);
+//        CreateStore(arg, CreateConstGEP1_32(args, i++));
+    }
+
+    return args;
+}
+
+// Emit a property call base.name(args...) via the runtime; the outcome is
+// stored in 'result' (a fresh temp when none is supplied) and loaded into
+// _llvmValue.
+void InstructionSelection::genCallMember(IR::Call *e, llvm::Value *result)
+{
+    if (! result)
+        result = newLLVMTemp(_valueTy);
+
+    IR::Member *member = e->base->asMember();
+    llvm::Value *receiver = getLLVMTemp(member->base->asTemp());
+    llvm::Value *propertyName = getIdentifier(*member->name);
+
+    int argc = 0;
+    llvm::Value *argArray = genArguments(e->args, argc);
+
+    llvm::Value *actuals[] = {
+        _llvmFunction->arg_begin(),
+        result,
+        receiver,
+        propertyName,
+        argArray,
+        getInt32(argc)
+    };
+    CreateCall(getRuntimeFunction("__qmljs_llvm_call_property"), llvm::ArrayRef<llvm::Value *>(actuals));
+    _llvmValue = CreateLoad(result);
+}
+
+// Emit a constructor call 'new base.name(args...)' via the runtime; the new
+// object is stored in 'result' (a fresh temp when none is supplied) and
+// loaded into _llvmValue.
+void InstructionSelection::genConstructMember(IR::New *e, llvm::Value *result)
+{
+    if (! result)
+        result = newLLVMTemp(_valueTy);
+
+    IR::Member *member = e->base->asMember();
+    llvm::Value *receiver = getLLVMTemp(member->base->asTemp());
+    llvm::Value *propertyName = getIdentifier(*member->name);
+
+    int argc = 0;
+    llvm::Value *argArray = genArguments(e->args, argc);
+
+    llvm::Value *actuals[] = {
+        _llvmFunction->arg_begin(),
+        result,
+        receiver,
+        propertyName,
+        argArray,
+        getInt32(argc)
+    };
+    CreateCall(getRuntimeFunction("__qmljs_llvm_construct_property"), llvm::ArrayRef<llvm::Value *>(actuals));
+    _llvmValue = CreateLoad(result);
+}
+
+// Emit a call through a function value held in a temp; the outcome is stored
+// in 'result' (a fresh temp when none is supplied) and loaded into _llvmValue.
+void InstructionSelection::genCallTemp(IR::Call *e, llvm::Value *result)
+{
+    if (! result)
+        result = newLLVMTemp(_valueTy);
+
+    llvm::Value *callee = getLLVMTempReference(e->base);
+
+    int argc = 0;
+    llvm::Value *argArray = genArguments(e->args, argc);
+
+    // No receiver for a plain value call: pass a null this-object.
+    llvm::Value *nullThis = llvm::Constant::getNullValue(_valueTy->getPointerTo());
+
+    llvm::Value *actuals[] = {
+        _llvmFunction->arg_begin(),
+        result,
+        nullThis,
+        callee,
+        argArray,
+        getInt32(argc)
+    };
+    CreateCall(getRuntimeFunction("__qmljs_llvm_call_value"), actuals);
+
+    _llvmValue = CreateLoad(result);
+}
+
+// Emit 'new f(args...)' where f is a function value held in a temp; the new
+// object is stored in 'result' (a fresh temp when none is supplied) and
+// loaded into _llvmValue.
+void InstructionSelection::genConstructTemp(IR::New *e, llvm::Value *result)
+{
+    if (! result)
+        result = newLLVMTemp(_valueTy);
+
+    llvm::Value *constructor = getLLVMTempReference(e->base);
+
+    int argc = 0;
+    llvm::Value *argArray = genArguments(e->args, argc);
+
+    llvm::Value *actuals[] = {
+        _llvmFunction->arg_begin(),
+        result,
+        constructor,
+        argArray,
+        getInt32(argc)
+    };
+    CreateCall(getRuntimeFunction("__qmljs_llvm_construct_value"), actuals);
+
+    _llvmValue = CreateLoad(result);
+}
+
+// Emit a call through a name. A name with an id is a regular activation
+// property call; a name without an id selects one of the language builtins
+// (typeof, throw, delete, foreach helpers, ...). The outcome (where one
+// exists) is stored in 'result' and loaded into _llvmValue.
+void InstructionSelection::genCallName(IR::Call *e, llvm::Value *result)
+{
+    IR::Name *base = e->base->asName();
+
+    if (! result)
+        result = newLLVMTemp(_valueTy);
+
+    if (! base->id) {
+        switch (base->builtin) {
+        case IR::Name::builtin_invalid:
+            // NOTE(review): falls out of the switch and returns without
+            // emitting anything, leaving _llvmValue untouched — presumably
+            // unreachable when id is null; confirm.
+            break;
+
+        case IR::Name::builtin_typeof:
+            CreateCall3(getRuntimeFunction("__qmljs_llvm_typeof"),
+                        _llvmFunction->arg_begin(), result, getLLVMTempReference(e->args->expr));
+            _llvmValue = CreateLoad(result);
+            return;
+
+        case IR::Name::builtin_throw:
+            CreateCall2(getRuntimeFunction("__qmljs_llvm_throw"),
+                        _llvmFunction->arg_begin(), getLLVMTempReference(e->args->expr));
+            // throw produces no value; leave an undef marker.
+            _llvmValue = llvm::UndefValue::get(_valueTy);
+            return;
+
+        case IR::Name::builtin_finish_try:
+            // ### FIXME.
+            return;
+
+        case IR::Name::builtin_foreach_iterator_object:
+            CreateCall3(getRuntimeFunction("__qmljs_llvm_foreach_iterator_object"),
+                        _llvmFunction->arg_begin(), result, getLLVMTempReference(e->args->expr));
+            _llvmValue = CreateLoad(result);
+            return;
+
+        case IR::Name::builtin_foreach_next_property_name:
+            // NOTE(review): unlike the sibling builtins, no context argument
+            // (_llvmFunction->arg_begin()) is passed here — confirm the
+            // runtime function really takes only (result, iterator).
+            CreateCall2(getRuntimeFunction("__qmljs_llvm_foreach_next_property_name"),
+                        result, getLLVMTempReference(e->args->expr));
+            _llvmValue = CreateLoad(result);
+            return;
+
+        case IR::Name::builtin_delete: {
+            // 'delete' dispatches on the operand's syntactic form:
+            // subscript, member, plain name, or arbitrary value.
+            if (IR::Subscript *subscript = e->args->expr->asSubscript()) {
+                CreateCall4(getRuntimeFunction("__qmljs_llvm_delete_subscript"),
+                            _llvmFunction->arg_begin(),
+                            result,
+                            getLLVMTempReference(subscript->base),
+                            getLLVMTempReference(subscript->index));
+                _llvmValue = CreateLoad(result);
+                return;
+            } else if (IR::Member *member = e->args->expr->asMember()) {
+                CreateCall4(getRuntimeFunction("__qmljs_llvm_delete_member"),
+                            _llvmFunction->arg_begin(),
+                            result,
+                            getLLVMTempReference(member->base),
+                            getIdentifier(*member->name));
+                _llvmValue = CreateLoad(result);
+                return;
+            } else if (IR::Name *name = e->args->expr->asName()) {
+                CreateCall3(getRuntimeFunction("__qmljs_llvm_delete_property"),
+                            _llvmFunction->arg_begin(),
+                            result,
+                            getIdentifier(*name->id));
+                _llvmValue = CreateLoad(result);
+                return;
+            } else {
+                CreateCall3(getRuntimeFunction("__qmljs_llvm_delete_value"),
+                            _llvmFunction->arg_begin(),
+                            result,
+                            getLLVMTempReference(e->args->expr));
+                _llvmValue = CreateLoad(result);
+                return;
+            }
+        } break;
+
+        default:
+            Q_UNREACHABLE();
+        }
+    } else {
+        // Regular named call: look the function up as an activation property.
+        llvm::Value *name = getIdentifier(*base->id);
+
+        int argc = 0;
+        llvm::Value *args = genArguments(e->args, argc);
+
+        CreateCall5(getRuntimeFunction("__qmljs_llvm_call_activation_property"),
+                    _llvmFunction->arg_begin(), result, name, args, getInt32(argc));
+
+        _llvmValue = CreateLoad(result);
+    }
+}
+
+// Emit 'new Name(args...)' via the runtime's activation-property constructor;
+// the new object is stored in 'result' (a fresh temp when none is supplied)
+// and loaded into _llvmValue. A name without an id cannot be constructed.
+void InstructionSelection::genConstructName(IR::New *e, llvm::Value *result)
+{
+    IR::Name *base = e->base->asName();
+
+    if (! result)
+        result = newLLVMTemp(_valueTy);
+
+    if (! base->id) {
+        Q_UNREACHABLE();
+    } else {
+        llvm::Value *constructorName = getIdentifier(*base->id);
+
+        int argc = 0;
+        llvm::Value *argArray = genArguments(e->args, argc);
+
+        CreateCall5(getRuntimeFunction("__qmljs_llvm_construct_activation_property"),
+                    _llvmFunction->arg_begin(), result, constructorName, argArray, getInt32(argc));
+
+        _llvmValue = CreateLoad(result);
+    }
+}
+
+#if 0
+// Disabled: old expression-visitor dispatch for calls, by syntactic form of
+// the callee.
+void InstructionSelection::visitCall(IR::Call *e)
+{
+    if (e->base->asMember()) {
+        genCallMember(e);
+    } else if (e->base->asTemp()) {
+        genCallTemp(e);
+    } else if (e->base->asName()) {
+        genCallName(e);
+    } else if (IR::Temp *t = e->base->asTemp()) {
+        // NOTE(review): this branch is unreachable — asTemp() was already
+        // tested above, so genCallTemp() always wins. Dead code kept only
+        // because the whole function is #if 0'd out.
+        llvm::Value *base = getLLVMTemp(t);
+
+        int argc = 0;
+        llvm::Value *args = genArguments(e->args, argc);
+
+        llvm::Value *result = newLLVMTemp(_valueTy);
+        CreateStore(llvm::Constant::getNullValue(_valueTy), result);
+        CreateCall5(getRuntimeFunction("__qmljs_llvm_call_value"),
+                    _llvmFunction->arg_begin(), result, base, args, getInt32(argc));
+        _llvmValue = CreateLoad(result);
+    } else {
+        Q_UNIMPLEMENTED();
+    }
+}
+#endif
+
+#if 0
+// Disabled: old expression-visitor dispatch for 'new', by syntactic form of
+// the constructor expression.
+void InstructionSelection::visitNew(IR::New *e)
+{
+    if (e->base->asMember()) {
+        genConstructMember(e);
+    } else if (e->base->asTemp()) {
+        genConstructTemp(e);
+    } else if (e->base->asName()) {
+        genConstructName(e);
+    } else if (IR::Temp *t = e->base->asTemp()) {
+        // NOTE(review): unreachable — asTemp() already matched above, so
+        // genConstructTemp() shadows this branch (same pattern as visitCall).
+        llvm::Value *base = getLLVMTemp(t);
+
+        int argc = 0;
+        llvm::Value *args = genArguments(e->args, argc);
+
+        llvm::Value *result = newLLVMTemp(_valueTy);
+        CreateStore(llvm::Constant::getNullValue(_valueTy), result);
+        CreateCall5(getRuntimeFunction("__qmljs_llvm_construct_value"),
+                    _llvmFunction->arg_begin(), result, base, args, getInt32(argc));
+        _llvmValue = CreateLoad(result);
+    } else {
+        Q_UNIMPLEMENTED();
+    }
+}
+#endif
+
+#if 0
+// Disabled: old expression-visitor path. Reads base[index] through the
+// runtime into a fresh temp and loads it into _llvmValue.
+void InstructionSelection::visitSubscript(IR::Subscript *e)
+{
+    llvm::Value *result = newLLVMTemp(_valueTy);
+    llvm::Value *base = getLLVMTempReference(e->base);
+    llvm::Value *index = getLLVMTempReference(e->index);
+    CreateCall4(getRuntimeFunction("__qmljs_llvm_get_element"),
+                _llvmFunction->arg_begin(), result, base, index);
+    _llvmValue = CreateLoad(result);
+}
+#endif
+
+#if 0
+// Disabled: old expression-visitor path. Reads base.name through the
+// runtime into a fresh temp and loads it into _llvmValue.
+void InstructionSelection::visitMember(IR::Member *e)
+{
+    llvm::Value *result = newLLVMTemp(_valueTy);
+    llvm::Value *base = getLLVMTempReference(e->base);
+    llvm::Value *name = getIdentifier(*e->name);
+
+    CreateCall4(getRuntimeFunction("__qmljs_llvm_get_property"),
+                _llvmFunction->arg_begin(), result, base, name);
+    _llvmValue = CreateLoad(result);
+}
+#endif
+
+// Look up a runtime entry point declared in the module. A missing function
+// is a build/configuration error: report it on stderr and assert. Note the
+// assert compiles out under NDEBUG, in which case null is returned.
+llvm::Function *InstructionSelection::getRuntimeFunction(llvm::StringRef str)
+{
+    llvm::Function *runtimeFunc = _llvmModule->getFunction(str);
+    if (runtimeFunc)
+        return runtimeFunc;
+
+    std::cerr << "Cannot find runtime function \""
+              << str.str() << "\"!" << std::endl;
+    assert(runtimeFunc);
+    return runtimeFunc;
+}
+
+// Materialize the IR constant 'e' into a fresh stack slot, initialized via
+// the matching runtime init function, and return the slot.
+llvm::Value *InstructionSelection::createValue(IR::Const *e)
+{
+    llvm::Value *slot = newLLVMTemp(_valueTy);
+
+    switch (e->type) {
+    case IR::UndefinedType:
+        CreateCall(getRuntimeFunction("__qmljs_llvm_init_undefined"), slot);
+        break;
+
+    case IR::NullType:
+        CreateCall(getRuntimeFunction("__qmljs_llvm_init_null"), slot);
+        break;
+
+    case IR::BoolType:
+        CreateCall2(getRuntimeFunction("__qmljs_llvm_init_boolean"), slot,
+                    getInt1(e->value ? 1 : 0));
+        break;
+
+    case IR::NumberType:
+        CreateCall2(getRuntimeFunction("__qmljs_llvm_init_number"), slot,
+                    llvm::ConstantFP::get(_numberTy, e->value));
+        break;
+
+    default:
+        // Only the four primitive constant types can appear here.
+        Q_UNREACHABLE();
+    }
+
+    return slot;
+}
+
+// Return a pointer to storage holding the value of 'e': the existing slot
+// for a temp, or a freshly-initialized slot for a constant. Other expression
+// kinds are a codegen error.
+llvm::Value *InstructionSelection::toValuePtr(IR::Expr *e)
+{
+    if (IR::Temp *asTemp = e->asTemp())
+        return getLLVMTemp(asTemp);
+    if (IR::Const *asConst = e->asConst())
+        return createValue(asConst);
+    Q_UNREACHABLE();
+}
+
+// Build a stack array of runtime String pointers from 'strings'; null QString
+// pointers become null elements. 'arrayName'/'elementName' only label the
+// generated IR values.
+llvm::Value *InstructionSelection::genStringList(const QList<const QString *> &strings, const char *arrayName, const char *elementName)
+{
+    const int count = strings.size();
+    llvm::Value *array = CreateAlloca(_stringPtrTy, getInt32(count),
+                                      arrayName);
+    for (int idx = 0; idx < count; ++idx) {
+        const QString *entry = strings.at(idx);
+        llvm::Value *element = entry ? getIdentifier(*entry)
+                                     : llvm::Constant::getNullValue(_stringPtrTy);
+        llvm::Value *slot = CreateGEP(array, getInt32(idx), elementName);
+        CreateStore(element, slot);
+    }
+
+    return array;
+}
diff --git a/src/qml/qml/v4vm/qv4isel_llvm_p.h b/src/qml/qml/v4vm/qv4isel_llvm_p.h
new file mode 100644
index 0000000000..00b6527e6a
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4isel_llvm_p.h
@@ -0,0 +1,177 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4ISEL_LLVM_P_H
+#define QV4ISEL_LLVM_P_H
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunused-parameter"
+#endif // __clang__
+
+#include <llvm/Module.h>
+#include <llvm/PassManager.h>
+#include <llvm/IRBuilder.h>
+
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif // __clang__
+
+#include "qv4isel_p.h"
+#include "qv4jsir_p.h"
+
+namespace QQmlJS {
+namespace LLVM {
+
+// LLVM-based instruction selection: lowers the QML/JS IR into an
+// llvm::Module by emitting calls to __qmljs_llvm_* runtime functions.
+// Inherits llvm::IRBuilder<> for IR emission and IR::InstructionSelection
+// for the visitor interface driven by the IR statements.
+class InstructionSelection:
+    public llvm::IRBuilder<>,
+    public IR::InstructionSelection
+{
+public:
+    InstructionSelection(llvm::LLVMContext &context);
+
+    // Entry point: compile every function of 'module' into 'llvmModule',
+    // optionally running 'fpm' over each generated function.
+    void buildLLVMModule(IR::Module *module, llvm::Module *llvmModule, llvm::FunctionPassManager *fpm);
+
+public: // methods from InstructionSelection:
+    virtual void callBuiltinInvalid(IR::Name *func, IR::ExprList *args, IR::Temp *result);
+    virtual void callBuiltinTypeofMember(IR::Temp *base, const QString &name, IR::Temp *result);
+    virtual void callBuiltinTypeofSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result);
+    virtual void callBuiltinTypeofName(const QString &name, IR::Temp *result);
+    virtual void callBuiltinTypeofValue(IR::Temp *value, IR::Temp *result);
+    virtual void callBuiltinDeleteMember(IR::Temp *base, const QString &name, IR::Temp *result);
+    virtual void callBuiltinDeleteSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result);
+    virtual void callBuiltinDeleteName(const QString &name, IR::Temp *result);
+    virtual void callBuiltinDeleteValue(IR::Temp *result);
+    virtual void callBuiltinPostDecrementMember(IR::Temp *base, const QString &name, IR::Temp *result);
+    virtual void callBuiltinPostDecrementSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result);
+    virtual void callBuiltinPostDecrementName(const QString &name, IR::Temp *result);
+    virtual void callBuiltinPostDecrementValue(IR::Temp *value, IR::Temp *result);
+    virtual void callBuiltinPostIncrementMember(IR::Temp *base, const QString &name, IR::Temp *result);
+    virtual void callBuiltinPostIncrementSubscript(IR::Temp *base, IR::Temp *index, IR::Temp *result);
+    virtual void callBuiltinPostIncrementName(const QString &name, IR::Temp *result);
+    virtual void callBuiltinPostIncrementValue(IR::Temp *value, IR::Temp *result);
+    virtual void callBuiltinThrow(IR::Temp *arg);
+    virtual void callBuiltinCreateExceptionHandler(IR::Temp *result);
+    virtual void callBuiltinFinishTry();
+    virtual void callBuiltinForeachIteratorObject(IR::Temp *arg, IR::Temp *result);
+    virtual void callBuiltinForeachNextPropertyname(IR::Temp *arg, IR::Temp *result);
+    virtual void callBuiltinPushWithScope(IR::Temp *arg);
+    virtual void callBuiltinPopScope();
+    virtual void callBuiltinDeclareVar(bool deletable, const QString &name);
+    virtual void callBuiltinDefineGetterSetter(IR::Temp *object, const QString &name, IR::Temp *getter, IR::Temp *setter);
+    virtual void callBuiltinDefineProperty(IR::Temp *object, const QString &name, IR::Temp *value);
+    virtual void callBuiltinDefineArray(IR::Temp *result, IR::ExprList *args);
+    virtual void callValue(IR::Temp *value, IR::ExprList *args, IR::Temp *result);
+    virtual void callProperty(IR::Temp *base, const QString &name, IR::ExprList *args, IR::Temp *result);
+    virtual void callSubscript(IR::Temp *base, IR::Temp *index, IR::ExprList *args, IR::Temp *result);
+    virtual void constructActivationProperty(IR::Name *func, IR::ExprList *args, IR::Temp *result);
+    virtual void constructProperty(IR::Temp *base, const QString &name, IR::ExprList *args, IR::Temp *result);
+    virtual void constructValue(IR::Temp *value, IR::ExprList *args, IR::Temp *result);
+    virtual void loadThisObject(IR::Temp *temp);
+    virtual void loadConst(IR::Const *con, IR::Temp *temp);
+    virtual void loadString(const QString &str, IR::Temp *targetTemp);
+    virtual void loadRegexp(IR::RegExp *sourceRegexp, IR::Temp *targetTemp);
+    virtual void getActivationProperty(const QString &name, IR::Temp *temp);
+    virtual void setActivationProperty(IR::Temp *source, const QString &targetName);
+    virtual void initClosure(IR::Closure *closure, IR::Temp *target);
+    virtual void getProperty(IR::Temp *sourceBase, const QString &sourceName, IR::Temp *target);
+    virtual void setProperty(IR::Temp *source, IR::Temp *targetBase, const QString &targetName);
+    virtual void getElement(IR::Temp *sourceBase, IR::Temp *sourceIndex, IR::Temp *target);
+    virtual void setElement(IR::Temp *source, IR::Temp *targetBase, IR::Temp *targetIndex);
+    virtual void copyValue(IR::Temp *sourceTemp, IR::Temp *targetTemp);
+    virtual void unop(IR::AluOp oper, IR::Temp *sourceTemp, IR::Temp *targetTemp);
+    virtual void binop(IR::AluOp oper, IR::Temp *leftSource, IR::Temp *rightSource, IR::Temp *target);
+    virtual void inplaceNameOp(IR::AluOp oper, IR::Temp *rightSource, const QString &targetName);
+    virtual void inplaceElementOp(IR::AluOp oper, IR::Temp *source, IR::Temp *targetBaseTemp, IR::Temp *targetIndexTemp);
+    virtual void inplaceMemberOp(IR::AluOp oper, IR::Temp *source, IR::Temp *targetBase, const QString &targetName);
+
+public: // visitor methods for StmtVisitor:
+    virtual void visitJump(IR::Jump *);
+    virtual void visitCJump(IR::CJump *);
+    virtual void visitRet(IR::Ret *);
+    virtual void visitTry(IR::Try *);
+
+private:
+    // Code-generation helpers; see the implementation file for details.
+    llvm::Function *getRuntimeFunction(llvm::StringRef str);
+    llvm::Function *getLLVMFunction(IR::Function *function);
+    llvm::Function *compileLLVMFunction(IR::Function *function);
+    llvm::BasicBlock *getLLVMBasicBlock(IR::BasicBlock *block);
+    llvm::Value *getLLVMTempReference(IR::Expr *expr);
+    llvm::Value *getLLVMCondition(IR::Expr *expr);
+    llvm::Value *getLLVMTemp(IR::Temp *temp);
+    llvm::Value *getStringPtr(const QString &s);
+    llvm::Value *getIdentifier(const QString &s);
+    llvm::AllocaInst *newLLVMTemp(llvm::Type *type, llvm::Value *size = 0);
+    llvm::Value * genArguments(IR::ExprList *args, int &argc);
+    void genCallTemp(IR::Call *e, llvm::Value *result = 0);
+    void genCallName(IR::Call *e, llvm::Value *result = 0);
+    void genCallMember(IR::Call *e, llvm::Value *result = 0);
+    void genConstructTemp(IR::New *e, llvm::Value *result = 0);
+    void genConstructName(IR::New *e, llvm::Value *result = 0);
+    void genConstructMember(IR::New *e, llvm::Value *result = 0);
+    llvm::Value *createValue(IR::Const *e);
+    llvm::Value *toValuePtr(IR::Expr *e);
+    llvm::Value *genStringList(const QList<const QString *> &strings,
+                               const char *arrayName, const char *elementName);
+
+
+private:
+    // Per-module state.
+    llvm::Module *_llvmModule;
+    // Per-function codegen state, swapped in/out by compileLLVMFunction().
+    llvm::Function *_llvmFunction;
+    llvm::Value *_llvmValue;
+    // Cached LLVM types for runtime values, contexts, strings and functions.
+    llvm::Type *_numberTy;
+    llvm::Type *_valueTy;
+    llvm::Type *_contextPtrTy;
+    llvm::Type *_stringPtrTy;
+    llvm::FunctionType *_functionTy;
+    // Anchor instruction keeping allocas in the entry block.
+    llvm::Instruction *_allocaInsertPoint;
+    IR::Function *_function;
+    IR::BasicBlock *_block;
+    // Caches: IR entity -> generated LLVM entity.
+    QHash<IR::Function *, llvm::Function *> _functionMap;
+    QHash<IR::BasicBlock *, llvm::BasicBlock *> _blockMap;
+    QVector<llvm::Value *> _tempMap;
+    QHash<QString, llvm::Value *> _stringMap;
+    // Optional per-function optimization pipeline (may be null).
+    llvm::FunctionPassManager *_fpm;
+};
+
+} // LLVM namespace
+} // QQmlJS namespace
+
+#endif // QV4ISEL_LLVM_P_H
diff --git a/src/qml/qml/v4vm/qv4isel_masm.cpp b/src/qml/qml/v4vm/qv4isel_masm.cpp
new file mode 100644
index 0000000000..a9d41ca32e
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4isel_masm.cpp
@@ -0,0 +1,1252 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4isel_masm_p.h"
+#include "qv4runtime.h"
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include "qv4regexpobject.h"
+#include "qv4unwindhelper.h"
+#include "qv4lookup.h"
+
+#include <assembler/LinkBuffer.h>
+#include <WTFStubs.h>
+
+#include <iostream>
+#include <cassert>
+
+#if USE(UDIS86)
+# include <udis86.h>
+#endif
+
+using namespace QQmlJS;
+using namespace QQmlJS::MASM;
+using namespace QQmlJS::VM;
+
+/* Platform/Calling convention/Architecture specific section */
+
+#if CPU(X86_64)
+static const Assembler::RegisterID calleeSavedRegisters[] = {
+ // Not used: JSC::X86Registers::rbx,
+ // Not used: JSC::X86Registers::r10,
+ JSC::X86Registers::r12, // LocalsRegister
+ // Not used: JSC::X86Registers::r13,
+ JSC::X86Registers::r14 // ContextRegister
+ // Not used: JSC::X86Registers::r15,
+};
+#endif
+
+#if CPU(X86)
+static const Assembler::RegisterID calleeSavedRegisters[] = {
+ // Not used: JSC::X86Registers::ebx,
+ JSC::X86Registers::esi, // ContextRegister
+ JSC::X86Registers::edi // LocalsRegister
+};
+#endif
+
+#if CPU(ARM)
+static const Assembler::RegisterID calleeSavedRegisters[] = {
+ // ### FIXME: remove unused registers.
+ // Keep these in reverse order and make sure to also edit the unwind program in
+ // qv4unwindhelper_p-arm.h when changing this list.
+ JSC::ARMRegisters::r12,
+ JSC::ARMRegisters::r10,
+ JSC::ARMRegisters::r9,
+ JSC::ARMRegisters::r8,
+ JSC::ARMRegisters::r7,
+ JSC::ARMRegisters::r6,
+ JSC::ARMRegisters::r5,
+ JSC::ARMRegisters::r4
+};
+#endif
+
+const int Assembler::calleeSavedRegisterCount = sizeof(calleeSavedRegisters) / sizeof(calleeSavedRegisters[0]);
+
+/* End of platform/calling convention/architecture specific section */
+
+
+const Assembler::VoidType Assembler::Void;
+
+Assembler::Assembler(V4IR::Function* function, VM::Function *vmFunction, VM::ExecutionEngine *engine)
+ : _function(function), _vmFunction(vmFunction), _engine(engine)
+{
+}
+
+void Assembler::registerBlock(V4IR::BasicBlock* block)
+{
+ _addrs[block] = label();
+}
+
+void Assembler::jumpToBlock(V4IR::BasicBlock* current, V4IR::BasicBlock *target)
+{
+ if (current->index + 1 != target->index)
+ _patches[target].append(jump());
+}
+
+void Assembler::addPatch(V4IR::BasicBlock* targetBlock, Jump targetJump)
+{
+ _patches[targetBlock].append(targetJump);
+}
+
+void Assembler::addPatch(DataLabelPtr patch, Label target)
+{
+ DataLabelPatch p;
+ p.dataLabel = patch;
+ p.target = target;
+ _dataLabelPatches.append(p);
+}
+
+void Assembler::addPatch(DataLabelPtr patch, V4IR::BasicBlock *target)
+{
+ _labelPatches[target].append(patch);
+}
+
+Assembler::Pointer Assembler::loadTempAddress(RegisterID reg, V4IR::Temp *t)
+{
+ int32_t offset = 0;
+ int scope = t->scope;
+ VM::Function *f = _vmFunction;
+ RegisterID context = ContextRegister;
+ if (scope) {
+ loadPtr(Address(ContextRegister, offsetof(ExecutionContext, outer)), ScratchRegister);
+ --scope;
+ f = f->outer;
+ context = ScratchRegister;
+ while (scope) {
+ loadPtr(Address(context, offsetof(ExecutionContext, outer)), context);
+ f = f->outer;
+ --scope;
+ }
+ }
+ if (t->index < 0) {
+ const int arg = -t->index - 1;
+ loadPtr(Address(context, offsetof(CallContext, arguments)), reg);
+ offset = arg * sizeof(Value);
+ } else if (t->index < f->locals.size()) {
+ loadPtr(Address(context, offsetof(CallContext, locals)), reg);
+ offset = t->index * sizeof(Value);
+ } else {
+ assert(t->scope == 0);
+ const int arg = _function->maxNumberOfArguments + t->index - _function->locals.size() + 1;
+ offset = - sizeof(Value) * (arg + 1);
+ offset -= sizeof(void*) * calleeSavedRegisterCount;
+ reg = LocalsRegister;
+ }
+ return Pointer(reg, offset);
+}
+
+template <typename Result, typename Source>
+void Assembler::copyValue(Result result, Source source)
+{
+#ifdef VALUE_FITS_IN_REGISTER
+ // Use ReturnValueRegister as "scratch" register because loadArgument
+ // and storeArgument are functions that may need a scratch register themselves.
+ loadArgument(source, ReturnValueRegister);
+ storeArgument(ReturnValueRegister, result);
+#else
+ loadDouble(source, FPGpr0);
+ storeDouble(FPGpr0, result);
+#endif
+}
+
+template <typename Result>
+void Assembler::copyValue(Result result, V4IR::Expr* source)
+{
+#ifdef VALUE_FITS_IN_REGISTER
+ // Use ReturnValueRegister as "scratch" register because loadArgument
+ // and storeArgument are functions that may need a scratch register themselves.
+ loadArgument(source, ReturnValueRegister);
+ storeArgument(ReturnValueRegister, result);
+#else
+ if (V4IR::Temp *temp = source->asTemp()) {
+ loadDouble(temp, FPGpr0);
+ storeDouble(FPGpr0, result);
+ } else if (V4IR::Const *c = source->asConst()) {
+ VM::Value v = convertToValue(c);
+ storeValue(v, result);
+ } else {
+ assert(! "not implemented");
+ }
+#endif
+}
+
+
+void Assembler::storeValue(VM::Value value, V4IR::Temp* destination)
+{
+ Address addr = loadTempAddress(ScratchRegister, destination);
+ storeValue(value, addr);
+}
+
+void Assembler::enterStandardStackFrame(int locals)
+{
+ platformEnterStandardStackFrame();
+
+ // ### FIXME: Handle through calleeSavedRegisters mechanism
+ // or eliminate StackFrameRegister altogether.
+ push(StackFrameRegister);
+ move(StackPointerRegister, StackFrameRegister);
+
+ // space for the locals and callee saved registers
+ int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*) * calleeSavedRegisterCount;
+
+#if CPU(X86) || CPU(X86_64)
+ frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
+#endif
+ subPtr(TrustedImm32(frameSize), StackPointerRegister);
+
+ for (int i = 0; i < calleeSavedRegisterCount; ++i)
+ storePtr(calleeSavedRegisters[i], Address(StackFrameRegister, -(i + 1) * sizeof(void*)));
+
+ move(StackFrameRegister, LocalsRegister);
+}
+
+void Assembler::leaveStandardStackFrame(int locals)
+{
+ // restore the callee saved registers
+ for (int i = calleeSavedRegisterCount - 1; i >= 0; --i)
+ loadPtr(Address(StackFrameRegister, -(i + 1) * sizeof(void*)), calleeSavedRegisters[i]);
+
+ // space for the locals and the callee saved registers
+ int32_t frameSize = locals * sizeof(QQmlJS::VM::Value) + sizeof(void*) * calleeSavedRegisterCount;
+#if CPU(X86) || CPU(X86_64)
+ frameSize = (frameSize + 15) & ~15; // align on 16 byte boundaries for MMX
+#endif
+ // Work around bug in ARMv7Assembler.h where add32(imm, sp, sp) doesn't
+ // work well for large immediates.
+#if CPU(ARM_THUMB2)
+ move(TrustedImm32(frameSize), Assembler::ScratchRegister);
+ add32(Assembler::ScratchRegister, StackPointerRegister);
+#else
+ addPtr(TrustedImm32(frameSize), StackPointerRegister);
+#endif
+
+ pop(StackFrameRegister);
+ platformLeaveStandardStackFrame();
+}
+
+
+
+#define OP(op) \
+ { isel_stringIfy(op), op, 0, 0 }
+
+#define INLINE_OP(op, memOp, immOp) \
+ { isel_stringIfy(op), op, memOp, immOp }
+
+#define NULL_OP \
+ { 0, 0, 0, 0 }
+
+const Assembler::BinaryOperationInfo Assembler::binaryOperations[QQmlJS::V4IR::LastAluOp + 1] = {
+ NULL_OP, // OpInvalid
+ NULL_OP, // OpIfTrue
+ NULL_OP, // OpNot
+ NULL_OP, // OpUMinus
+ NULL_OP, // OpUPlus
+ NULL_OP, // OpCompl
+ NULL_OP, // OpIncrement
+ NULL_OP, // OpDecrement
+
+ INLINE_OP(__qmljs_bit_and, &Assembler::inline_and32, &Assembler::inline_and32), // OpBitAnd
+ INLINE_OP(__qmljs_bit_or, &Assembler::inline_or32, &Assembler::inline_or32), // OpBitOr
+ INLINE_OP(__qmljs_bit_xor, &Assembler::inline_xor32, &Assembler::inline_xor32), // OpBitXor
+
+ INLINE_OP(__qmljs_add, &Assembler::inline_add32, &Assembler::inline_add32), // OpAdd
+ INLINE_OP(__qmljs_sub, &Assembler::inline_sub32, &Assembler::inline_sub32), // OpSub
+ INLINE_OP(__qmljs_mul, &Assembler::inline_mul32, &Assembler::inline_mul32), // OpMul
+
+ OP(__qmljs_div), // OpDiv
+ OP(__qmljs_mod), // OpMod
+
+ INLINE_OP(__qmljs_shl, &Assembler::inline_shl32, &Assembler::inline_shl32), // OpLShift
+ INLINE_OP(__qmljs_shr, &Assembler::inline_shr32, &Assembler::inline_shr32), // OpRShift
+ INLINE_OP(__qmljs_ushr, &Assembler::inline_ushr32, &Assembler::inline_ushr32), // OpURShift
+
+ OP(__qmljs_gt), // OpGt
+ OP(__qmljs_lt), // OpLt
+ OP(__qmljs_ge), // OpGe
+ OP(__qmljs_le), // OpLe
+ OP(__qmljs_eq), // OpEqual
+ OP(__qmljs_ne), // OpNotEqual
+ OP(__qmljs_se), // OpStrictEqual
+ OP(__qmljs_sne), // OpStrictNotEqual
+
+ OP(__qmljs_instanceof), // OpInstanceof
+ OP(__qmljs_in), // OpIn
+
+ NULL_OP, // OpAnd
+ NULL_OP // OpOr
+};
+
+void Assembler::generateBinOp(V4IR::AluOp operation, V4IR::Temp* target, V4IR::Temp *left, V4IR::Temp *right)
+{
+ const BinaryOperationInfo& info = binaryOperations[operation];
+ if (!info.fallbackImplementation) {
+ assert(!"unreachable");
+ return;
+ }
+
+ Value leftConst = Value::undefinedValue();
+ Value rightConst = Value::undefinedValue();
+
+ bool canDoInline = info.inlineMemRegOp && info.inlineImmRegOp;
+
+ if (canDoInline) {
+ if (left->asConst()) {
+ leftConst = convertToValue(left->asConst());
+ canDoInline = canDoInline && leftConst.tryIntegerConversion();
+ }
+ if (right->asConst()) {
+ rightConst = convertToValue(right->asConst());
+ canDoInline = canDoInline && rightConst.tryIntegerConversion();
+ }
+ }
+
+ Jump binOpFinished;
+
+ if (canDoInline) {
+
+ Jump leftTypeCheck;
+ if (left->asTemp()) {
+ Address typeAddress = loadTempAddress(ScratchRegister, left->asTemp());
+ typeAddress.offset += offsetof(VM::Value, tag);
+ leftTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
+ }
+
+ Jump rightTypeCheck;
+ if (right->asTemp()) {
+ Address typeAddress = loadTempAddress(ScratchRegister, right->asTemp());
+ typeAddress.offset += offsetof(VM::Value, tag);
+ rightTypeCheck = branch32(NotEqual, typeAddress, TrustedImm32(VM::Value::_Integer_Type));
+ }
+
+ if (left->asTemp()) {
+ Address leftValue = loadTempAddress(ScratchRegister, left->asTemp());
+ leftValue.offset += offsetof(VM::Value, int_32);
+ load32(leftValue, IntegerOpRegister);
+ } else { // left->asConst()
+ move(TrustedImm32(leftConst.integerValue()), IntegerOpRegister);
+ }
+
+ Jump overflowCheck;
+
+ if (right->asTemp()) {
+ Address rightValue = loadTempAddress(ScratchRegister, right->asTemp());
+ rightValue.offset += offsetof(VM::Value, int_32);
+
+ overflowCheck = (this->*info.inlineMemRegOp)(rightValue, IntegerOpRegister);
+ } else { // right->asConst()
+ overflowCheck = (this->*info.inlineImmRegOp)(TrustedImm32(rightConst.integerValue()), IntegerOpRegister);
+ }
+
+ Address resultAddr = loadTempAddress(ScratchRegister, target);
+ Address resultValueAddr = resultAddr;
+ resultValueAddr.offset += offsetof(VM::Value, int_32);
+ store32(IntegerOpRegister, resultValueAddr);
+
+ Address resultTypeAddr = resultAddr;
+ resultTypeAddr.offset += offsetof(VM::Value, tag);
+ store32(TrustedImm32(VM::Value::_Integer_Type), resultTypeAddr);
+
+ binOpFinished = jump();
+
+ if (leftTypeCheck.isSet())
+ leftTypeCheck.link(this);
+ if (rightTypeCheck.isSet())
+ rightTypeCheck.link(this);
+ if (overflowCheck.isSet())
+ overflowCheck.link(this);
+ }
+
+ // Fallback
+ generateFunctionCallImp(Assembler::Void, info.name, info.fallbackImplementation, ContextRegister,
+ Assembler::PointerToValue(target), Assembler::Reference(left), Assembler::Reference(right));
+
+ if (binOpFinished.isSet())
+ binOpFinished.link(this);
+}
+#if OS(LINUX)
+static void printDisassembledOutputWithCalls(const char* output, const QHash<void*, const char*>& functions,
+ const QVector<String*> &identifiers)
+{
+ QByteArray processedOutput(output);
+ for (QHash<void*, const char*>::ConstIterator it = functions.begin(), end = functions.end();
+ it != end; ++it) {
+ QByteArray ptrString = QByteArray::number(quintptr(it.key()), 16);
+ ptrString.prepend("0x");
+ processedOutput = processedOutput.replace(ptrString, it.value());
+ }
+ for (QVector<String*>::ConstIterator it = identifiers.begin(), end = identifiers.end();
+ it != end; ++it) {
+ QByteArray ptrString = QByteArray::number(quintptr(*it), 16);
+ ptrString.prepend("0x");
+ QByteArray replacement = "\"" + (*it)->toQString().toUtf8() + "\"";
+ processedOutput = processedOutput.replace(ptrString, replacement);
+ }
+ fprintf(stderr, "%s\n", processedOutput.constData());
+}
+#endif
+
+void Assembler::link(VM::Function *vmFunc)
+{
+ Label endOfCode = label();
+#ifdef Q_PROCESSOR_ARM
+ // Let the ARM exception table follow right after that
+ for (int i = 0, nops = UnwindHelper::unwindInfoSize() / 2; i < nops; ++i)
+ nop();
+#endif
+
+ {
+ QHashIterator<V4IR::BasicBlock *, QVector<Jump> > it(_patches);
+ while (it.hasNext()) {
+ it.next();
+ V4IR::BasicBlock *block = it.key();
+ Label target = _addrs.value(block);
+ assert(target.isSet());
+ foreach (Jump jump, it.value())
+ jump.linkTo(target, this);
+ }
+ }
+
+ JSC::JSGlobalData dummy(_engine->executableAllocator);
+ JSC::LinkBuffer linkBuffer(dummy, this, 0);
+ vmFunc->codeSize = linkBuffer.offsetOf(endOfCode);
+
+ QHash<void*, const char*> functions;
+ foreach (CallToLink ctl, _callsToLink) {
+ linkBuffer.link(ctl.call, ctl.externalFunction);
+ functions[ctl.externalFunction.value()] = ctl.functionName;
+ }
+
+ foreach (const DataLabelPatch &p, _dataLabelPatches)
+ linkBuffer.patch(p.dataLabel, linkBuffer.locationOf(p.target));
+
+ {
+ QHashIterator<V4IR::BasicBlock *, QVector<DataLabelPtr> > it(_labelPatches);
+ while (it.hasNext()) {
+ it.next();
+ V4IR::BasicBlock *block = it.key();
+ Label target = _addrs.value(block);
+ assert(target.isSet());
+ foreach (DataLabelPtr label, it.value())
+ linkBuffer.patch(label, linkBuffer.locationOf(target));
+ }
+ }
+
+#ifdef Q_PROCESSOR_ARM
+ UnwindHelper::writeARMUnwindInfo(linkBuffer.debugAddress(), linkBuffer.offsetOf(endOfCode));
+#endif
+
+ static bool showCode = !qgetenv("SHOW_CODE").isNull();
+ if (showCode) {
+#if OS(LINUX)
+ char* disasmOutput = 0;
+ size_t disasmLength = 0;
+ FILE* disasmStream = open_memstream(&disasmOutput, &disasmLength);
+ WTF::setDataFile(disasmStream);
+#endif
+
+ QByteArray name = _function->name->toUtf8();
+ if (name.isEmpty()) {
+ name = QByteArray::number(quintptr(_function), 16);
+ name.prepend("IR::Function(0x");
+ name.append(")");
+ }
+ vmFunc->codeRef = linkBuffer.finalizeCodeWithDisassembly("%s", name.data());
+
+ WTF::setDataFile(stderr);
+#if OS(LINUX)
+ fclose(disasmStream);
+#if CPU(X86) || CPU(X86_64)
+ QHash<void*, String*> idents;
+ printDisassembledOutputWithCalls(disasmOutput, functions, _vmFunction->identifiers);
+#endif
+ free(disasmOutput);
+#endif
+ } else {
+ vmFunc->codeRef = linkBuffer.finalizeCodeWithoutDisassembly();
+ }
+
+ vmFunc->code = (Value (*)(VM::ExecutionContext *, const uchar *)) vmFunc->codeRef.code().executableAddress();
+}
+
+InstructionSelection::InstructionSelection(VM::ExecutionEngine *engine, V4IR::Module *module)
+ : EvalInstructionSelection(engine, module)
+ , _block(0)
+ , _function(0)
+ , _vmFunction(0)
+ , _as(0)
+{
+}
+
+InstructionSelection::~InstructionSelection()
+{
+ delete _as;
+}
+
+void InstructionSelection::run(VM::Function *vmFunction, V4IR::Function *function)
+{
+ QVector<Lookup> lookups;
+ QSet<V4IR::BasicBlock*> reentryBlocks;
+ qSwap(_function, function);
+ qSwap(_vmFunction, vmFunction);
+ qSwap(_lookups, lookups);
+ qSwap(_reentryBlocks, reentryBlocks);
+ Assembler* oldAssembler = _as;
+ _as = new Assembler(_function, _vmFunction, engine());
+
+ int locals = (_function->tempCount - _function->locals.size() + _function->maxNumberOfArguments) + 1;
+ locals = (locals + 1) & ~1;
+ _as->enterStandardStackFrame(locals);
+
+ int contextPointer = 0;
+#ifndef VALUE_FITS_IN_REGISTER
+ // When the return VM value doesn't fit into a register, then
+ // the caller provides a pointer for storage as first argument.
+ // That shifts the index the context pointer argument by one.
+ contextPointer++;
+#endif
+
+#ifdef ARGUMENTS_IN_REGISTERS
+ _as->move(_as->registerForArgument(contextPointer), Assembler::ContextRegister);
+#else
+ _as->loadPtr(addressForArgument(contextPointer), Assembler::ContextRegister);
+#endif
+
+ foreach (V4IR::BasicBlock *block, _function->basicBlocks) {
+ _block = block;
+ _as->registerBlock(_block);
+
+ if (_reentryBlocks.contains(_block)) {
+ _as->enterStandardStackFrame(/*locals*/0);
+#ifdef ARGUMENTS_IN_REGISTERS
+ _as->move(Assembler::registerForArgument(0), Assembler::ContextRegister);
+ _as->move(Assembler::registerForArgument(1), Assembler::LocalsRegister);
+#else
+ _as->loadPtr(addressForArgument(0), Assembler::ContextRegister);
+ _as->loadPtr(addressForArgument(1), Assembler::LocalsRegister);
+#endif
+ }
+
+ foreach (V4IR::Stmt *s, block->statements) {
+ s->accept(this);
+ }
+ }
+
+ _as->leaveStandardStackFrame(locals);
+#ifndef ARGUMENTS_IN_REGISTERS
+ // Emulate ret(n) instruction
+ // Pop off return address into scratch register ...
+ _as->pop(Assembler::ScratchRegister);
+ // ... and overwrite the invisible argument with
+ // the return address.
+ _as->poke(Assembler::ScratchRegister);
+#endif
+ _as->ret();
+
+ _as->link(_vmFunction);
+
+ if (_lookups.size()) {
+ _vmFunction->lookups = new Lookup[_lookups.size()];
+ memcpy(_vmFunction->lookups, _lookups.constData(), _lookups.size()*sizeof(Lookup));
+ }
+
+ UnwindHelper::registerFunction(_vmFunction);
+
+ qSwap(_vmFunction, vmFunction);
+ qSwap(_function, function);
+ qSwap(_lookups, lookups);
+ qSwap(_reentryBlocks, reentryBlocks);
+ delete _as;
+ _as = oldAssembler;
+}
+
+void InstructionSelection::callBuiltinInvalid(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ int argc = prepareVariableArguments(args);
+ VM::String *s = identifier(*func->id);
+
+ if (useFastLookups && func->global) {
+ uint index = addGlobalLookup(s);
+ generateFunctionCall(Assembler::Void, __qmljs_call_global_lookup,
+ Assembler::ContextRegister, Assembler::PointerToValue(result),
+ Assembler::TrustedImm32(index),
+ baseAddressForCallArguments(),
+ Assembler::TrustedImm32(argc));
+ } else {
+ generateFunctionCall(Assembler::Void, __qmljs_call_activation_property,
+ Assembler::ContextRegister, Assembler::PointerToValue(result),
+ s,
+ baseAddressForCallArguments(),
+ Assembler::TrustedImm32(argc));
+ }
+}
+
+void InstructionSelection::callBuiltinTypeofMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_typeof_member, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(base), identifier(name));
+}
+
+void InstructionSelection::callBuiltinTypeofSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_typeof_element,
+ Assembler::ContextRegister, Assembler::PointerToValue(result),
+ Assembler::Reference(base), Assembler::Reference(index));
+}
+
+void InstructionSelection::callBuiltinTypeofName(const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_typeof_name, Assembler::ContextRegister, Assembler::PointerToValue(result), identifier(name));
+}
+
+void InstructionSelection::callBuiltinTypeofValue(V4IR::Temp *value, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_typeof, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(value));
+}
+
+void InstructionSelection::callBuiltinDeleteMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_delete_member, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(base), identifier(name));
+}
+
+void InstructionSelection::callBuiltinDeleteSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_delete_subscript, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(base), Assembler::Reference(index));
+}
+
+void InstructionSelection::callBuiltinDeleteName(const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_delete_name, Assembler::ContextRegister, Assembler::PointerToValue(result), identifier(name));
+}
+
+void InstructionSelection::callBuiltinDeleteValue(V4IR::Temp *result)
+{
+ _as->storeValue(Value::fromBoolean(false), result);
+}
+
+void InstructionSelection::callBuiltinPostIncrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_increment_member, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::PointerToValue(base), identifier(name));
+}
+
+void InstructionSelection::callBuiltinPostIncrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_increment_element, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(base), Assembler::PointerToValue(index));
+}
+
+void InstructionSelection::callBuiltinPostIncrementName(const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_increment_name, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), identifier(name));
+}
+
+void InstructionSelection::callBuiltinPostIncrementValue(V4IR::Temp *value, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_increment,
+ Assembler::PointerToValue(result), Assembler::PointerToValue(value));
+}
+
+void InstructionSelection::callBuiltinPostDecrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_decrement_member, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(base), identifier(name));
+}
+
+void InstructionSelection::callBuiltinPostDecrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_decrement_element, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(base),
+ Assembler::Reference(index));
+}
+
+void InstructionSelection::callBuiltinPostDecrementName(const QString &name, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_decrement_name, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), identifier(name));
+}
+
+void InstructionSelection::callBuiltinPostDecrementValue(V4IR::Temp *value, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_post_decrement,
+ Assembler::PointerToValue(result), Assembler::PointerToValue(value));
+}
+
+void InstructionSelection::callBuiltinThrow(V4IR::Temp *arg)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_throw, Assembler::ContextRegister, Assembler::Reference(arg));
+}
+
+typedef void *(*MiddleOfFunctionEntryPoint(ExecutionContext *, void *localsPtr));
+static void *tryWrapper(ExecutionContext *context, void *localsPtr, MiddleOfFunctionEntryPoint tryBody, MiddleOfFunctionEntryPoint catchBody,
+ VM::String *exceptionVarName, Value *exceptionVar)
+{
+ *exceptionVar = Value::undefinedValue();
+ void *addressToContinueAt = 0;
+ try {
+ addressToContinueAt = tryBody(context, localsPtr);
+ } catch (Exception& ex) {
+ ex.accept(context);
+ *exceptionVar = ex.value();
+ try {
+ ExecutionContext *catchContext = __qmljs_builtin_push_catch_scope(exceptionVarName, ex.value(), context);
+ addressToContinueAt = catchBody(catchContext, localsPtr);
+ context = __qmljs_builtin_pop_scope(catchContext);
+ } catch (Exception& ex) {
+ *exceptionVar = ex.value();
+ ex.accept(context);
+ addressToContinueAt = catchBody(context, localsPtr);
+ }
+ }
+ return addressToContinueAt;
+}
+
+void InstructionSelection::visitTry(V4IR::Try *t)
+{
+ // Call tryWrapper, which is going to re-enter the same function at the address of the try block. At then end
+ // of the try function the JIT code will return with the address of the sub-sequent instruction, which tryWrapper
+ // returns and to which we jump to.
+
+ _reentryBlocks.insert(t->tryBlock);
+ _reentryBlocks.insert(t->catchBlock);
+
+ generateFunctionCall(Assembler::ReturnValueRegister, tryWrapper, Assembler::ContextRegister, Assembler::LocalsRegister,
+ Assembler::ReentryBlock(t->tryBlock), Assembler::ReentryBlock(t->catchBlock),
+ identifier(t->exceptionVarName), Assembler::PointerToValue(t->exceptionVar));
+ _as->jump(Assembler::ReturnValueRegister);
+}
+
+void InstructionSelection::callBuiltinFinishTry()
+{
+ // This assumes that we're in code that was called by tryWrapper, so we return to try wrapper
+ // with the address that we'd like to continue at, which is right after the ret below.
+ Assembler::DataLabelPtr continuation = _as->moveWithPatch(Assembler::TrustedImmPtr(0), Assembler::ReturnValueRegister);
+ _as->leaveStandardStackFrame(/*locals*/0);
+ _as->ret();
+ _as->addPatch(continuation, _as->label());
+}
+
+void InstructionSelection::callBuiltinForeachIteratorObject(V4IR::Temp *arg, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_foreach_iterator_object, Assembler::ContextRegister, Assembler::PointerToValue(result), Assembler::Reference(arg), Assembler::ContextRegister);
+}
+
+void InstructionSelection::callBuiltinForeachNextPropertyname(V4IR::Temp *arg, V4IR::Temp *result)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_foreach_next_property_name, Assembler::PointerToValue(result), Assembler::Reference(arg));
+}
+
+void InstructionSelection::callBuiltinPushWithScope(V4IR::Temp *arg)
+{
+ generateFunctionCall(Assembler::ContextRegister, __qmljs_builtin_push_with_scope, Assembler::Reference(arg), Assembler::ContextRegister);
+}
+
+void InstructionSelection::callBuiltinPopScope()
+{
+ generateFunctionCall(Assembler::ContextRegister, __qmljs_builtin_pop_scope, Assembler::ContextRegister);
+}
+
+void InstructionSelection::callBuiltinDeclareVar(bool deletable, const QString &name)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_declare_var, Assembler::ContextRegister,
+ Assembler::TrustedImm32(deletable), identifier(name));
+}
+
+void InstructionSelection::callBuiltinDefineGetterSetter(V4IR::Temp *object, const QString &name, V4IR::Temp *getter, V4IR::Temp *setter)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_define_getter_setter, Assembler::ContextRegister,
+ Assembler::Reference(object), identifier(name), Assembler::PointerToValue(getter), Assembler::PointerToValue(setter));
+}
+
+void InstructionSelection::callBuiltinDefineProperty(V4IR::Temp *object, const QString &name, V4IR::Temp *value)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_define_property, Assembler::ContextRegister,
+ Assembler::Reference(object), identifier(name), Assembler::PointerToValue(value));
+}
+
+void InstructionSelection::callBuiltinDefineArray(V4IR::Temp *result, V4IR::ExprList *args)
+{
+ int length = prepareVariableArguments(args);
+ generateFunctionCall(Assembler::Void, __qmljs_builtin_define_array, Assembler::ContextRegister,
+ Assembler::PointerToValue(result),
+ baseAddressForCallArguments(), Assembler::TrustedImm32(length));
+}
+
+void InstructionSelection::callValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ int argc = prepareVariableArguments(args);
+ V4IR::Temp* thisObject = 0;
+ generateFunctionCall(Assembler::Void, __qmljs_call_value, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::PointerToValue(thisObject),
+ Assembler::Reference(value), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
+}
+
+void InstructionSelection::loadThisObject(V4IR::Temp *temp)
+{
+#if defined(VALUE_FITS_IN_REGISTER)
+ _as->loadPtr(Pointer(Assembler::ContextRegister, offsetof(ExecutionContext, thisObject)), Assembler::ReturnValueRegister);
+ _as->storeArgument(Assembler::ReturnValueRegister, temp);
+#else
+ _as->copyValue(temp, Pointer(Assembler::ContextRegister, offsetof(ExecutionContext, thisObject)));
+#endif
+}
+
+void InstructionSelection::loadConst(V4IR::Const *sourceConst, V4IR::Temp *targetTemp)
+{
+ _as->storeValue(convertToValue(sourceConst), targetTemp);
+}
+
+void InstructionSelection::loadString(const QString &str, V4IR::Temp *targetTemp)
+{
+ Value v = Value::fromString(identifier(str));
+ _as->storeValue(v, targetTemp);
+}
+
+void InstructionSelection::loadRegexp(V4IR::RegExp *sourceRegexp, V4IR::Temp *targetTemp)
+{
+ Value v = Value::fromObject(engine()->newRegExpObject(*sourceRegexp->value,
+ sourceRegexp->flags));
+ _vmFunction->generatedValues.append(v);
+ _as->storeValue(v, targetTemp);
+}
+
+void InstructionSelection::getActivationProperty(const V4IR::Name *name, V4IR::Temp *temp)
+{
+ String *propertyName = identifier(*name->id);
+ if (useFastLookups && name->global) {
+ uint index = addGlobalLookup(propertyName);
+ generateFunctionCall(Assembler::Void, __qmljs_get_global_lookup, Assembler::ContextRegister, Assembler::PointerToValue(temp),
+ Assembler::TrustedImm32(index));
+ return;
+ }
+ generateFunctionCall(Assembler::Void, __qmljs_get_activation_property, Assembler::ContextRegister, Assembler::PointerToValue(temp), propertyName);
+}
+
+void InstructionSelection::setActivationProperty(V4IR::Temp *source, const QString &targetName)
+{
+ String *propertyName = identifier(targetName);
+ generateFunctionCall(Assembler::Void, __qmljs_set_activation_property,
+ Assembler::ContextRegister, propertyName, Assembler::Reference(source));
+}
+
+void InstructionSelection::initClosure(V4IR::Closure *closure, V4IR::Temp *target)
+{
+ VM::Function *vmFunc = vmFunction(closure->value);
+ assert(vmFunc);
+ generateFunctionCall(Assembler::Void, __qmljs_init_closure, Assembler::ContextRegister, Assembler::PointerToValue(target), Assembler::TrustedImmPtr(vmFunc));
+}
+
+// Emit code for `target = base.name`. With useFastLookups the property is
+// resolved through an inline-cache slot (addLookup index), otherwise via
+// the generic __qmljs_get_property runtime call.
+void InstructionSelection::getProperty(V4IR::Temp *base, const QString &name, V4IR::Temp *target)
+{
+ if (useFastLookups) {
+ VM::String *s = identifier(name);
+ uint index = addLookup(s);
+ generateFunctionCall(Assembler::Void, __qmljs_get_property_lookup, Assembler::ContextRegister, Assembler::PointerToValue(target),
+ Assembler::Reference(base), Assembler::TrustedImm32(index));
+ } else {
+ generateFunctionCall(Assembler::Void, __qmljs_get_property, Assembler::ContextRegister, Assembler::PointerToValue(target),
+ Assembler::Reference(base), identifier(name));
+ }
+}
+
+// Emit code for `targetBase.targetName = source`, using a lookup-cache slot
+// when useFastLookups is enabled, the generic runtime store otherwise.
+void InstructionSelection::setProperty(V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName)
+{
+ if (useFastLookups) {
+ VM::String *s = identifier(targetName);
+ uint index = addLookup(s);
+ generateFunctionCall(Assembler::Void, __qmljs_set_property_lookup,
+ Assembler::ContextRegister, Assembler::Reference(targetBase),
+ Assembler::TrustedImm32(index), Assembler::Reference(source));
+ } else {
+ generateFunctionCall(Assembler::Void, __qmljs_set_property, Assembler::ContextRegister,
+ Assembler::Reference(targetBase),
+ identifier(targetName), Assembler::Reference(source));
+ }
+}
+
+// Emit code for subscript read: `target = base[index]`.
+void InstructionSelection::getElement(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *target)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_get_element, Assembler::ContextRegister,
+ Assembler::PointerToValue(target), Assembler::Reference(base),
+ Assembler::Reference(index));
+}
+
+// Emit code for subscript write: `targetBase[targetIndex] = source`.
+void InstructionSelection::setElement(V4IR::Temp *source, V4IR::Temp *targetBase, V4IR::Temp *targetIndex)
+{
+ generateFunctionCall(Assembler::Void, __qmljs_set_element, Assembler::ContextRegister,
+ Assembler::Reference(targetBase), Assembler::Reference(targetIndex),
+ Assembler::Reference(source));
+}
+
+// Emit a raw value copy from sourceTemp to targetTemp (delegates to the
+// assembler; note the argument order is (target, source) there).
+void InstructionSelection::copyValue(V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp)
+{
+ _as->copyValue(targetTemp, sourceTemp);
+}
+
+// setOp records both the runtime-helper function pointer and its stringified
+// name (used for diagnostics when emitting the call).
+#define setOp(op, opName, operation) \
+ do { op = operation; opName = isel_stringIfy(operation); } while (0)
+
+// Emit code for a unary operation on sourceTemp, storing into targetTemp.
+// OpIfTrue is handled elsewhere and must not reach this function.
+// If no helper was selected (release build falling through the asserts),
+// no call is emitted.
+void InstructionSelection::unop(V4IR::AluOp oper, V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp)
+{
+ VM::UnaryOpName op = 0;
+ const char *opName = 0;
+ switch (oper) {
+ case V4IR::OpIfTrue: assert(!"unreachable"); break;
+ case V4IR::OpNot: setOp(op, opName, __qmljs_not); break;
+ case V4IR::OpUMinus: setOp(op, opName, __qmljs_uminus); break;
+ case V4IR::OpUPlus: setOp(op, opName, __qmljs_uplus); break;
+ case V4IR::OpCompl: setOp(op, opName, __qmljs_compl); break;
+ case V4IR::OpIncrement: setOp(op, opName, __qmljs_increment); break;
+ case V4IR::OpDecrement: setOp(op, opName, __qmljs_decrement); break;
+ default: assert(!"unreachable"); break;
+ } // switch
+
+ if (op)
+ _as->generateFunctionCallImp(Assembler::Void, opName, op, Assembler::PointerToValue(targetTemp),
+ Assembler::Reference(sourceTemp));
+}
+
+// Emit code for a binary ALU operation; the assembler decides between
+// inline code paths and runtime fallbacks (see Assembler::generateBinOp).
+void InstructionSelection::binop(V4IR::AluOp oper, V4IR::Temp *leftSource, V4IR::Temp *rightSource, V4IR::Temp *target)
+{
+ _as->generateBinOp(oper, target, leftSource, rightSource);
+}
+
+// Compile an in-place operation on a named activation property
+// (e.g. "name += rhs") by dispatching to the matching
+// __qmljs_inplace_*_name runtime helper.
+void InstructionSelection::inplaceNameOp(V4IR::AluOp oper, V4IR::Temp *rightSource, const QString &targetName)
+{
+ VM::InplaceBinOpName op = 0;
+ const char *opName = 0;
+ switch (oper) {
+ case V4IR::OpBitAnd: setOp(op, opName, __qmljs_inplace_bit_and_name); break;
+ case V4IR::OpBitOr: setOp(op, opName, __qmljs_inplace_bit_or_name); break;
+ case V4IR::OpBitXor: setOp(op, opName, __qmljs_inplace_bit_xor_name); break;
+ case V4IR::OpAdd: setOp(op, opName, __qmljs_inplace_add_name); break;
+ case V4IR::OpSub: setOp(op, opName, __qmljs_inplace_sub_name); break;
+ case V4IR::OpMul: setOp(op, opName, __qmljs_inplace_mul_name); break;
+ case V4IR::OpDiv: setOp(op, opName, __qmljs_inplace_div_name); break;
+ case V4IR::OpMod: setOp(op, opName, __qmljs_inplace_mod_name); break;
+ case V4IR::OpLShift: setOp(op, opName, __qmljs_inplace_shl_name); break;
+ case V4IR::OpRShift: setOp(op, opName, __qmljs_inplace_shr_name); break;
+ case V4IR::OpURShift: setOp(op, opName, __qmljs_inplace_ushr_name); break;
+ default:
+ Q_UNREACHABLE();
+ break;
+ }
+ if (op) {
+ _as->generateFunctionCallImp(Assembler::Void, opName, op, Assembler::ContextRegister,
+ identifier(targetName), Assembler::Reference(rightSource));
+ }
+}
+
+// Compile an in-place operation on a subscripted element
+// (e.g. "base[idx] += rhs") via the __qmljs_inplace_*_element helpers.
+void InstructionSelection::inplaceElementOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBaseTemp, V4IR::Temp *targetIndexTemp)
+{
+ VM::InplaceBinOpElement op = 0;
+ const char *opName = 0;
+ switch (oper) {
+ case V4IR::OpBitAnd: setOp(op, opName, __qmljs_inplace_bit_and_element); break;
+ case V4IR::OpBitOr: setOp(op, opName, __qmljs_inplace_bit_or_element); break;
+ case V4IR::OpBitXor: setOp(op, opName, __qmljs_inplace_bit_xor_element); break;
+ case V4IR::OpAdd: setOp(op, opName, __qmljs_inplace_add_element); break;
+ case V4IR::OpSub: setOp(op, opName, __qmljs_inplace_sub_element); break;
+ case V4IR::OpMul: setOp(op, opName, __qmljs_inplace_mul_element); break;
+ case V4IR::OpDiv: setOp(op, opName, __qmljs_inplace_div_element); break;
+ case V4IR::OpMod: setOp(op, opName, __qmljs_inplace_mod_element); break;
+ case V4IR::OpLShift: setOp(op, opName, __qmljs_inplace_shl_element); break;
+ case V4IR::OpRShift: setOp(op, opName, __qmljs_inplace_shr_element); break;
+ case V4IR::OpURShift: setOp(op, opName, __qmljs_inplace_ushr_element); break;
+ default:
+ Q_UNREACHABLE();
+ break;
+ }
+
+ if (op) {
+ _as->generateFunctionCallImp(Assembler::Void, opName, op, Assembler::ContextRegister,
+ Assembler::Reference(targetBaseTemp), Assembler::Reference(targetIndexTemp),
+ Assembler::Reference(source));
+ }
+}
+
+// Compile an in-place operation on a member property
+// (e.g. "base.name += rhs") via the __qmljs_inplace_*_member helpers.
+void InstructionSelection::inplaceMemberOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName)
+{
+ VM::InplaceBinOpMember op = 0;
+ const char *opName = 0;
+ switch (oper) {
+ case V4IR::OpBitAnd: setOp(op, opName, __qmljs_inplace_bit_and_member); break;
+ case V4IR::OpBitOr: setOp(op, opName, __qmljs_inplace_bit_or_member); break;
+ case V4IR::OpBitXor: setOp(op, opName, __qmljs_inplace_bit_xor_member); break;
+ case V4IR::OpAdd: setOp(op, opName, __qmljs_inplace_add_member); break;
+ case V4IR::OpSub: setOp(op, opName, __qmljs_inplace_sub_member); break;
+ case V4IR::OpMul: setOp(op, opName, __qmljs_inplace_mul_member); break;
+ case V4IR::OpDiv: setOp(op, opName, __qmljs_inplace_div_member); break;
+ case V4IR::OpMod: setOp(op, opName, __qmljs_inplace_mod_member); break;
+ case V4IR::OpLShift: setOp(op, opName, __qmljs_inplace_shl_member); break;
+ case V4IR::OpRShift: setOp(op, opName, __qmljs_inplace_shr_member); break;
+ case V4IR::OpURShift: setOp(op, opName, __qmljs_inplace_ushr_member); break;
+ default:
+ Q_UNREACHABLE();
+ break;
+ }
+
+ if (op) {
+ // Resolve the member name once and reuse it in the call; the previous
+ // code resolved it a second time, registering a duplicate identifier
+ // in _vmFunction->identifiers and leaving `member` unused.
+ String* member = identifier(targetName);
+ _as->generateFunctionCallImp(Assembler::Void, opName, op, Assembler::ContextRegister,
+ Assembler::Reference(targetBase), member,
+ Assembler::Reference(source));
+ }
+}
+
+// Emit code for a method call `base.name(args...)`. Arguments are first
+// copied into the call-argument area (prepareVariableArguments); the call
+// itself goes through the lookup-cache fast path when enabled.
+void InstructionSelection::callProperty(V4IR::Temp *base, const QString &name,
+ V4IR::ExprList *args, V4IR::Temp *result)
+{
+ assert(base != 0);
+
+ int argc = prepareVariableArguments(args);
+ VM::String *s = identifier(name);
+
+ if (useFastLookups) {
+ uint index = addLookup(s);
+ generateFunctionCall(Assembler::Void, __qmljs_call_property_lookup,
+ Assembler::ContextRegister, Assembler::PointerToValue(result),
+ Assembler::Reference(base), Assembler::TrustedImm32(index),
+ baseAddressForCallArguments(),
+ Assembler::TrustedImm32(argc));
+ } else {
+ generateFunctionCall(Assembler::Void, __qmljs_call_property,
+ Assembler::ContextRegister, Assembler::PointerToValue(result),
+ Assembler::Reference(base), s,
+ baseAddressForCallArguments(),
+ Assembler::TrustedImm32(argc));
+ }
+}
+
+// Emit code for a subscripted call `base[index](args...)`.
+void InstructionSelection::callSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ assert(base != 0);
+
+ int argc = prepareVariableArguments(args);
+ generateFunctionCall(Assembler::Void, __qmljs_call_element,
+ Assembler::ContextRegister, Assembler::PointerToValue(result),
+ Assembler::Reference(base), Assembler::Reference(index),
+ baseAddressForCallArguments(),
+ Assembler::TrustedImm32(argc));
+}
+
+// Create a VM::String identifier for `s` and register it with the current
+// function's identifier list (presumably so it stays reachable for the
+// lifetime of the generated code — TODO confirm ownership model).
+String *InstructionSelection::identifier(const QString &s)
+{
+ String *str = engine()->newIdentifier(s);
+ _vmFunction->identifiers.append(str);
+ return str;
+}
+
+// Emit code for `new name(args...)` where `name` is an activation property.
+// Globals can use the indexed global-lookup fast path; otherwise the call
+// goes through the generic activation-property constructor helper.
+void InstructionSelection::constructActivationProperty(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ assert(func != 0);
+
+ if (useFastLookups && func->global) {
+ int argc = prepareVariableArguments(args);
+ VM::String *s = identifier(*func->id);
+
+ uint index = addGlobalLookup(s);
+ generateFunctionCall(Assembler::Void, __qmljs_construct_global_lookup,
+ Assembler::ContextRegister, Assembler::PointerToValue(result),
+ Assembler::TrustedImm32(index),
+ baseAddressForCallArguments(),
+ Assembler::TrustedImm32(argc));
+ return;
+ }
+
+ callRuntimeMethod(result, __qmljs_construct_activation_property, func, args);
+}
+
+// Emit code for `new base.name(args...)`.
+void InstructionSelection::constructProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ int argc = prepareVariableArguments(args);
+ generateFunctionCall(Assembler::Void, __qmljs_construct_property, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(base), identifier(name), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
+}
+
+// Emit code for `new value(args...)` where the constructor is an arbitrary
+// value held in a temp.
+void InstructionSelection::constructValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result)
+{
+ assert(value != 0);
+
+ int argc = prepareVariableArguments(args);
+ generateFunctionCall(Assembler::Void, __qmljs_construct_value, Assembler::ContextRegister,
+ Assembler::PointerToValue(result), Assembler::Reference(value), baseAddressForCallArguments(), Assembler::TrustedImm32(argc));
+}
+
+// Emit an unconditional jump to the target basic block (patched later by
+// the assembler's block-linking machinery).
+void InstructionSelection::visitJump(V4IR::Jump *s)
+{
+ _as->jumpToBlock(_block, s->target);
+}
+
+// Emit a conditional jump. Two supported condition shapes:
+// 1. A temp: if it is already tagged Boolean, branch on its int_32 payload
+// directly; otherwise convert via __qmljs_to_boolean first.
+// 2. A binop on two temps: call the matching __qmljs_cmp_* helper and
+// branch on its (nonzero == true) result.
+// Anything else is unimplemented.
+void InstructionSelection::visitCJump(V4IR::CJump *s)
+{
+ if (V4IR::Temp *t = s->cond->asTemp()) {
+ Address temp = _as->loadTempAddress(Assembler::ScratchRegister, t);
+ Address tag = temp;
+ tag.offset += offsetof(VM::Value, tag);
+ // Fast path: skip the runtime conversion when the value is a Boolean.
+ Assembler::Jump booleanConversion = _as->branch32(Assembler::NotEqual, tag, Assembler::TrustedImm32(VM::Value::Boolean_Type));
+
+ Address data = temp;
+ data.offset += offsetof(VM::Value, int_32);
+ _as->load32(data, Assembler::ReturnValueRegister);
+ Assembler::Jump testBoolean = _as->jump();
+
+ booleanConversion.link(_as);
+ {
+ // Slow path: ToBoolean() leaves its result in ReturnValueRegister.
+ generateFunctionCall(Assembler::ReturnValueRegister, __qmljs_to_boolean, Assembler::Reference(t));
+ }
+
+ testBoolean.link(_as);
+ // Nonzero means true: patch the taken edge, fall through to iffalse.
+ Assembler::Jump target = _as->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister, Assembler::TrustedImm32(0));
+ _as->addPatch(s->iftrue, target);
+
+ _as->jumpToBlock(_block, s->iffalse);
+ return;
+ } else if (V4IR::Binop *b = s->cond->asBinop()) {
+ if (b->left->asTemp() && b->right->asTemp()) {
+ VM::CmpOp op = 0;
+ const char *opName = 0;
+ switch (b->op) {
+ default: Q_UNREACHABLE(); assert(!"todo"); break;
+ case V4IR::OpGt: setOp(op, opName, __qmljs_cmp_gt); break;
+ case V4IR::OpLt: setOp(op, opName, __qmljs_cmp_lt); break;
+ case V4IR::OpGe: setOp(op, opName, __qmljs_cmp_ge); break;
+ case V4IR::OpLe: setOp(op, opName, __qmljs_cmp_le); break;
+ case V4IR::OpEqual: setOp(op, opName, __qmljs_cmp_eq); break;
+ case V4IR::OpNotEqual: setOp(op, opName, __qmljs_cmp_ne); break;
+ case V4IR::OpStrictEqual: setOp(op, opName, __qmljs_cmp_se); break;
+ case V4IR::OpStrictNotEqual: setOp(op, opName, __qmljs_cmp_sne); break;
+ case V4IR::OpInstanceof: setOp(op, opName, __qmljs_cmp_instanceof); break;
+ case V4IR::OpIn: setOp(op, opName, __qmljs_cmp_in); break;
+ } // switch
+
+ _as->generateFunctionCallImp(Assembler::ReturnValueRegister, opName, op, Assembler::ContextRegister,
+ Assembler::Reference(b->left->asTemp()),
+ Assembler::Reference(b->right->asTemp()));
+
+ Assembler::Jump target = _as->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister, Assembler::TrustedImm32(0));
+ _as->addPatch(s->iftrue, target);
+
+ _as->jumpToBlock(_block, s->iffalse);
+ return;
+ } else {
+ assert(!"wip");
+ }
+ Q_UNIMPLEMENTED();
+ }
+ Q_UNIMPLEMENTED();
+ assert(!"TODO");
+}
+
+// Emit the function epilogue's value hand-off: copy the returned temp
+// either directly into the return register (when a VM::Value fits in one)
+// or through the hidden return-value pointer passed as argument 0.
+void InstructionSelection::visitRet(V4IR::Ret *s)
+{
+ if (V4IR::Temp *t = s->expr->asTemp()) {
+#if defined(ARGUMENTS_IN_REGISTERS) && defined(VALUE_FITS_IN_REGISTER)
+ _as->copyValue(Assembler::ReturnValueRegister, t);
+#else
+ _as->loadPtr(addressForArgument(0), Assembler::ReturnValueRegister);
+ _as->copyValue(Address(Assembler::ReturnValueRegister, 0), t);
+#endif
+ return;
+ }
+ Q_UNIMPLEMENTED();
+ Q_UNUSED(s);
+}
+
+// Copy each argument expression into its slot in the call-argument area
+// and return the argument count. Two passes: count first, then copy.
+int InstructionSelection::prepareVariableArguments(V4IR::ExprList* args)
+{
+ int argc = 0;
+ for (V4IR::ExprList *it = args; it; it = it->next) {
+ ++argc;
+ }
+
+ int i = 0;
+ for (V4IR::ExprList *it = args; it; it = it->next, ++i) {
+// V4IR::Temp *arg = it->expr->asTemp();
+// assert(arg != 0);
+ _as->copyValue(argumentAddressForCall(i), it->expr);
+ }
+
+ return argc;
+}
+
+// Shared implementation for call/construct of an activation-level name:
+// prepares the argument area and emits a call to `method`, passing the
+// resolved name, the argument base address and the argument count.
+void InstructionSelection::callRuntimeMethodImp(V4IR::Temp *result, const char* name, ActivationMethod method, V4IR::Expr *base, V4IR::ExprList *args)
+{
+ V4IR::Name *baseName = base->asName();
+ assert(baseName != 0);
+
+ int argc = prepareVariableArguments(args);
+ _as->generateFunctionCallImp(Assembler::Void, name, method, Assembler::ContextRegister, Assembler::PointerToValue(result),
+ identifier(*baseName->id), baseAddressForCallArguments(),
+ Assembler::TrustedImm32(argc));
+}
+
+
+// Allocate a new property-lookup (inline cache) slot for `name`, initialized
+// to the generic lookup handler with an empty class list, and return its
+// index into _lookups.
+uint InstructionSelection::addLookup(VM::String *name)
+{
+ uint index = (uint)_lookups.size();
+ VM::Lookup l;
+ l.lookupProperty = Lookup::lookupPropertyGeneric;
+ for (int i = 0; i < Lookup::Size; ++i)
+ l.classList[i] = 0;
+ l.level = -1;
+ l.index = UINT_MAX;
+ l.name = name;
+ _lookups.append(l);
+ return index;
+}
+
+// Same as addLookup(), but the slot is wired to the generic *global*
+// lookup handler instead of the property handler.
+uint InstructionSelection::addGlobalLookup(VM::String *name)
+{
+ uint index = (uint)_lookups.size();
+ VM::Lookup l;
+ l.lookupGlobal = Lookup::lookupGlobalGeneric;
+ for (int i = 0; i < Lookup::Size; ++i)
+ l.classList[i] = 0;
+ l.level = -1;
+ l.index = UINT_MAX;
+ l.name = name;
+ _lookups.append(l);
+ return index;
+}
diff --git a/src/qml/qml/v4vm/qv4isel_masm_p.h b/src/qml/qml/v4vm/qv4isel_masm_p.h
new file mode 100644
index 0000000000..9f8704725f
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4isel_masm_p.h
@@ -0,0 +1,894 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4ISEL_MASM_P_H
+#define QV4ISEL_MASM_P_H
+
+#include "qv4global.h"
+#include "qv4jsir_p.h"
+#include "qv4isel_p.h"
+#include "qv4isel_util_p.h"
+#include "qv4object.h"
+#include "qv4runtime.h"
+
+#include <QtCore/QHash>
+#include <config.h>
+#include <wtf/Vector.h>
+#include <assembler/MacroAssembler.h>
+
+namespace QQmlJS {
+namespace MASM {
+
+class Assembler : public JSC::MacroAssembler
+{
+public:
+ Assembler(V4IR::Function* function, VM::Function *vmFunction, VM::ExecutionEngine *engine);
+// Per-architecture configuration: which physical registers play the fixed
+// roles (stack frame, context, scratch, return value, ...), how many
+// arguments are passed in registers, and how the standard stack frame is
+// entered/left. The VALUE_FITS_IN_REGISTER / ARGUMENTS_IN_REGISTERS /
+// HAVE_ALU_OPS_WITH_MEM_OPERAND feature macros gate code paths elsewhere.
+#if CPU(X86)
+
+#undef VALUE_FITS_IN_REGISTER
+#undef ARGUMENTS_IN_REGISTERS
+#define HAVE_ALU_OPS_WITH_MEM_OPERAND 1
+
+ static const RegisterID StackFrameRegister = JSC::X86Registers::ebp;
+ static const RegisterID StackPointerRegister = JSC::X86Registers::esp;
+ static const RegisterID LocalsRegister = JSC::X86Registers::edi;
+ static const RegisterID ContextRegister = JSC::X86Registers::esi;
+ static const RegisterID ReturnValueRegister = JSC::X86Registers::eax;
+ static const RegisterID ScratchRegister = JSC::X86Registers::ecx;
+ static const RegisterID IntegerOpRegister = JSC::X86Registers::eax;
+ static const FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
+
+ static const int RegisterSize = 4;
+
+ // ia32: all arguments go on the stack.
+ static const int RegisterArgumentCount = 0;
+ static RegisterID registerForArgument(int)
+ {
+ assert(false);
+ // Not reached.
+ return JSC::X86Registers::eax;
+ }
+
+ inline void platformEnterStandardStackFrame() {}
+ inline void platformLeaveStandardStackFrame() {}
+#elif CPU(X86_64)
+
+#define VALUE_FITS_IN_REGISTER
+#define ARGUMENTS_IN_REGISTERS
+#define HAVE_ALU_OPS_WITH_MEM_OPERAND 1
+
+ static const RegisterID StackFrameRegister = JSC::X86Registers::ebp;
+ static const RegisterID StackPointerRegister = JSC::X86Registers::esp;
+ static const RegisterID LocalsRegister = JSC::X86Registers::r12;
+ static const RegisterID ContextRegister = JSC::X86Registers::r14;
+ static const RegisterID ReturnValueRegister = JSC::X86Registers::eax;
+ static const RegisterID ScratchRegister = JSC::X86Registers::r10;
+ static const RegisterID IntegerOpRegister = JSC::X86Registers::eax;
+ static const FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
+
+ static const int RegisterSize = 8;
+
+ // Matches the System V AMD64 integer-argument order
+ // (rdi, rsi, rdx, rcx, r8, r9).
+ static const int RegisterArgumentCount = 6;
+ static RegisterID registerForArgument(int index)
+ {
+ static RegisterID regs[RegisterArgumentCount] = {
+ JSC::X86Registers::edi,
+ JSC::X86Registers::esi,
+ JSC::X86Registers::edx,
+ JSC::X86Registers::ecx,
+ JSC::X86Registers::r8,
+ JSC::X86Registers::r9
+ };
+ assert(index >= 0 && index < RegisterArgumentCount);
+ return regs[index];
+ };
+ inline void platformEnterStandardStackFrame() {}
+ inline void platformLeaveStandardStackFrame() {}
+#elif CPU(ARM)
+
+#undef VALUE_FITS_IN_REGISTER
+#define ARGUMENTS_IN_REGISTERS
+#undef HAVE_ALU_OPS_WITH_MEM_OPERAND
+
+ static const RegisterID StackFrameRegister = JSC::ARMRegisters::r4;
+ static const RegisterID StackPointerRegister = JSC::ARMRegisters::sp;
+ static const RegisterID LocalsRegister = JSC::ARMRegisters::r7;
+ static const RegisterID ContextRegister = JSC::ARMRegisters::r5;
+ static const RegisterID ReturnValueRegister = JSC::ARMRegisters::r0;
+ static const RegisterID ScratchRegister = JSC::ARMRegisters::r6;
+ static const RegisterID IntegerOpRegister = JSC::ARMRegisters::r0;
+ static const FPRegisterID FPGpr0 = JSC::ARMRegisters::d0;
+
+ static const int RegisterSize = 4;
+
+ static const RegisterID RegisterArgument1 = JSC::ARMRegisters::r0;
+ static const RegisterID RegisterArgument2 = JSC::ARMRegisters::r1;
+ static const RegisterID RegisterArgument3 = JSC::ARMRegisters::r2;
+ static const RegisterID RegisterArgument4 = JSC::ARMRegisters::r3;
+
+ static const int RegisterArgumentCount = 4;
+ static RegisterID registerForArgument(int index)
+ {
+ assert(index >= 0 && index < RegisterArgumentCount);
+ return static_cast<RegisterID>(JSC::ARMRegisters::r0 + index);
+ };
+ inline void platformEnterStandardStackFrame()
+ {
+ // Move the register arguments onto the stack as if they were
+ // pushed by the caller, just like on ia32. This gives us consistent
+ // access to the parameters if we need to.
+ push(JSC::ARMRegisters::r3);
+ push(JSC::ARMRegisters::r2);
+ push(JSC::ARMRegisters::r1);
+ push(JSC::ARMRegisters::r0);
+ push(JSC::ARMRegisters::lr);
+ }
+ inline void platformLeaveStandardStackFrame()
+ {
+ // Must mirror platformEnterStandardStackFrame in reverse order.
+ pop(JSC::ARMRegisters::lr);
+ pop(JSC::ARMRegisters::r0);
+ pop(JSC::ARMRegisters::r1);
+ pop(JSC::ARMRegisters::r2);
+ pop(JSC::ARMRegisters::r3);
+ }
+#else
+#error Argh.
+#endif
+ static const int calleeSavedRegisterCount;
+
+ // Explicit type to allow distinguishing between
+ // pushing an address itself or the value it points
+ // to onto the stack when calling functions.
+ // Wrapper types used to select the right loadArgument()/push() overload:
+ // Pointer = pass the address itself; PointerToValue/Reference = pass the
+ // address of a temp's VM::Value; ReentryBlock = pass a to-be-patched
+ // block address; CallToLink records a call site for later linking.
+ struct Pointer : public Address
+ {
+ explicit Pointer(const Address& addr)
+ : Address(addr)
+ {}
+ explicit Pointer(RegisterID reg, int32_t offset)
+ : Address(reg, offset)
+ {}
+ };
+
+ // Tag type for "no return value" in generateFunctionCallImp.
+ struct VoidType { VoidType() {} };
+ static const VoidType Void;
+
+
+ typedef JSC::FunctionPtr FunctionPtr;
+
+ struct CallToLink {
+ Call call;
+ FunctionPtr externalFunction;
+ const char* functionName;
+ };
+ struct PointerToValue {
+ PointerToValue(V4IR::Temp *value) : value(value) {}
+ V4IR::Temp *value;
+ };
+ struct Reference {
+ Reference(V4IR::Temp *value) : value(value) {}
+ V4IR::Temp *value;
+ };
+
+ struct ReentryBlock {
+ ReentryBlock(V4IR::BasicBlock *b) : block(b) {}
+ V4IR::BasicBlock *block;
+ };
+
+ // Emit a call and queue it in _callsToLink so the real target address
+ // can be bound at link time.
+ void callAbsolute(const char* functionName, FunctionPtr function) {
+ CallToLink ctl;
+ ctl.call = call();
+ ctl.externalFunction = function;
+ ctl.functionName = functionName;
+ _callsToLink.append(ctl);
+ }
+
+ // Block bookkeeping and jump/pointer patching (defined out of line).
+ void registerBlock(V4IR::BasicBlock*);
+ void jumpToBlock(V4IR::BasicBlock* current, V4IR::BasicBlock *target);
+ void addPatch(V4IR::BasicBlock* targetBlock, Jump targetJump);
+ void addPatch(DataLabelPtr patch, Label target);
+ void addPatch(DataLabelPtr patch, V4IR::BasicBlock *target);
+
+ // Compute the address of temp `t`'s VM::Value slot, using `reg` as base.
+ Pointer loadTempAddress(RegisterID reg, V4IR::Temp *t);
+
+ // loadArgument(x, dest): put argument `x` into register `dest` prior to
+ // a function call. One overload per wrapper/argument type; a null
+ // temp/expr generally stands for "undefined" or a null pointer.
+ void loadArgument(RegisterID source, RegisterID dest)
+ {
+ move(source, dest);
+ }
+
+ void loadArgument(TrustedImmPtr ptr, RegisterID dest)
+ {
+ move(TrustedImmPtr(ptr), dest);
+ }
+
+ void loadArgument(const Pointer& ptr, RegisterID dest)
+ {
+ // Pass the effective address (base + offset), not the pointee.
+ addPtr(TrustedImm32(ptr.offset), ptr.base, dest);
+ }
+
+ void loadArgument(PointerToValue temp, RegisterID dest)
+ {
+ if (!temp.value) {
+ loadArgument(TrustedImmPtr(0), dest);
+ } else {
+ Pointer addr = loadTempAddress(dest, temp.value);
+ loadArgument(addr, dest);
+ }
+ }
+
+ void loadArgument(Reference temp, RegisterID dest)
+ {
+ assert(temp.value);
+ Pointer addr = loadTempAddress(dest, temp.value);
+ loadArgument(addr, dest);
+ }
+
+ void loadArgument(ReentryBlock block, RegisterID dest)
+ {
+ assert(block.block);
+ // The block address is unknown now; emit a patchable move and record it.
+ DataLabelPtr patch = moveWithPatch(TrustedImmPtr(0), dest);
+ addPatch(patch, block.block);
+ }
+
+#ifdef VALUE_FITS_IN_REGISTER
+ void loadArgument(V4IR::Temp* temp, RegisterID dest)
+ {
+ if (!temp) {
+ VM::Value undefined = VM::Value::undefinedValue();
+ move(TrustedImm64(undefined.val), dest);
+ } else {
+ Pointer addr = loadTempAddress(dest, temp);
+ load64(addr, dest);
+ }
+ }
+
+ void loadArgument(V4IR::Const* c, RegisterID dest)
+ {
+ VM::Value v = convertToValue(c);
+ move(TrustedImm64(v.val), dest);
+ }
+
+ void loadArgument(V4IR::Expr* expr, RegisterID dest)
+ {
+ if (!expr) {
+ VM::Value undefined = VM::Value::undefinedValue();
+ move(TrustedImm64(undefined.val), dest);
+ } else if (expr->asTemp()){
+ loadArgument(expr->asTemp(), dest);
+ } else if (expr->asConst()) {
+ loadArgument(expr->asConst(), dest);
+ } else {
+ assert(!"unimplemented expression type in loadArgument");
+ }
+ }
+#else
+ void loadArgument(V4IR::Expr*, RegisterID)
+ {
+ // A VM::Value does not fit in one register on this platform, so
+ // by-value expression arguments cannot be passed in a register.
+ assert(!"unimplemented: expression in loadArgument");
+ }
+#endif
+
+ void loadArgument(VM::String* string, RegisterID dest)
+ {
+ loadArgument(TrustedImmPtr(string), dest);
+ }
+
+ void loadArgument(TrustedImm32 imm32, RegisterID dest)
+ {
+ // Clear the full register first so the upper bits are defined even
+ // when only a 32-bit immediate is moved in.
+ xorPtr(dest, dest);
+ if (imm32.m_value)
+ move(imm32, dest);
+ }
+
+ // storeArgument(src, dest): move a call's result out of the return
+ // register into its destination (temp, pointer, register, or nothing).
+ void storeArgument(RegisterID src, V4IR::Temp *temp)
+ {
+ if (temp) {
+ Pointer addr = loadTempAddress(ScratchRegister, temp);
+#ifdef VALUE_FITS_IN_REGISTER
+ store64(src, addr);
+#else
+ // If the value doesn't fit into a register, then the
+ // register contains the address to where the argument
+ // (return value) is stored. Copy it from there.
+ copyValue(addr, Pointer(src, 0));
+#endif
+ }
+ }
+
+#ifdef VALUE_FITS_IN_REGISTER
+ void storeArgument(RegisterID src, const Pointer &dest)
+ {
+ store64(src, dest);
+ }
+#endif
+
+ void storeArgument(RegisterID src, RegisterID dest)
+ {
+ move(src, dest);
+ }
+
+ // No-op overload for Void returns.
+ void storeArgument(RegisterID, VoidType)
+ {
+ }
+
+ // push(x): place argument `x` on the stack for a call, one overload per
+ // wrapper/argument type (mirrors the loadArgument family for the
+ // stack-passed case).
+ using JSC::MacroAssembler::push;
+
+ void push(const Pointer& ptr)
+ {
+ addPtr(TrustedImm32(ptr.offset), ptr.base, ScratchRegister);
+ push(ScratchRegister);
+ }
+
+ void push(VM::Value value)
+ {
+#ifdef VALUE_FITS_IN_REGISTER
+ move(TrustedImm64(value.val), ScratchRegister);
+ push(ScratchRegister);
+#else
+ // Push tag then payload so they end up in memory in Value layout
+ // order (stack grows downwards).
+ move(TrustedImm32(value.tag), ScratchRegister);
+ push(ScratchRegister);
+ move(TrustedImm32(value.int_32), ScratchRegister);
+ push(ScratchRegister);
+#endif
+ }
+
+ void push(PointerToValue temp)
+ {
+ if (temp.value) {
+ Pointer ptr = loadTempAddress(ScratchRegister, temp.value);
+ push(ptr);
+ } else {
+ push(TrustedImmPtr(0));
+ }
+ }
+
+ void push(Reference temp)
+ {
+ assert (temp.value);
+
+ Pointer ptr = loadTempAddress(ScratchRegister, temp.value);
+ push(ptr);
+ }
+
+ void push(ReentryBlock block)
+ {
+ assert(block.block);
+ // Block address is patched in later; see addPatch().
+ DataLabelPtr patch = moveWithPatch(TrustedImmPtr(0), ScratchRegister);
+ push(ScratchRegister);
+ addPatch(patch, block.block);
+ }
+
+ void push(V4IR::Temp* temp)
+ {
+ if (temp) {
+ // Push the two 4-byte halves of the 8-byte Value, high half first.
+ Address addr = loadTempAddress(ScratchRegister, temp);
+ addr.offset += 4;
+ push(addr);
+ addr.offset -= 4;
+ push(addr);
+ } else {
+ VM::Value undefined = VM::Value::undefinedValue();
+ push(undefined);
+ }
+ }
+
+ void push(V4IR::Const* c)
+ {
+ VM::Value v = convertToValue(c);
+ push(v);
+ }
+
+ void push(V4IR::Expr* e)
+ {
+ if (!e) {
+ VM::Value undefined = VM::Value::undefinedValue();
+ push(undefined);
+ } else if (V4IR::Const *c = e->asConst())
+ push(c);
+ else if (V4IR::Temp *t = e->asTemp()) {
+ push(t);
+ } else {
+ assert(!"Trying to push an expression that is not a Temp or Const");
+ }
+ }
+
+ void push(TrustedImmPtr ptr)
+ {
+ move(TrustedImmPtr(ptr), ScratchRegister);
+ push(ScratchRegister);
+ }
+
+ void push(VM::String* name)
+ {
+ push(TrustedImmPtr(name));
+ }
+
+ // Double load/store between a temp's Value slot and an FP register.
+ using JSC::MacroAssembler::loadDouble;
+ void loadDouble(V4IR::Temp* temp, FPRegisterID dest)
+ {
+ Pointer ptr = loadTempAddress(ScratchRegister, temp);
+ loadDouble(ptr, dest);
+ }
+
+ using JSC::MacroAssembler::storeDouble;
+ void storeDouble(FPRegisterID source, V4IR::Temp* temp)
+ {
+ Pointer ptr = loadTempAddress(ScratchRegister, temp);
+ storeDouble(source, ptr);
+ }
+
+ // Generic 8-byte Value copy helpers (defined out of line).
+ template <typename Result, typename Source>
+ void copyValue(Result result, Source source);
+ template <typename Result>
+ void copyValue(Result result, V4IR::Expr* source);
+
+ // Store an immediate VM::Value to memory: one 64-bit store when a Value
+ // fits in a register, otherwise payload (int_32) then tag as two 32-bit
+ // stores in Value layout order.
+ void storeValue(VM::Value value, Address destination)
+ {
+#ifdef VALUE_FITS_IN_REGISTER
+ store64(TrustedImm64(value.val), destination);
+#else
+ store32(TrustedImm32(value.int_32), destination);
+ destination.offset += 4;
+ store32(TrustedImm32(value.tag), destination);
+#endif
+ }
+
+ void storeValue(VM::Value value, V4IR::Temp* temp);
+
+ void enterStandardStackFrame(int locals);
+ void leaveStandardStackFrame(int locals);
+
+ // sizeOfArgument(x): stack bytes occupied by `x` when passed on the
+ // stack; used to compute the post-call stack correction.
+ static inline int sizeOfArgument(VoidType)
+ { return 0; }
+ static inline int sizeOfArgument(RegisterID)
+ { return RegisterSize; }
+ static inline int sizeOfArgument(V4IR::Temp*)
+ { return 8; } // Size of value
+ static inline int sizeOfArgument(V4IR::Expr*)
+ { return 8; } // Size of value
+ static inline int sizeOfArgument(const Pointer&)
+ { return sizeof(void*); }
+ static inline int sizeOfArgument(VM::String*)
+ { return sizeof(VM::String*); }
+ static inline int sizeOfArgument(const PointerToValue &)
+ { return sizeof(void *); }
+ static inline int sizeOfArgument(const Reference &)
+ { return sizeof(void *); }
+ static inline int sizeOfArgument(const ReentryBlock &)
+ { return sizeof(void *); }
+ static inline int sizeOfArgument(TrustedImmPtr)
+ { return sizeof(void*); }
+ static inline int sizeOfArgument(TrustedImm32)
+ { return 4; }
+
+ // Place argument number `argumentNumber` either in its ABI register
+ // (when one is available) or on the stack. Returns the number of stack
+ // bytes consumed (0 for register-passed arguments).
+ template <int argumentNumber, typename T>
+ int loadArgumentOnStackOrRegister(const T &value)
+ {
+ if (argumentNumber < RegisterArgumentCount) {
+ loadArgument(value, registerForArgument(argumentNumber));
+ return 0;
+ } else {
+ push(value);
+ return sizeOfArgument(value);
+ }
+ }
+
+ // VoidType marks an unused trailing argument slot: nothing to pass.
+ template <int argumentNumber>
+ int loadArgumentOnStackOrRegister(const VoidType &value)
+ {
+ Q_UNUSED(value);
+ return 0;
+ }
+
+ // Core call generator: marshals up to six arguments (registers first,
+ // then stack, pushed in reverse so arg1 ends up lowest), optionally
+ // reserves stack space for a by-value return that doesn't fit in a
+ // register (passing its address as a hidden first parameter), emits the
+ // call, stores the result via storeArgument, and restores the stack.
+ template <typename ArgRet, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6>
+ void generateFunctionCallImp(ArgRet r, const char* functionName, FunctionPtr function, Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4, Arg5 arg5, Arg6 arg6)
+ {
+ int totalNumberOfArgs = 6;
+
+ // If necessary reserve space for the return value on the stack and
+ // pass the pointer to it as the first hidden parameter.
+ bool returnValueOnStack = false;
+ int sizeOfReturnValueOnStack = sizeOfArgument(r);
+ if (sizeOfReturnValueOnStack > RegisterSize) {
+ sub32(TrustedImm32(sizeOfReturnValueOnStack), StackPointerRegister);
+ ++totalNumberOfArgs;
+ returnValueOnStack = true;
+ }
+
+ int stackSpaceUsedForArgs = 0;
+ stackSpaceUsedForArgs += loadArgumentOnStackOrRegister<5>(arg6);
+ stackSpaceUsedForArgs += loadArgumentOnStackOrRegister<4>(arg5);
+ stackSpaceUsedForArgs += loadArgumentOnStackOrRegister<3>(arg4);
+ stackSpaceUsedForArgs += loadArgumentOnStackOrRegister<2>(arg3);
+ stackSpaceUsedForArgs += loadArgumentOnStackOrRegister<1>(arg2);
+ stackSpaceUsedForArgs += loadArgumentOnStackOrRegister<0>(arg1);
+
+ if (returnValueOnStack) {
+ // Load address of return value
+ push(Pointer(StackPointerRegister, stackSpaceUsedForArgs));
+ }
+
+ callAbsolute(functionName, function);
+
+ int stackSizeToCorrect = stackSpaceUsedForArgs;
+ if (returnValueOnStack)
+ stackSizeToCorrect += sizeOfReturnValueOnStack;
+
+ storeArgument(ReturnValueRegister, r);
+
+ if (stackSizeToCorrect)
+ add32(TrustedImm32(stackSizeToCorrect), StackPointerRegister);
+ }
+
+ // Arity-reducing forwarders: pad the missing trailing arguments with
+ // VoidType so the 6-argument core does all the work.
+ template <typename ArgRet, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5>
+ void generateFunctionCallImp(ArgRet r, const char* functionName, FunctionPtr function, Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4, Arg5 arg5)
+ {
+ generateFunctionCallImp(r, functionName, function, arg1, arg2, arg3, arg4, arg5, VoidType());
+ }
+
+ template <typename ArgRet, typename Arg1, typename Arg2, typename Arg3, typename Arg4>
+ void generateFunctionCallImp(ArgRet r, const char* functionName, FunctionPtr function, Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4)
+ {
+ generateFunctionCallImp(r, functionName, function, arg1, arg2, arg3, arg4, VoidType());
+ }
+
+ template <typename ArgRet, typename Arg1, typename Arg2, typename Arg3>
+ void generateFunctionCallImp(ArgRet r, const char* functionName, FunctionPtr function, Arg1 arg1, Arg2 arg2, Arg3 arg3)
+ {
+ generateFunctionCallImp(r, functionName, function, arg1, arg2, arg3, VoidType(), VoidType());
+ }
+
+ template <typename ArgRet, typename Arg1, typename Arg2>
+ void generateFunctionCallImp(ArgRet r, const char* functionName, FunctionPtr function, Arg1 arg1, Arg2 arg2)
+ {
+ generateFunctionCallImp(r, functionName, function, arg1, arg2, VoidType(), VoidType(), VoidType());
+ }
+
+ template <typename ArgRet, typename Arg1>
+ void generateFunctionCallImp(ArgRet r, const char* functionName, FunctionPtr function, Arg1 arg1)
+ {
+ generateFunctionCallImp(r, functionName, function, arg1, VoidType(), VoidType(), VoidType(), VoidType());
+ }
+
+ // Member-function-pointer types for the inline_* binary operations below:
+ // one flavor reads the left operand from memory, the other takes an
+ // immediate. Both return the Jump to take when the inline fast path fails.
+ typedef Jump (Assembler::*MemRegBinOp)(Address, RegisterID);
+ typedef Jump (Assembler::*ImmRegBinOp)(TrustedImm32, RegisterID);
+
+ // Per-opcode dispatch record: human-readable name, the run-time fallback,
+ // and the inline fast-path variants used by generateBinOp().
+ struct BinaryOperationInfo {
+ const char *name;
+ VM::BinOp fallbackImplementation;
+ MemRegBinOp inlineMemRegOp;
+ ImmRegBinOp inlineImmRegOp;
+ };
+
+ // Table indexed by V4IR::AluOp; defined out of line.
+ static const BinaryOperationInfo binaryOperations[QQmlJS::V4IR::LastAluOp + 1];
+
+ // Emit code for "target = left <operation> right", using the inline fast
+ // path from binaryOperations when available.
+ void generateBinOp(V4IR::AluOp operation, V4IR::Temp* target, V4IR::Temp* left, V4IR::Temp* right);
+
+ // Inline int32 addition; the returned jump is taken on signed overflow.
+ Jump inline_add32(Address addr, RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ return branchAdd32(Overflow, addr, reg);
+#else
+ // ALU cannot take a memory operand on this target: load into the
+ // scratch register first.
+ load32(addr, ScratchRegister);
+ return branchAdd32(Overflow, ScratchRegister, reg);
+#endif
+ }
+
+ // Immediate variant of the inline int32 addition (same overflow contract).
+ Jump inline_add32(TrustedImm32 imm, RegisterID reg)
+ {
+ return branchAdd32(Overflow, imm, reg);
+ }
+
+ // Inline int32 subtraction; the returned jump is taken on signed overflow.
+ Jump inline_sub32(Address addr, RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ return branchSub32(Overflow, addr, reg);
+#else
+ // No memory-operand ALU ops: go through the scratch register.
+ load32(addr, ScratchRegister);
+ return branchSub32(Overflow, ScratchRegister, reg);
+#endif
+ }
+
+ // Immediate variant of the inline int32 subtraction.
+ Jump inline_sub32(TrustedImm32 imm, RegisterID reg)
+ {
+ return branchSub32(Overflow, imm, reg);
+ }
+
+ // Inline int32 multiplication; the returned jump is taken on signed overflow.
+ Jump inline_mul32(Address addr, RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ return branchMul32(Overflow, addr, reg);
+#else
+ // No memory-operand ALU ops: go through the scratch register.
+ load32(addr, ScratchRegister);
+ return branchMul32(Overflow, ScratchRegister, reg);
+#endif
+ }
+
+ // Immediate variant; branchMul32's imm form takes an explicit source and
+ // destination register (here the same register for both).
+ Jump inline_mul32(TrustedImm32 imm, RegisterID reg)
+ {
+ return branchMul32(Overflow, imm, reg, reg);
+ }
+
+ // Inline "<<". The shift count is masked to its low five bits (taken mod
+ // 32). A left shift cannot fail here, so a default-constructed (invalid)
+ // Jump is returned to signal "no slow-path branch needed".
+ Jump inline_shl32(Address addr, RegisterID reg)
+ {
+ load32(addr, ScratchRegister);
+ and32(TrustedImm32(0x1f), ScratchRegister);
+ lshift32(ScratchRegister, reg);
+ return Jump();
+ }
+
+ // Immediate variant: the constant shift count is masked at codegen time.
+ Jump inline_shl32(TrustedImm32 imm, RegisterID reg)
+ {
+ imm.m_value &= 0x1f;
+ lshift32(imm, reg);
+ return Jump();
+ }
+
+ // Inline arithmetic ">>". Shift count masked to 0..31; the signed result
+ // always fits an int32, so no slow-path jump is returned.
+ Jump inline_shr32(Address addr, RegisterID reg)
+ {
+ load32(addr, ScratchRegister);
+ and32(TrustedImm32(0x1f), ScratchRegister);
+ rshift32(ScratchRegister, reg);
+ return Jump();
+ }
+
+ // Immediate variant: the constant shift count is masked at codegen time.
+ Jump inline_shr32(TrustedImm32 imm, RegisterID reg)
+ {
+ imm.m_value &= 0x1f;
+ rshift32(imm, reg);
+ return Jump();
+ }
+
+ // Inline unsigned ">>>". Shift count masked to 0..31. Unlike the other
+ // shifts this returns a real jump: it is taken when the result has the
+ // sign bit set — i.e. the unsigned value does not fit a signed int32 and
+ // the caller must fall back (presumably to a double representation;
+ // confirm in generateBinOp).
+ Jump inline_ushr32(Address addr, RegisterID reg)
+ {
+ load32(addr, ScratchRegister);
+ and32(TrustedImm32(0x1f), ScratchRegister);
+ urshift32(ScratchRegister, reg);
+ return branchTest32(Signed, reg, reg);
+ }
+
+ // Immediate variant; same sign-bit check on the result.
+ Jump inline_ushr32(TrustedImm32 imm, RegisterID reg)
+ {
+ imm.m_value &= 0x1f;
+ urshift32(imm, reg);
+ return branchTest32(Signed, reg, reg);
+ }
+
+ // Inline bitwise AND. Cannot overflow, so no slow-path jump is returned.
+ Jump inline_and32(Address addr, RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ and32(addr, reg);
+#else
+ // No memory-operand ALU ops: go through the scratch register.
+ load32(addr, ScratchRegister);
+ and32(ScratchRegister, reg);
+#endif
+ return Jump();
+ }
+
+ // Immediate variant of the inline bitwise AND.
+ Jump inline_and32(TrustedImm32 imm, RegisterID reg)
+ {
+ and32(imm, reg);
+ return Jump();
+ }
+
+ // Inline bitwise OR. Cannot overflow, so no slow-path jump is returned.
+ Jump inline_or32(Address addr, RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ or32(addr, reg);
+#else
+ // No memory-operand ALU ops: go through the scratch register.
+ load32(addr, ScratchRegister);
+ or32(ScratchRegister, reg);
+#endif
+ return Jump();
+ }
+
+ // Immediate variant of the inline bitwise OR.
+ Jump inline_or32(TrustedImm32 imm, RegisterID reg)
+ {
+ or32(imm, reg);
+ return Jump();
+ }
+
+ // Inline bitwise XOR. Cannot overflow, so no slow-path jump is returned.
+ Jump inline_xor32(Address addr, RegisterID reg)
+ {
+#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
+ xor32(addr, reg);
+#else
+ // No memory-operand ALU ops: go through the scratch register.
+ load32(addr, ScratchRegister);
+ xor32(ScratchRegister, reg);
+#endif
+ return Jump();
+ }
+
+ // Immediate variant of the inline bitwise XOR.
+ Jump inline_xor32(TrustedImm32 imm, RegisterID reg)
+ {
+ xor32(imm, reg);
+ return Jump();
+ }
+
+ // Resolve all recorded patches/calls and attach the generated code to
+ // vmFunc (definition elsewhere).
+ void link(VM::Function *vmFunc);
+
+private:
+ V4IR::Function *_function;
+ VM::Function *_vmFunction;
+ // Entry label of each basic block, for branch targets.
+ QHash<V4IR::BasicBlock *, Label> _addrs;
+ // Branches into blocks not yet emitted, fixed up in link().
+ QHash<V4IR::BasicBlock *, QVector<Jump> > _patches;
+ QList<CallToLink> _callsToLink;
+
+ // A data-label whose pointer payload must be patched to 'target'.
+ struct DataLabelPatch {
+ DataLabelPtr dataLabel;
+ Label target;
+ };
+ QList<DataLabelPatch> _dataLabelPatches;
+
+ QHash<V4IR::BasicBlock *, QVector<DataLabelPtr> > _labelPatches;
+
+ VM::ExecutionEngine *_engine;
+};
+
+// MASM backend of the instruction selector: implements the abstract
+// V4IR::InstructionSelection visitor by emitting machine code through the
+// Assembler, and the EvalInstructionSelection driver interface.
+class Q_V4_EXPORT InstructionSelection:
+ protected V4IR::InstructionSelection,
+ public EvalInstructionSelection
+{
+public:
+ InstructionSelection(VM::ExecutionEngine *engine, V4IR::Module *module);
+ ~InstructionSelection();
+
+ // Generate and link code for one IR function into vmFunction.
+ virtual void run(VM::Function *vmFunction, V4IR::Function *function);
+
+protected:
+ // Implementations of the abstract builtin/call/load/store hooks declared
+ // in V4IR::InstructionSelection (see qv4isel_p.h for their contracts).
+ virtual void callBuiltinInvalid(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void callBuiltinTypeofMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinTypeofSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinTypeofName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinTypeofValue(V4IR::Temp *value, V4IR::Temp *result);
+ virtual void callBuiltinDeleteMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinDeleteSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinDeleteName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinDeleteValue(V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostDecrementValue(V4IR::Temp *value, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementName(const QString &name, V4IR::Temp *result);
+ virtual void callBuiltinPostIncrementValue(V4IR::Temp *value, V4IR::Temp *result);
+ virtual void callBuiltinThrow(V4IR::Temp *arg);
+ virtual void callBuiltinFinishTry();
+ virtual void callBuiltinForeachIteratorObject(V4IR::Temp *arg, V4IR::Temp *result);
+ virtual void callBuiltinForeachNextPropertyname(V4IR::Temp *arg, V4IR::Temp *result);
+ virtual void callBuiltinPushWithScope(V4IR::Temp *arg);
+ virtual void callBuiltinPopScope();
+ virtual void callBuiltinDeclareVar(bool deletable, const QString &name);
+ virtual void callBuiltinDefineGetterSetter(V4IR::Temp *object, const QString &name, V4IR::Temp *getter, V4IR::Temp *setter);
+ virtual void callBuiltinDefineProperty(V4IR::Temp *object, const QString &name, V4IR::Temp *value);
+ virtual void callBuiltinDefineArray(V4IR::Temp *result, V4IR::ExprList *args);
+ virtual void callProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void callSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void callValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void loadThisObject(V4IR::Temp *temp);
+ virtual void loadConst(V4IR::Const *sourceConst, V4IR::Temp *targetTemp);
+ virtual void loadString(const QString &str, V4IR::Temp *targetTemp);
+ virtual void loadRegexp(V4IR::RegExp *sourceRegexp, V4IR::Temp *targetTemp);
+ virtual void getActivationProperty(const V4IR::Name *name, V4IR::Temp *temp);
+ virtual void setActivationProperty(V4IR::Temp *source, const QString &targetName);
+ virtual void initClosure(V4IR::Closure *closure, V4IR::Temp *target);
+ virtual void getProperty(V4IR::Temp *base, const QString &name, V4IR::Temp *target);
+ virtual void setProperty(V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName);
+ virtual void getElement(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *target);
+ virtual void setElement(V4IR::Temp *source, V4IR::Temp *targetBase, V4IR::Temp *targetIndex);
+ virtual void copyValue(V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp);
+ virtual void unop(V4IR::AluOp oper, V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp);
+ virtual void binop(V4IR::AluOp oper, V4IR::Temp *leftSource, V4IR::Temp *rightSource, V4IR::Temp *target);
+ virtual void inplaceNameOp(V4IR::AluOp oper, V4IR::Temp *rightSource, const QString &targetName);
+ virtual void inplaceElementOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBaseTemp, V4IR::Temp *targetIndexTemp);
+ virtual void inplaceMemberOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName);
+
+ typedef Assembler::Address Address;
+ typedef Assembler::Pointer Pointer;
+
+ // Address of the index-th incoming argument relative to the frame pointer.
+ Address addressForArgument(int index) const
+ {
+ // StackFrameRegister points to its old value on the stack, and above
+ // it we have the return address, hence the need to step over two
+ // values before reaching the first argument.
+ return Address(Assembler::StackFrameRegister, (index + 2) * sizeof(void*));
+ }
+
+ // Some run-time functions take (Value* args, int argc). This function is for populating
+ // the args.
+ Pointer argumentAddressForCall(int argument)
+ {
+ const int index = _function->maxNumberOfArguments - argument;
+ return Pointer(Assembler::LocalsRegister, sizeof(VM::Value) * (-index)
+ - sizeof(void*) // size of ebp
+ - sizeof(void*) * Assembler::calleeSavedRegisterCount
+ );
+ }
+ // Start of the outgoing-argument area (argument 0).
+ Pointer baseAddressForCallArguments()
+ {
+ return argumentAddressForCall(0);
+ }
+
+ VM::String *identifier(const QString &s);
+ virtual void constructActivationProperty(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void constructProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result);
+ virtual void constructValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result);
+
+ virtual void visitJump(V4IR::Jump *);
+ virtual void visitCJump(V4IR::CJump *);
+ virtual void visitRet(V4IR::Ret *);
+ virtual void visitTry(V4IR::Try *);
+
+private:
+ // Stringify the callee so generated calls carry a name for diagnostics.
+ #define isel_stringIfyx(s) #s
+ #define isel_stringIfy(s) isel_stringIfyx(s)
+
+ #define generateFunctionCall(t, function, ...) \
+ _as->generateFunctionCallImp(t, isel_stringIfy(function), function, __VA_ARGS__)
+
+ int prepareVariableArguments(V4IR::ExprList* args);
+
+ typedef void (*ActivationMethod)(VM::ExecutionContext *, VM::Value *result, VM::String *name, VM::Value *args, int argc);
+ void callRuntimeMethodImp(V4IR::Temp *result, const char* name, ActivationMethod method, V4IR::Expr *base, V4IR::ExprList *args);
+#define callRuntimeMethod(result, function, ...) \
+ callRuntimeMethodImp(result, isel_stringIfy(function), function, __VA_ARGS__)
+
+ uint addLookup(VM::String *name);
+ uint addGlobalLookup(VM::String *name);
+
+ V4IR::BasicBlock *_block;
+ V4IR::Function* _function;
+ VM::Function* _vmFunction;
+ QVector<VM::Lookup> _lookups;
+ Assembler* _as;
+ QSet<V4IR::BasicBlock*> _reentryBlocks;
+};
+
+// Factory registered with the engine to create the MASM (JIT) instruction
+// selector for a given module.
+class Q_V4_EXPORT ISelFactory: public EvalISelFactory
+{
+public:
+ virtual ~ISelFactory() {}
+ virtual EvalInstructionSelection *create(VM::ExecutionEngine *engine, V4IR::Module *module)
+ { return new InstructionSelection(engine, module); }
+};
+
+} // end of namespace MASM
+} // end of namespace QQmlJS
+
+#endif // QV4ISEL_MASM_P_H
diff --git a/src/qml/qml/v4vm/qv4isel_p.cpp b/src/qml/qml/v4vm/qv4isel_p.cpp
new file mode 100644
index 0000000000..417e38550e
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4isel_p.cpp
@@ -0,0 +1,398 @@
+#include "debugging.h"
+#include "qv4engine.h"
+#include "qv4jsir_p.h"
+#include "qv4isel_p.h"
+#include "qv4isel_util_p.h"
+#include "qv4functionobject.h"
+
+#include <QString>
+
+#include <cassert>
+
+namespace {
+// Shared stream for diagnostic dumps on the Q_UNIMPLEMENTED paths below.
+QTextStream qout(stderr, QIODevice::WriteOnly);
+} // anonymous namespace
+
+using namespace QQmlJS;
+using namespace QQmlJS::V4IR;
+
+// Eagerly creates a VM::Function for every IR function in the module
+// (starting from the root), so vmFunction() can later look any of them up;
+// actual code generation is deferred until vmFunction() is called.
+EvalInstructionSelection::EvalInstructionSelection(VM::ExecutionEngine *engine, Module *module)
+ : _engine(engine)
+ , useFastLookups(true)
+{
+ assert(engine);
+ assert(module);
+
+ // Recurses through nested functions; the loop only verifies coverage.
+ createFunctionMapping(0, module->rootFunction);
+ foreach (V4IR::Function *f, module->functions) {
+ assert(_irToVM.contains(f));
+ }
+}
+
+// Out-of-line definitions: both destructors are declared pure virtual in
+// qv4isel_p.h, but still need a body since derived destructors call them.
+EvalInstructionSelection::~EvalInstructionSelection()
+{}
+
+EvalISelFactory::~EvalISelFactory()
+{}
+
+// Create the VM::Function shell for irFunction, copy its metadata, intern
+// its formal/local names, and recurse into nested functions. 'outer' is the
+// lexically enclosing VM function (0 for the root). Returns the new shell;
+// no code is generated here.
+VM::Function *EvalInstructionSelection::createFunctionMapping(VM::Function *outer, Function *irFunction)
+{
+ VM::Function *vmFunction = _engine->newFunction(irFunction->name ? *irFunction->name : QString());
+ _irToVM.insert(irFunction, vmFunction);
+
+ vmFunction->hasDirectEval = irFunction->hasDirectEval;
+ vmFunction->usesArgumentsObject = irFunction->usesArgumentsObject;
+ vmFunction->hasNestedFunctions = !irFunction->nestedFunctions.isEmpty();
+ vmFunction->isStrict = irFunction->isStrict;
+ vmFunction->outer = outer;
+ vmFunction->isNamedExpression = irFunction->isNamedExpression;
+
+ if (outer)
+ outer->nestedFunctions.append(vmFunction);
+
+ // Null entries are skipped (silently) rather than interned as empty names.
+ foreach (const QString *formal, irFunction->formals)
+ if (formal)
+ vmFunction->formals.append(_engine->newString(*formal));
+ foreach (const QString *local, irFunction->locals)
+ if (local)
+ vmFunction->locals.append(_engine->newString(*local));
+
+ foreach (V4IR::Function *function, irFunction->nestedFunctions)
+ createFunctionMapping(vmFunction, function);
+
+ // Keep the debugger's IR<->VM mapping in sync when one is attached.
+ if (_engine->debugger)
+ _engine->debugger->mapFunction(vmFunction, irFunction);
+
+ return vmFunction;
+}
+
+// Look up the VM function for an IR function, generating its code lazily on
+// first use ('code' being null means run() has not produced code yet).
+VM::Function *EvalInstructionSelection::vmFunction(Function *f) {
+ VM::Function *function = _irToVM[f];
+ if (!function->code)
+ run(function, f);
+ return function;
+}
+
+// Central dispatcher for Move statements. A Move with OpInvalid is a plain
+// assignment, dispatched on the shapes of target and source to the matching
+// abstract hook; any other op is an in-place operation (x += y, etc.).
+// Falling through every pattern is a compiler bug: dump the statement and
+// abort.
+void InstructionSelection::visitMove(V4IR::Move *s)
+{
+ if (s->op == V4IR::OpInvalid) {
+ if (V4IR::Name *n = s->target->asName()) {
+ // name = temp
+ if (s->source->asTemp()) {
+ setActivationProperty(s->source->asTemp(), *n->id);
+ return;
+ }
+ } else if (V4IR::Temp *t = s->target->asTemp()) {
+ // temp = <source>, dispatched on the source's shape:
+ if (V4IR::Name *n = s->source->asName()) {
+ if (*n->id == QStringLiteral("this")) // TODO: `this' should be a builtin.
+ loadThisObject(t);
+ else
+ getActivationProperty(n, t);
+ return;
+ } else if (V4IR::Const *c = s->source->asConst()) {
+ loadConst(c, t);
+ return;
+ } else if (V4IR::Temp *t2 = s->source->asTemp()) {
+ copyValue(t2, t);
+ return;
+ } else if (V4IR::String *str = s->source->asString()) {
+ loadString(*str->value, t);
+ return;
+ } else if (V4IR::RegExp *re = s->source->asRegExp()) {
+ loadRegexp(re, t);
+ return;
+ } else if (V4IR::Closure *clos = s->source->asClosure()) {
+ initClosure(clos, t);
+ return;
+ } else if (V4IR::New *ctor = s->source->asNew()) {
+ // temp = new <base>(args), dispatched on the constructor base.
+ if (Name *func = ctor->base->asName()) {
+ constructActivationProperty(func, ctor->args, t);
+ return;
+ } else if (V4IR::Member *member = ctor->base->asMember()) {
+ constructProperty(member->base->asTemp(), *member->name, ctor->args, t);
+ return;
+ } else if (V4IR::Temp *value = ctor->base->asTemp()) {
+ constructValue(value, ctor->args, t);
+ return;
+ }
+ } else if (V4IR::Member *m = s->source->asMember()) {
+ if (V4IR::Temp *base = m->base->asTemp()) {
+ getProperty(base, *m->name, t);
+ return;
+ }
+ } else if (V4IR::Subscript *ss = s->source->asSubscript()) {
+ getElement(ss->base->asTemp(), ss->index->asTemp(), t);
+ return;
+ } else if (V4IR::Unop *u = s->source->asUnop()) {
+ if (V4IR::Temp *e = u->expr->asTemp()) {
+ unop(u->op, e, t);
+ return;
+ }
+ } else if (V4IR::Binop *b = s->source->asBinop()) {
+ if (b->left->asTemp() && b->right->asTemp()) {
+ binop(b->op, b->left->asTemp(), b->right->asTemp(), t);
+ return;
+ }
+ } else if (V4IR::Call *c = s->source->asCall()) {
+ if (c->base->asName()) {
+ callBuiltin(c, t);
+ return;
+ } else if (Member *member = c->base->asMember()) {
+ callProperty(member->base, *member->name, c->args, t);
+ return;
+ // NOTE(review): the inner 's' below shadows the Move* parameter;
+ // renaming it (e.g. to 'sub') would aid readability.
+ } else if (Subscript *s = c->base->asSubscript()) {
+ callSubscript(s->base, s->index, c->args, t);
+ return;
+ } else if (V4IR::Temp *value = c->base->asTemp()) {
+ callValue(value, c->args, t);
+ return;
+ }
+ }
+ } else if (V4IR::Member *m = s->target->asMember()) {
+ // base.name = temp
+ if (V4IR::Temp *base = m->base->asTemp()) {
+ if (s->source->asTemp()) {
+ setProperty(s->source->asTemp(), base, *m->name);
+ return;
+ }
+ }
+ } else if (V4IR::Subscript *ss = s->target->asSubscript()) {
+ // base[index] = temp
+ if (s->source->asTemp()) {
+ setElement(s->source->asTemp(), ss->base->asTemp(), ss->index->asTemp());
+ return;
+ }
+ }
+ } else {
+ // inplace assignment, e.g. x += 1, ++x, ...
+ if (V4IR::Temp *t = s->target->asTemp()) {
+ if (s->source->asTemp()) {
+ binop(s->op, t, s->source->asTemp(), t);
+ return;
+ }
+ } else if (V4IR::Name *n = s->target->asName()) {
+ if (s->source->asTemp()) {
+ inplaceNameOp(s->op, s->source->asTemp(), *n->id);
+ return;
+ }
+ } else if (V4IR::Subscript *ss = s->target->asSubscript()) {
+ if (s->source->asTemp()) {
+ inplaceElementOp(s->op, s->source->asTemp(), ss->base->asTemp(),
+ ss->index->asTemp());
+ return;
+ }
+ } else if (V4IR::Member *m = s->target->asMember()) {
+ if (s->source->asTemp()) {
+ inplaceMemberOp(s->op, s->source->asTemp(), m->base->asTemp(), *m->name);
+ return;
+ }
+ }
+ }
+
+ // For anything else...:
+ Q_UNIMPLEMENTED();
+ s->dump(qout, V4IR::Stmt::MIR);
+ qout << endl;
+ assert(!"TODO");
+}
+
+InstructionSelection::~InstructionSelection()
+{
+}
+
+// Enter/Leave statements are not expected in the IR this selector consumes,
+// hence unreachable (abort in debug builds).
+void InstructionSelection::visitEnter(Enter *)
+{
+ Q_UNREACHABLE();
+}
+
+void InstructionSelection::visitLeave(Leave *)
+{
+ Q_UNREACHABLE();
+}
+
+// Visit an expression statement: a call whose result is discarded (the null
+// result argument). Dispatch mirrors the result-producing call handling in
+// visitMove; anything that is not a call is unimplemented.
+void InstructionSelection::visitExp(V4IR::Exp *s)
+{
+ if (V4IR::Call *c = s->expr->asCall()) {
+ // These are calls where the result is ignored.
+ if (c->base->asName()) {
+ callBuiltin(c, 0);
+ } else if (Temp *value = c->base->asTemp()) {
+ callValue(value, c->args, 0);
+ } else if (Member *member = c->base->asMember()) {
+ callProperty(member->base, *member->name, c->args, 0);
+ } else if (Subscript *ss = c->base->asSubscript()) {
+ // Renamed from 's': the old name shadowed the Exp* parameter and
+ // diverged from the 'ss' naming used in visitMove.
+ callSubscript(ss->base, ss->index, c->args, 0);
+ } else {
+ Q_UNIMPLEMENTED();
+ }
+ } else {
+ Q_UNIMPLEMENTED();
+ }
+}
+
+// Dispatch a builtin call (base is a Name with a builtin tag) to the matching
+// abstract hook. 'result' may be null when the value is discarded. Unhandled
+// builtins or argument shapes fall through to the diagnostic abort at the end.
+void InstructionSelection::callBuiltin(V4IR::Call *call, V4IR::Temp *result)
+{
+ V4IR::Name *baseName = call->base->asName();
+ assert(baseName != 0);
+
+ switch (baseName->builtin) {
+ case V4IR::Name::builtin_invalid:
+ // Not actually a builtin: a plain call by name.
+ callBuiltinInvalid(baseName, call->args, result);
+ return;
+
+ case V4IR::Name::builtin_typeof: {
+ // typeof dispatches on the shape of its single operand.
+ if (V4IR::Member *m = call->args->expr->asMember()) {
+ callBuiltinTypeofMember(m->base->asTemp(), *m->name, result);
+ return;
+ } else if (V4IR::Subscript *ss = call->args->expr->asSubscript()) {
+ callBuiltinTypeofSubscript(ss->base->asTemp(), ss->index->asTemp(), result);
+ return;
+ } else if (V4IR::Name *n = call->args->expr->asName()) {
+ callBuiltinTypeofName(*n->id, result);
+ return;
+ } else if (V4IR::Temp *arg = call->args->expr->asTemp()){
+ assert(arg != 0);
+ callBuiltinTypeofValue(arg, result);
+ return;
+ }
+ } break;
+
+ case V4IR::Name::builtin_delete: {
+ if (V4IR::Member *m = call->args->expr->asMember()) {
+ callBuiltinDeleteMember(m->base->asTemp(), *m->name, result);
+ return;
+ } else if (V4IR::Subscript *ss = call->args->expr->asSubscript()) {
+ callBuiltinDeleteSubscript(ss->base->asTemp(), ss->index->asTemp(), result);
+ return;
+ } else if (V4IR::Name *n = call->args->expr->asName()) {
+ callBuiltinDeleteName(*n->id, result);
+ return;
+ } else if (call->args->expr->asTemp()){
+ // TODO: should throw in strict mode
+ callBuiltinDeleteValue(result);
+ return;
+ }
+ } break;
+
+ case V4IR::Name::builtin_postincrement: {
+ if (V4IR::Member *m = call->args->expr->asMember()) {
+ callBuiltinPostIncrementMember(m->base->asTemp(), *m->name, result);
+ return;
+ } else if (V4IR::Subscript *ss = call->args->expr->asSubscript()) {
+ callBuiltinPostIncrementSubscript(ss->base->asTemp(), ss->index->asTemp(), result);
+ return;
+ } else if (V4IR::Name *n = call->args->expr->asName()) {
+ callBuiltinPostIncrementName(*n->id, result);
+ return;
+ } else if (V4IR::Temp *arg = call->args->expr->asTemp()){
+ assert(arg != 0);
+ callBuiltinPostIncrementValue(arg, result);
+ return;
+ }
+ } break;
+
+ case V4IR::Name::builtin_postdecrement: {
+ if (V4IR::Member *m = call->args->expr->asMember()) {
+ callBuiltinPostDecrementMember(m->base->asTemp(), *m->name, result);
+ return;
+ } else if (V4IR::Subscript *ss = call->args->expr->asSubscript()) {
+ callBuiltinPostDecrementSubscript(ss->base->asTemp(), ss->index->asTemp(), result);
+ return;
+ } else if (V4IR::Name *n = call->args->expr->asName()) {
+ callBuiltinPostDecrementName(*n->id, result);
+ return;
+ } else if (V4IR::Temp *arg = call->args->expr->asTemp()){
+ assert(arg != 0);
+ callBuiltinPostDecrementValue(arg, result);
+ return;
+ }
+ } break;
+
+ case V4IR::Name::builtin_throw: {
+ V4IR::Temp *arg = call->args->expr->asTemp();
+ assert(arg != 0);
+ callBuiltinThrow(arg);
+ } return;
+
+ case V4IR::Name::builtin_finish_try:
+ callBuiltinFinishTry();
+ return;
+
+ case V4IR::Name::builtin_foreach_iterator_object: {
+ V4IR::Temp *arg = call->args->expr->asTemp();
+ assert(arg != 0);
+ callBuiltinForeachIteratorObject(arg, result);
+ } return;
+
+ case V4IR::Name::builtin_foreach_next_property_name: {
+ V4IR::Temp *arg = call->args->expr->asTemp();
+ assert(arg != 0);
+ callBuiltinForeachNextPropertyname(arg, result);
+ } return;
+ case V4IR::Name::builtin_push_with_scope: {
+ V4IR::Temp *arg = call->args->expr->asTemp();
+ assert(arg != 0);
+ callBuiltinPushWithScope(arg);
+ } return;
+
+ case V4IR::Name::builtin_pop_scope:
+ callBuiltinPopScope();
+ return;
+
+ case V4IR::Name::builtin_declare_vars: {
+ if (!call->args)
+ return;
+ // First argument is the 'deletable' flag; the rest are variable names.
+ // NOTE(review): asConst() is dereferenced without a null assert —
+ // confirm the IR builder guarantees a Const here.
+ V4IR::Const *deletable = call->args->expr->asConst();
+ assert(deletable->type == V4IR::BoolType);
+ for (V4IR::ExprList *it = call->args->next; it; it = it->next) {
+ V4IR::Name *arg = it->expr->asName();
+ assert(arg != 0);
+ callBuiltinDeclareVar(deletable->value != 0, *arg->id);
+ }
+ } return;
+
+ case V4IR::Name::builtin_define_getter_setter: {
+ if (!call->args)
+ return;
+ // Argument order: object, name, getter, setter.
+ // NOTE(review): unlike 'object', 'name' is not asserted before
+ // *name->id below — confirm asName() cannot fail here.
+ V4IR::ExprList *args = call->args;
+ V4IR::Temp *object = args->expr->asTemp();
+ assert(object);
+ args = args->next;
+ assert(args);
+ V4IR::Name *name = args->expr->asName();
+ args = args->next;
+ assert(args);
+ V4IR::Temp *getter = args->expr->asTemp();
+ args = args->next;
+ assert(args);
+ V4IR::Temp *setter = args->expr->asTemp();
+
+ callBuiltinDefineGetterSetter(object, *name->id, getter, setter);
+ } return;
+
+ case V4IR::Name::builtin_define_property: {
+ if (!call->args)
+ return;
+ // Argument order: object, name, value.
+ V4IR::ExprList *args = call->args;
+ V4IR::Temp *object = args->expr->asTemp();
+ assert(object);
+ args = args->next;
+ assert(args);
+ V4IR::Name *name = args->expr->asName();
+ args = args->next;
+ assert(args);
+ V4IR::Temp *value = args->expr->asTemp();
+
+ callBuiltinDefineProperty(object, *name->id, value);
+ } return;
+
+ case V4IR::Name::builtin_define_array:
+ callBuiltinDefineArray(result, call->args);
+ return;
+
+ default:
+ break;
+ }
+
+ Q_UNIMPLEMENTED();
+ call->dump(qout); qout << endl;
+ assert(!"TODO!");
+ Q_UNREACHABLE();
+}
diff --git a/src/qml/qml/v4vm/qv4isel_p.h b/src/qml/qml/v4vm/qv4isel_p.h
new file mode 100644
index 0000000000..e85127c5a9
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4isel_p.h
@@ -0,0 +1,158 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4ISEL_P_H
+#define QV4ISEL_P_H
+
+#include "qv4global.h"
+#include "qv4jsir_p.h"
+
+#include <qglobal.h>
+#include <QHash>
+
+namespace QQmlJS {
+
+namespace VM {
+struct ExecutionEngine;
+struct Function;
+} // namespace VM
+
+// Driver interface for a code-generation backend: owns the IR->VM function
+// mapping (created eagerly in the constructor) and generates code lazily via
+// vmFunction(). Abstract — the pure virtual destructor still has an
+// out-of-line definition in qv4isel_p.cpp.
+class Q_V4_EXPORT EvalInstructionSelection
+{
+public:
+ EvalInstructionSelection(VM::ExecutionEngine *engine, V4IR::Module *module);
+ virtual ~EvalInstructionSelection() = 0;
+
+ // Returns the VM function for f, generating its code on first use.
+ VM::Function *vmFunction(V4IR::Function *f);
+
+ void setUseFastLookups(bool b) { useFastLookups = b; }
+
+protected:
+ VM::Function *createFunctionMapping(VM::Function *outer, V4IR::Function *irFunction);
+ VM::ExecutionEngine *engine() const { return _engine; }
+ // Backend hook: generate code for one function.
+ virtual void run(VM::Function *vmFunction, V4IR::Function *function) = 0;
+
+private:
+ VM::ExecutionEngine *_engine;
+ QHash<V4IR::Function *, VM::Function *> _irToVM;
+protected:
+ bool useFastLookups;
+};
+
+// Abstract factory for instruction-selection backends (JIT, interpreter, ...).
+class Q_V4_EXPORT EvalISelFactory
+{
+public:
+ virtual ~EvalISelFactory() = 0;
+ virtual EvalInstructionSelection *create(VM::ExecutionEngine *engine, V4IR::Module *module) = 0;
+};
+
+namespace V4IR {
+// Backend-independent statement visitor: the visit* methods decompose IR
+// statements into the pure virtual primitives below, which each backend
+// implements. A null Temp* 'result' means the value is discarded.
+class Q_V4_EXPORT InstructionSelection: protected V4IR::StmtVisitor
+{
+public:
+ virtual ~InstructionSelection() = 0;
+
+public: // visitor methods for StmtVisitor:
+ virtual void visitMove(V4IR::Move *s);
+ virtual void visitEnter(V4IR::Enter *);
+ virtual void visitLeave(V4IR::Leave *);
+ virtual void visitExp(V4IR::Exp *s);
+
+public: // to implement by subclasses:
+ virtual void callBuiltinInvalid(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result) = 0;
+ virtual void callBuiltinTypeofMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinTypeofSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result) = 0;
+ virtual void callBuiltinTypeofName(const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinTypeofValue(V4IR::Temp *value, V4IR::Temp *result) = 0;
+ virtual void callBuiltinDeleteMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinDeleteSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result) = 0;
+ virtual void callBuiltinDeleteName(const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinDeleteValue(V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostDecrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostDecrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostDecrementName(const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostDecrementValue(V4IR::Temp *value, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostIncrementMember(V4IR::Temp *base, const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostIncrementSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostIncrementName(const QString &name, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPostIncrementValue(V4IR::Temp *value, V4IR::Temp *result) = 0;
+ virtual void callBuiltinThrow(V4IR::Temp *arg) = 0;
+ virtual void callBuiltinFinishTry() = 0;
+ virtual void callBuiltinForeachIteratorObject(V4IR::Temp *arg, V4IR::Temp *result) = 0;
+ virtual void callBuiltinForeachNextPropertyname(V4IR::Temp *arg, V4IR::Temp *result) = 0;
+ virtual void callBuiltinPushWithScope(V4IR::Temp *arg) = 0;
+ virtual void callBuiltinPopScope() = 0;
+ virtual void callBuiltinDeclareVar(bool deletable, const QString &name) = 0;
+ virtual void callBuiltinDefineGetterSetter(V4IR::Temp *object, const QString &name, V4IR::Temp *getter, V4IR::Temp *setter) = 0;
+ virtual void callBuiltinDefineProperty(V4IR::Temp *object, const QString &name, V4IR::Temp *value) = 0;
+ virtual void callBuiltinDefineArray(V4IR::Temp *result, V4IR::ExprList *args) = 0;
+ virtual void callValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result) = 0;
+ virtual void callProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result) = 0;
+ virtual void callSubscript(V4IR::Temp *base, V4IR::Temp *index, V4IR::ExprList *args, V4IR::Temp *result) = 0;
+ virtual void constructActivationProperty(V4IR::Name *func, V4IR::ExprList *args, V4IR::Temp *result) = 0;
+ virtual void constructProperty(V4IR::Temp *base, const QString &name, V4IR::ExprList *args, V4IR::Temp *result) = 0;
+ virtual void constructValue(V4IR::Temp *value, V4IR::ExprList *args, V4IR::Temp *result) = 0;
+ virtual void loadThisObject(V4IR::Temp *temp) = 0;
+ virtual void loadConst(V4IR::Const *sourceConst, V4IR::Temp *targetTemp) = 0;
+ virtual void loadString(const QString &str, V4IR::Temp *targetTemp) = 0;
+ virtual void loadRegexp(V4IR::RegExp *sourceRegexp, V4IR::Temp *targetTemp) = 0;
+ virtual void getActivationProperty(const V4IR::Name *name, V4IR::Temp *temp) = 0;
+ virtual void setActivationProperty(V4IR::Temp *source, const QString &targetName) = 0;
+ virtual void initClosure(V4IR::Closure *closure, V4IR::Temp *target) = 0;
+ virtual void getProperty(V4IR::Temp *base, const QString &name, V4IR::Temp *target) = 0;
+ virtual void setProperty(V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName) = 0;
+ virtual void getElement(V4IR::Temp *base, V4IR::Temp *index, V4IR::Temp *target) = 0;
+ virtual void setElement(V4IR::Temp *source, V4IR::Temp *targetBase, V4IR::Temp *targetIndex) = 0;
+ virtual void copyValue(V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp) = 0;
+ virtual void unop(V4IR::AluOp oper, V4IR::Temp *sourceTemp, V4IR::Temp *targetTemp) = 0;
+ virtual void binop(V4IR::AluOp oper, V4IR::Temp *leftSource, V4IR::Temp *rightSource, V4IR::Temp *target) = 0;
+ virtual void inplaceNameOp(V4IR::AluOp oper, V4IR::Temp *rightSource, const QString &targetName) = 0;
+ virtual void inplaceElementOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBaseTemp, V4IR::Temp *targetIndexTemp) = 0;
+ virtual void inplaceMemberOp(V4IR::AluOp oper, V4IR::Temp *source, V4IR::Temp *targetBase, const QString &targetName) = 0;
+
+private:
+ // Helper for visitMove/visitExp: dispatch a builtin call by name tag.
+ void callBuiltin(V4IR::Call *c, V4IR::Temp *temp);
+};
+} // namespace V4IR
+
+} // namespace QQmlJS
+
+#endif // QV4ISEL_P_H
diff --git a/src/qml/qml/v4vm/qv4isel_util_p.h b/src/qml/qml/v4vm/qv4isel_util_p.h
new file mode 100644
index 0000000000..e10a9658f2
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4isel_util_p.h
@@ -0,0 +1,77 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4ISEL_UTIL_P_H
+#define QV4ISEL_UTIL_P_H
+
+#include "qv4runtime.h"
+#include "qv4jsir_p.h"
+
+namespace QQmlJS {
+
+inline VM::Value convertToValue(V4IR::Const *c)
+{
+ switch (c->type) {
+ case V4IR::MissingType:
+ return VM::Value::deletedValue();
+ case V4IR::NullType:
+ return VM::Value::nullValue();
+ case V4IR::UndefinedType:
+ return VM::Value::undefinedValue();
+ case V4IR::BoolType:
+ return VM::Value::fromBoolean(c->value != 0);
+ case V4IR::NumberType: {
+ int ival = (int)c->value;
+ // +0 != -0, so we need to convert to double when negating 0
+ if (ival == c->value && !(c->value == 0 && isNegative(c->value))) {
+ return VM::Value::fromInt32(ival);
+ } else {
+ return VM::Value::fromDouble(c->value);
+ }
+ }
+ default:
+ Q_UNREACHABLE();
+ }
+}
+
+} // namespace QQmlJS
+
+#endif // QV4ISEL_UTIL_P_H
diff --git a/src/qml/qml/v4vm/qv4jsir.cpp b/src/qml/qml/v4vm/qv4jsir.cpp
new file mode 100644
index 0000000000..3dad094624
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4jsir.cpp
@@ -0,0 +1,948 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4jsir_p.h"
+#include <private/qqmljsast_p.h>
+
+#include <QtCore/qtextstream.h>
+#include <QtCore/qdebug.h>
+#include <QtCore/qset.h>
+#include <cmath>
+#include <cassert>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace V4IR {
+
+const char *typeName(Type t)
+{
+ switch (t) {
+ case UndefinedType: return "undefined";
+ case NullType: return "null";
+ case BoolType: return "bool";
+ case NumberType: return "number";
+ default: return "invalid";
+ }
+}
+
+const char *opname(AluOp op)
+{
+ switch (op) {
+ case OpInvalid: return "?";
+
+ case OpIfTrue: return "(bool)";
+ case OpNot: return "!";
+ case OpUMinus: return "-";
+ case OpUPlus: return "+";
+ case OpCompl: return "~";
+ case OpIncrement: return "++";
+ case OpDecrement: return "--";
+
+ case OpBitAnd: return "&";
+ case OpBitOr: return "|";
+ case OpBitXor: return "^";
+
+ case OpAdd: return "+";
+ case OpSub: return "-";
+ case OpMul: return "*";
+ case OpDiv: return "/";
+ case OpMod: return "%";
+
+ case OpLShift: return "<<";
+ case OpRShift: return ">>";
+ case OpURShift: return ">>>";
+
+ case OpGt: return ">";
+ case OpLt: return "<";
+ case OpGe: return ">=";
+ case OpLe: return "<=";
+ case OpEqual: return "==";
+ case OpNotEqual: return "!=";
+ case OpStrictEqual: return "===";
+ case OpStrictNotEqual: return "!==";
+
+ case OpInstanceof: return "instanceof";
+ case OpIn: return "in";
+
+ case OpAnd: return "&&";
+ case OpOr: return "||";
+
+ default: return "?";
+
+ } // switch
+}
+
+AluOp binaryOperator(int op)
+{
+ switch (static_cast<QSOperator::Op>(op)) {
+ case QSOperator::Add: return OpAdd;
+ case QSOperator::And: return OpAnd;
+ case QSOperator::BitAnd: return OpBitAnd;
+ case QSOperator::BitOr: return OpBitOr;
+ case QSOperator::BitXor: return OpBitXor;
+ case QSOperator::Div: return OpDiv;
+ case QSOperator::Equal: return OpEqual;
+ case QSOperator::Ge: return OpGe;
+ case QSOperator::Gt: return OpGt;
+ case QSOperator::Le: return OpLe;
+ case QSOperator::LShift: return OpLShift;
+ case QSOperator::Lt: return OpLt;
+ case QSOperator::Mod: return OpMod;
+ case QSOperator::Mul: return OpMul;
+ case QSOperator::NotEqual: return OpNotEqual;
+ case QSOperator::Or: return OpOr;
+ case QSOperator::RShift: return OpRShift;
+ case QSOperator::StrictEqual: return OpStrictEqual;
+ case QSOperator::StrictNotEqual: return OpStrictNotEqual;
+ case QSOperator::Sub: return OpSub;
+ case QSOperator::URShift: return OpURShift;
+ case QSOperator::InstanceOf: return OpInstanceof;
+ case QSOperator::In: return OpIn;
+ default: return OpInvalid;
+ }
+}
+
+struct RemoveSharedExpressions: V4IR::StmtVisitor, V4IR::ExprVisitor
+{
+ CloneExpr clone;
+ QSet<Expr *> subexpressions; // contains all the non-cloned subexpressions in the given function
+ Expr *uniqueExpr;
+
+ RemoveSharedExpressions(): uniqueExpr(0) {}
+
+ void operator()(V4IR::Function *function)
+ {
+ subexpressions.clear();
+
+ foreach (BasicBlock *block, function->basicBlocks) {
+ clone.setBasicBlock(block);
+
+ foreach (Stmt *s, block->statements) {
+ s->accept(this);
+ }
+ }
+ }
+
+ template <typename _Expr>
+ _Expr *cleanup(_Expr *expr)
+ {
+ if (subexpressions.contains(expr)) {
+ // the cloned expression is unique by definition
+ // so we don't need to add it to `subexpressions'.
+ return clone(expr);
+ }
+
+ subexpressions.insert(expr);
+ V4IR::Expr *e = expr;
+ qSwap(uniqueExpr, e);
+ expr->accept(this);
+ qSwap(uniqueExpr, e);
+ return static_cast<_Expr *>(e);
+ }
+
+ // statements
+ virtual void visitExp(Exp *s)
+ {
+ s->expr = cleanup(s->expr);
+ }
+
+ virtual void visitEnter(Enter *s)
+ {
+ s->expr = cleanup(s->expr);
+ }
+
+ virtual void visitLeave(Leave *)
+ {
+ // nothing to do for Leave statements
+ }
+
+ virtual void visitMove(Move *s)
+ {
+ s->target = cleanup(s->target);
+ s->source = cleanup(s->source);
+ }
+
+ virtual void visitJump(Jump *)
+ {
+ // nothing to do for Jump statements
+ }
+
+ virtual void visitCJump(CJump *s)
+ {
+ s->cond = cleanup(s->cond);
+ }
+
+ virtual void visitRet(Ret *s)
+ {
+ s->expr = cleanup(s->expr);
+ }
+
+ virtual void visitTry(Try *)
+ {
+ // nothing to do for Try statements
+ }
+
+ // expressions
+ virtual void visitConst(Const *) {}
+ virtual void visitString(String *) {}
+ virtual void visitRegExp(RegExp *) {}
+ virtual void visitName(Name *) {}
+ virtual void visitTemp(Temp *) {}
+ virtual void visitClosure(Closure *) {}
+
+ virtual void visitUnop(Unop *e)
+ {
+ e->expr = cleanup(e->expr);
+ }
+
+ virtual void visitBinop(Binop *e)
+ {
+ e->left = cleanup(e->left);
+ e->right = cleanup(e->right);
+ }
+
+ virtual void visitCall(Call *e)
+ {
+ e->base = cleanup(e->base);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr = cleanup(it->expr);
+ }
+
+ virtual void visitNew(New *e)
+ {
+ e->base = cleanup(e->base);
+ for (V4IR::ExprList *it = e->args; it; it = it->next)
+ it->expr = cleanup(it->expr);
+ }
+
+ virtual void visitSubscript(Subscript *e)
+ {
+ e->base = cleanup(e->base);
+ e->index = cleanup(e->index);
+ }
+
+ virtual void visitMember(Member *e)
+ {
+ e->base = cleanup(e->base);
+ }
+};
+
+void Const::dump(QTextStream &out)
+{
+ switch (type) {
+ case QQmlJS::V4IR::UndefinedType:
+ out << "undefined";
+ break;
+ case QQmlJS::V4IR::NullType:
+ out << "null";
+ break;
+ case QQmlJS::V4IR::BoolType:
+ out << (value ? "true" : "false");
+ break;
+ case QQmlJS::V4IR::MissingType:
+ out << "missing";
+ break;
+ default:
+ out << QString::number(value, 'g', 16);
+ break;
+ }
+}
+
+void String::dump(QTextStream &out)
+{
+ out << '"' << escape(*value) << '"';
+}
+
+QString String::escape(const QString &s)
+{
+ QString r;
+ for (int i = 0; i < s.length(); ++i) {
+ const QChar ch = s.at(i);
+ if (ch == QLatin1Char('\n'))
+ r += QStringLiteral("\\n");
+ else if (ch == QLatin1Char('\r'))
+ r += QStringLiteral("\\r");
+ else if (ch == QLatin1Char('\\'))
+ r += QStringLiteral("\\\\");
+ else if (ch == QLatin1Char('"'))
+ r += QStringLiteral("\\\"");
+ else if (ch == QLatin1Char('\''))
+ r += QStringLiteral("\\'");
+ else
+ r += ch;
+ }
+ return r;
+}
+
+void RegExp::dump(QTextStream &out)
+{
+ char f[3];
+ int i = 0;
+ if (flags & RegExp_Global)
+ f[i++] = 'g';
+ if (flags & RegExp_IgnoreCase)
+ f[i++] = 'i';
+ if (flags & RegExp_Multiline)
+ f[i++] = 'm';
+ f[i] = 0;
+
+ out << '/' << *value << '/' << f;
+}
+
+void Name::initGlobal(const QString *id, quint32 line, quint32 column)
+{
+ this->id = id;
+ this->builtin = builtin_invalid;
+ this->global = true;
+ this->line = line;
+ this->column = column;
+}
+
+void Name::init(const QString *id, quint32 line, quint32 column)
+{
+ this->id = id;
+ this->builtin = builtin_invalid;
+ this->global = false;
+ this->line = line;
+ this->column = column;
+}
+
+void Name::init(Builtin builtin, quint32 line, quint32 column)
+{
+ this->id = 0;
+ this->builtin = builtin;
+ this->global = false;
+ this->line = line;
+ this->column = column;
+}
+
+static const char *builtin_to_string(Name::Builtin b)
+{
+ switch (b) {
+ case Name::builtin_invalid:
+ return "builtin_invalid";
+ case Name::builtin_typeof:
+ return "builtin_typeof";
+ case Name::builtin_delete:
+ return "builtin_delete";
+ case Name::builtin_postincrement:
+ return "builtin_postincrement";
+ case Name::builtin_postdecrement:
+ return "builtin_postdecrement";
+ case Name::builtin_throw:
+ return "builtin_throw";
+ case Name::builtin_finish_try:
+ return "builtin_finish_try";
+ case V4IR::Name::builtin_foreach_iterator_object:
+ return "builtin_foreach_iterator_object";
+ case V4IR::Name::builtin_foreach_next_property_name:
+ return "builtin_foreach_next_property_name";
+ case V4IR::Name::builtin_push_with_scope:
+ return "builtin_push_with_scope";
+ case V4IR::Name::builtin_pop_scope:
+ return "builtin_pop_scope";
+ case V4IR::Name::builtin_declare_vars:
+ return "builtin_declare_vars";
+ case V4IR::Name::builtin_define_property:
+ return "builtin_define_property";
+ case V4IR::Name::builtin_define_array:
+ return "builtin_define_array";
+ case V4IR::Name::builtin_define_getter_setter:
+ return "builtin_define_getter_setter";
+ }
+ return "builtin_(###FIXME)";
+};
+
+
+void Name::dump(QTextStream &out)
+{
+ if (id)
+ out << *id;
+ else
+ out << builtin_to_string(builtin);
+}
+
+void Temp::dump(QTextStream &out)
+{
+ if (index < 0) {
+ out << '#' << -(index + 1); // negative and 1-based.
+ } else {
+ out << '%' << index; // temp
+ }
+ if (scope)
+ out << "@" << scope;
+}
+
+void Closure::dump(QTextStream &out)
+{
+ QString name = value->name ? *value->name : QString();
+ if (name.isEmpty())
+ name.sprintf("%p", value);
+ out << "closure(" << name << ')';
+}
+
+void Unop::dump(QTextStream &out)
+{
+ out << opname(op);
+ expr->dump(out);
+}
+
+void Binop::dump(QTextStream &out)
+{
+ left->dump(out);
+ out << ' ' << opname(op) << ' ';
+ right->dump(out);
+}
+
+void Call::dump(QTextStream &out)
+{
+ base->dump(out);
+ out << '(';
+ for (ExprList *it = args; it; it = it->next) {
+ if (it != args)
+ out << ", ";
+ it->expr->dump(out);
+ }
+ out << ')';
+}
+
+void New::dump(QTextStream &out)
+{
+ out << "new ";
+ base->dump(out);
+ out << '(';
+ for (ExprList *it = args; it; it = it->next) {
+ if (it != args)
+ out << ", ";
+ it->expr->dump(out);
+ }
+ out << ')';
+}
+
+void Subscript::dump(QTextStream &out)
+{
+ base->dump(out);
+ out << '[';
+ index->dump(out);
+ out << ']';
+}
+
+void Member::dump(QTextStream &out)
+{
+ base->dump(out);
+ out << '.' << *name;
+}
+
+void Exp::dump(QTextStream &out, Mode)
+{
+ out << "(void) ";
+ expr->dump(out);
+ out << ';';
+}
+
+void Enter::dump(QTextStream &out, Mode)
+{
+ out << "%enter(";
+ expr->dump(out);
+ out << ");";
+}
+
+void Leave::dump(QTextStream &out, Mode)
+{
+ out << "%leave";
+ out << ';';
+}
+
+void Move::dump(QTextStream &out, Mode)
+{
+ target->dump(out);
+ out << ' ';
+ if (op != OpInvalid)
+ out << opname(op);
+ out << "= ";
+// if (source->type != target->type)
+// out << typeName(source->type) << "_to_" << typeName(target->type) << '(';
+ source->dump(out);
+// if (source->type != target->type)
+// out << ')';
+ out << ';';
+}
+
+void Jump::dump(QTextStream &out, Mode mode)
+{
+ Q_UNUSED(mode);
+ out << "goto " << 'L' << target->index << ';';
+}
+
+void CJump::dump(QTextStream &out, Mode mode)
+{
+ Q_UNUSED(mode);
+ out << "if (";
+ cond->dump(out);
+ if (mode == HIR)
+ out << ") goto " << 'L' << iftrue->index << "; else goto " << 'L' << iffalse->index << ';';
+ else
+ out << ") goto " << 'L' << iftrue->index << ";";
+}
+
+void Ret::dump(QTextStream &out, Mode)
+{
+ out << "return";
+ if (expr) {
+ out << ' ';
+ expr->dump(out);
+ }
+ out << ';';
+}
+
+void Try::dump(QTextStream &out, Stmt::Mode mode)
+{
+ out << "try L" << tryBlock->index << "; catch exception in ";
+ exceptionVar->dump(out);
+ out << " with the name " << exceptionVarName << " and go to L" << catchBlock->index << ';';
+}
+
+Function *Module::newFunction(const QString &name, Function *outer)
+{
+ Function *f = new Function(this, outer, name);
+ functions.append(f);
+ if (!outer) {
+ assert(!rootFunction);
+ rootFunction = f;
+ } else {
+ outer->nestedFunctions.append(f);
+ }
+ return f;
+}
+
+Module::~Module()
+{
+ foreach (Function *f, functions) {
+ delete f;
+ }
+}
+
+Function::~Function()
+{
+ // destroy the Stmt::Data blocks manually, because memory pool cleanup won't
+ // call the Stmt destructors.
+ foreach (V4IR::BasicBlock *b, basicBlocks)
+ foreach (V4IR::Stmt *s, b->statements)
+ s->destroyData();
+
+ qDeleteAll(basicBlocks);
+ pool = 0;
+ module = 0;
+}
+
+
+const QString *Function::newString(const QString &text)
+{
+ return &*strings.insert(text);
+}
+
+BasicBlock *Function::newBasicBlock(BasicBlockInsertMode mode)
+{
+ BasicBlock *block = new BasicBlock(this);
+ return mode == InsertBlock ? insertBasicBlock(block) : block;
+}
+
+void Function::dump(QTextStream &out, Stmt::Mode mode)
+{
+ QString n = name ? *name : QString();
+ if (n.isEmpty())
+ n.sprintf("%p", this);
+ out << "function " << n << "() {" << endl;
+ foreach (const QString *formal, formals)
+ out << "\treceive " << *formal << ';' << endl;
+ foreach (const QString *local, locals)
+ out << "\tlocal " << *local << ';' << endl;
+ foreach (BasicBlock *bb, basicBlocks)
+ bb->dump(out, mode);
+ out << '}' << endl;
+}
+
+void Function::removeSharedExpressions()
+{
+ RemoveSharedExpressions removeSharedExpressions;
+ removeSharedExpressions(this);
+}
+
+int Function::indexOfArgument(const QStringRef &string) const
+{
+ for (int i = formals.size() - 1; i >= 0; --i) {
+ if (*formals.at(i) == string)
+ return i;
+ }
+ return -1;
+}
+unsigned BasicBlock::newTemp()
+{
+ return function->tempCount++;
+}
+
+Temp *BasicBlock::TEMP(int index, uint scope)
+{
+ Temp *e = function->New<Temp>();
+ e->init(index, scope);
+ return e;
+}
+
+Expr *BasicBlock::CONST(Type type, double value)
+{
+ Const *e = function->New<Const>();
+ e->init(type, value);
+ return e;
+}
+
+Expr *BasicBlock::STRING(const QString *value)
+{
+ String *e = function->New<String>();
+ e->init(value);
+ return e;
+}
+
+Expr *BasicBlock::REGEXP(const QString *value, int flags)
+{
+ RegExp *e = function->New<RegExp>();
+ e->init(value, flags);
+ return e;
+}
+
+Name *BasicBlock::NAME(const QString &id, quint32 line, quint32 column)
+{
+ Name *e = function->New<Name>();
+ e->init(function->newString(id), line, column);
+ return e;
+}
+
+Name *BasicBlock::GLOBALNAME(const QString &id, quint32 line, quint32 column)
+{
+ Name *e = function->New<Name>();
+ e->initGlobal(function->newString(id), line, column);
+ return e;
+}
+
+
+Name *BasicBlock::NAME(Name::Builtin builtin, quint32 line, quint32 column)
+{
+ Name *e = function->New<Name>();
+ e->init(builtin, line, column);
+ return e;
+}
+
+Closure *BasicBlock::CLOSURE(Function *function)
+{
+ Closure *clos = function->New<Closure>();
+ clos->init(function);
+ return clos;
+}
+
+Expr *BasicBlock::UNOP(AluOp op, Temp *expr)
+{
+ Unop *e = function->New<Unop>();
+ e->init(op, expr);
+ return e;
+}
+
+Expr *BasicBlock::BINOP(AluOp op, Expr *left, Expr *right)
+{
+ Binop *e = function->New<Binop>();
+ e->init(op, left, right);
+ return e;
+}
+
+Expr *BasicBlock::CALL(Expr *base, ExprList *args)
+{
+ Call *e = function->New<Call>();
+ e->init(base, args);
+ int argc = 0;
+ for (ExprList *it = args; it; it = it->next)
+ ++argc;
+ function->maxNumberOfArguments = qMax(function->maxNumberOfArguments, argc);
+ return e;
+}
+
+Expr *BasicBlock::NEW(Expr *base, ExprList *args)
+{
+ New *e = function->New<New>();
+ e->init(base, args);
+ return e;
+}
+
+Expr *BasicBlock::SUBSCRIPT(Temp *base, Temp *index)
+{
+ Subscript *e = function->New<Subscript>();
+ e->init(base, index);
+ return e;
+}
+
+Expr *BasicBlock::MEMBER(Temp *base, const QString *name)
+{
+ Member*e = function->New<Member>();
+ e->init(base, name);
+ return e;
+}
+
+Stmt *BasicBlock::EXP(Expr *expr)
+{
+ if (isTerminated())
+ return 0;
+
+ Exp *s = function->New<Exp>();
+ s->init(expr);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::ENTER(Expr *expr)
+{
+ if (isTerminated())
+ return 0;
+
+ Enter *s = function->New<Enter>();
+ s->init(expr);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::LEAVE()
+{
+ if (isTerminated())
+ return 0;
+
+ Leave *s = function->New<Leave>();
+ s->init();
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::MOVE(Expr *target, Expr *source, AluOp op)
+{
+ if (isTerminated())
+ return 0;
+
+ Move *s = function->New<Move>();
+ s->init(target, source, op);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::JUMP(BasicBlock *target)
+{
+ if (isTerminated())
+ return 0;
+
+ Jump *s = function->New<Jump>();
+ s->init(target);
+ statements.append(s);
+
+ assert(! out.contains(target));
+ out.append(target);
+
+ assert(! target->in.contains(this));
+ target->in.append(this);
+
+ return s;
+}
+
+Stmt *BasicBlock::CJUMP(Expr *cond, BasicBlock *iftrue, BasicBlock *iffalse)
+{
+ if (isTerminated())
+ return 0;
+
+ if (iftrue == iffalse) {
+ MOVE(TEMP(newTemp()), cond);
+ return JUMP(iftrue);
+ }
+
+ CJump *s = function->New<CJump>();
+ s->init(cond, iftrue, iffalse);
+ statements.append(s);
+
+ assert(! out.contains(iftrue));
+ out.append(iftrue);
+
+ assert(! iftrue->in.contains(this));
+ iftrue->in.append(this);
+
+ assert(! out.contains(iffalse));
+ out.append(iffalse);
+
+ assert(! iffalse->in.contains(this));
+ iffalse->in.append(this);
+
+ return s;
+}
+
+Stmt *BasicBlock::RET(Temp *expr)
+{
+ if (isTerminated())
+ return 0;
+
+ Ret *s = function->New<Ret>();
+ s->init(expr);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::TRY(BasicBlock *tryBlock, BasicBlock *catchBlock, const QString &exceptionVarName, Temp *exceptionVar)
+{
+ if (isTerminated())
+ return 0;
+
+ Try *t = function->New<Try>();
+ t->init(tryBlock, catchBlock, exceptionVarName, exceptionVar);
+ statements.append(t);
+
+ assert(! out.contains(tryBlock));
+ out.append(tryBlock);
+
+ assert(! out.contains(catchBlock));
+ out.append(catchBlock);
+
+ assert(! tryBlock->in.contains(this));
+ tryBlock->in.append(this);
+
+ assert(! catchBlock->in.contains(this));
+ catchBlock->in.append(this);
+
+ return t;
+}
+
+void BasicBlock::dump(QTextStream &out, Stmt::Mode mode)
+{
+ out << 'L' << index << ':' << endl;
+ foreach (Stmt *s, statements) {
+ out << '\t';
+ s->dump(out, mode);
+ out << endl;
+ }
+}
+
+CloneExpr::CloneExpr(BasicBlock *block)
+ : block(block), cloned(0)
+{
+}
+
+void CloneExpr::setBasicBlock(BasicBlock *block)
+{
+ this->block = block;
+}
+
+ExprList *CloneExpr::clone(ExprList *list)
+{
+ if (! list)
+ return 0;
+
+ ExprList *clonedList = block->function->New<V4IR::ExprList>();
+ clonedList->init(clone(list->expr), clone(list->next));
+ return clonedList;
+}
+
+void CloneExpr::visitConst(Const *e)
+{
+ cloned = block->CONST(e->type, e->value);
+}
+
+void CloneExpr::visitString(String *e)
+{
+ cloned = block->STRING(e->value);
+}
+
+void CloneExpr::visitRegExp(RegExp *e)
+{
+ cloned = block->REGEXP(e->value, e->flags);
+}
+
+void CloneExpr::visitName(Name *e)
+{
+ if (e->id)
+ cloned = block->NAME(*e->id, e->line, e->column);
+ else
+ cloned = block->NAME(e->builtin, e->line, e->column);
+}
+
+void CloneExpr::visitTemp(Temp *e)
+{
+ cloned = block->TEMP(e->index, e->scope);
+}
+
+void CloneExpr::visitClosure(Closure *e)
+{
+ cloned = block->CLOSURE(e->value);
+}
+
+void CloneExpr::visitUnop(Unop *e)
+{
+ cloned = block->UNOP(e->op, clone(e->expr));
+}
+
+void CloneExpr::visitBinop(Binop *e)
+{
+ cloned = block->BINOP(e->op, clone(e->left), clone(e->right));
+}
+
+void CloneExpr::visitCall(Call *e)
+{
+ cloned = block->CALL(clone(e->base), clone(e->args));
+}
+
+void CloneExpr::visitNew(New *e)
+{
+ cloned = block->NEW(clone(e->base), clone(e->args));
+}
+
+void CloneExpr::visitSubscript(Subscript *e)
+{
+ cloned = block->SUBSCRIPT(clone(e->base), clone(e->index));
+}
+
+void CloneExpr::visitMember(Member *e)
+{
+ cloned = block->MEMBER(clone(e->base), e->name);
+}
+
+} // end of namespace IR
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
diff --git a/src/qml/qml/v4vm/qv4jsir_p.h b/src/qml/qml/v4vm/qv4jsir_p.h
new file mode 100644
index 0000000000..47368449fc
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4jsir_p.h
@@ -0,0 +1,821 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4IR_P_H
+#define QV4IR_P_H
+
+//
+// W A R N I N G
+// -------------
+//
+// This file is not part of the Qt API. It exists purely as an
+// implementation detail. This header file may change from version to
+// version without notice, or even be removed.
+//
+// We mean it.
+//
+
+#include "qv4global.h"
+#include <private/qqmljsmemorypool_p.h>
+
+#include <QtCore/QVector>
+#include <QtCore/QString>
+#include <QtCore/QBitArray>
+
+#ifdef CONST
+#undef CONST
+#endif
+
+QT_BEGIN_NAMESPACE
+
+class QTextStream;
+class QQmlType;
+
+namespace QQmlJS {
+
+inline bool isNegative(double d)
+{
+ uchar *dch = (uchar *)&d;
+ if (QSysInfo::ByteOrder == QSysInfo::BigEndian)
+ return (dch[0] & 0x80);
+ else
+ return (dch[7] & 0x80);
+
+}
+
+namespace VM {
+struct ExecutionContext;
+struct Value;
+}
+
+namespace V4IR {
+
+struct BasicBlock;
+struct Function;
+struct Module;
+
+struct Stmt;
+struct Expr;
+
+// expressions
+struct Const;
+struct String;
+struct RegExp;
+struct Name;
+struct Temp;
+struct Closure;
+struct Unop;
+struct Binop;
+struct Call;
+struct New;
+struct Subscript;
+struct Member;
+
+// statements
+struct Exp;
+struct Enter;
+struct Leave;
+struct Move;
+struct Jump;
+struct CJump;
+struct Ret;
+struct Try;
+
+enum AluOp {
+ OpInvalid = 0,
+
+ OpIfTrue,
+ OpNot,
+ OpUMinus,
+ OpUPlus,
+ OpCompl,
+ OpIncrement,
+ OpDecrement,
+
+ OpBitAnd,
+ OpBitOr,
+ OpBitXor,
+
+ OpAdd,
+ OpSub,
+ OpMul,
+ OpDiv,
+ OpMod,
+
+ OpLShift,
+ OpRShift,
+ OpURShift,
+
+ OpGt,
+ OpLt,
+ OpGe,
+ OpLe,
+ OpEqual,
+ OpNotEqual,
+ OpStrictEqual,
+ OpStrictNotEqual,
+
+ OpInstanceof,
+ OpIn,
+
+ OpAnd,
+ OpOr,
+
+ LastAluOp = OpOr
+};
+AluOp binaryOperator(int op);
+const char *opname(V4IR::AluOp op);
+
+enum Type {
+ MissingType, // Used to indicate holes in array initialization (e.g. [,,])
+ UndefinedType,
+ NullType,
+ BoolType,
+ NumberType
+};
+
+struct ExprVisitor {
+ virtual ~ExprVisitor() {}
+ virtual void visitConst(Const *) = 0;
+ virtual void visitString(String *) = 0;
+ virtual void visitRegExp(RegExp *) = 0;
+ virtual void visitName(Name *) = 0;
+ virtual void visitTemp(Temp *) = 0;
+ virtual void visitClosure(Closure *) = 0;
+ virtual void visitUnop(Unop *) = 0;
+ virtual void visitBinop(Binop *) = 0;
+ virtual void visitCall(Call *) = 0;
+ virtual void visitNew(New *) = 0;
+ virtual void visitSubscript(Subscript *) = 0;
+ virtual void visitMember(Member *) = 0;
+};
+
+struct StmtVisitor {
+ virtual ~StmtVisitor() {}
+ virtual void visitExp(Exp *) = 0;
+ virtual void visitEnter(Enter *) = 0;
+ virtual void visitLeave(Leave *) = 0;
+ virtual void visitMove(Move *) = 0;
+ virtual void visitJump(Jump *) = 0;
+ virtual void visitCJump(CJump *) = 0;
+ virtual void visitRet(Ret *) = 0;
+ virtual void visitTry(Try *) = 0;
+};
+
+struct Expr {
+ virtual ~Expr() {}
+ virtual void accept(ExprVisitor *) = 0;
+ virtual bool isLValue() { return false; }
+ virtual Const *asConst() { return 0; }
+ virtual String *asString() { return 0; }
+ virtual RegExp *asRegExp() { return 0; }
+ virtual Name *asName() { return 0; }
+ virtual Temp *asTemp() { return 0; }
+ virtual Closure *asClosure() { return 0; }
+ virtual Unop *asUnop() { return 0; }
+ virtual Binop *asBinop() { return 0; }
+ virtual Call *asCall() { return 0; }
+ virtual New *asNew() { return 0; }
+ virtual Subscript *asSubscript() { return 0; }
+ virtual Member *asMember() { return 0; }
+ virtual void dump(QTextStream &out) = 0;
+};
+
+struct ExprList {
+ Expr *expr;
+ ExprList *next;
+
+ void init(Expr *expr, ExprList *next = 0)
+ {
+ this->expr = expr;
+ this->next = next;
+ }
+};
+
+struct Const: Expr {
+ Type type;
+ double value;
+
+ void init(Type type, double value)
+ {
+ this->type = type;
+ this->value = value;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitConst(this); }
+ virtual Const *asConst() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct String: Expr {
+ const QString *value;
+
+ void init(const QString *value)
+ {
+ this->value = value;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitString(this); }
+ virtual String *asString() { return this; }
+
+ virtual void dump(QTextStream &out);
+ static QString escape(const QString &s);
+};
+
+struct RegExp: Expr {
+ // needs to be compatible with the flags in the lexer
+ enum Flags {
+ RegExp_Global = 0x01,
+ RegExp_IgnoreCase = 0x02,
+ RegExp_Multiline = 0x04
+ };
+
+ const QString *value;
+ int flags;
+
+ void init(const QString *value, int flags)
+ {
+ this->value = value;
+ this->flags = flags;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitRegExp(this); }
+ virtual RegExp *asRegExp() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Name: Expr {
+ enum Builtin {
+ builtin_invalid,
+ builtin_typeof,
+ builtin_delete,
+ builtin_postincrement,
+ builtin_postdecrement,
+ builtin_throw,
+ builtin_finish_try,
+ builtin_foreach_iterator_object,
+ builtin_foreach_next_property_name,
+ builtin_push_with_scope,
+ builtin_pop_scope,
+ builtin_declare_vars,
+ builtin_define_property,
+ builtin_define_array,
+ builtin_define_getter_setter
+ };
+
+ const QString *id;
+ Builtin builtin;
+ bool global;
+ quint32 line;
+ quint32 column;
+
+ void initGlobal(const QString *id, quint32 line, quint32 column);
+ void init(const QString *id, quint32 line, quint32 column);
+ void init(Builtin builtin, quint32 line, quint32 column);
+
+ virtual void accept(ExprVisitor *v) { v->visitName(this); }
+ virtual bool isLValue() { return true; }
+ virtual Name *asName() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Temp: Expr {
+ int index;
+ int scope; // how many scopes outside the current one?
+
+ void init(int index, int scope = 0)
+ {
+ this->index = index;
+ this->scope = scope;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitTemp(this); }
+ virtual bool isLValue() { return true; }
+ virtual Temp *asTemp() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Closure: Expr {
+ Function *value;
+
+ void init(Function *value)
+ {
+ this->value = value;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitClosure(this); }
+ virtual Closure *asClosure() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Unop: Expr {
+ AluOp op;
+ Temp *expr;
+
+ void init(AluOp op, Temp *expr)
+ {
+ this->op = op;
+ this->expr = expr;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitUnop(this); }
+ virtual Unop *asUnop() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Binop: Expr {
+ AluOp op;
+ Expr *left; // Temp or Const
+ Expr *right; // Temp or Const
+
+ void init(AluOp op, Expr *left, Expr *right)
+ {
+ this->op = op;
+ this->left = left;
+ this->right = right;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitBinop(this); }
+ virtual Binop *asBinop() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Call: Expr {
+ Expr *base; // Name, Member, Temp
+ ExprList *args; // List of Temps
+
+ void init(Expr *base, ExprList *args)
+ {
+ this->base = base;
+ this->args = args;
+ }
+
+ Expr *onlyArgument() const {
+ if (args && ! args->next)
+ return args->expr;
+ return 0;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitCall(this); }
+ virtual Call *asCall() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct New: Expr {
+ Expr *base; // Name, Member, Temp
+ ExprList *args; // List of Temps
+
+ void init(Expr *base, ExprList *args)
+ {
+ this->base = base;
+ this->args = args;
+ }
+
+ Expr *onlyArgument() const {
+ if (args && ! args->next)
+ return args->expr;
+ return 0;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitNew(this); }
+ virtual New *asNew() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Subscript: Expr {
+ Temp *base;
+ Temp *index;
+
+ void init(Temp *base, Temp *index)
+ {
+ this->base = base;
+ this->index = index;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitSubscript(this); }
+ virtual bool isLValue() { return true; }
+ virtual Subscript *asSubscript() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Member: Expr {
+ Temp *base;
+ const QString *name;
+
+ void init(Temp *base, const QString *name)
+ {
+ this->base = base;
+ this->name = name;
+ }
+
+ virtual void accept(ExprVisitor *v) { v->visitMember(this); }
+ virtual bool isLValue() { return true; }
+ virtual Member *asMember() { return this; }
+
+ virtual void dump(QTextStream &out);
+};
+
+struct Stmt {
+ enum Mode {
+ HIR,
+ MIR
+ };
+
+ struct Data {
+ QVector<unsigned> uses;
+ QVector<unsigned> defs;
+ QBitArray liveIn;
+ QBitArray liveOut;
+ };
+
+ Data *d;
+
+ Stmt(): d(0) {}
+ virtual ~Stmt() { Q_UNREACHABLE(); }
+ virtual Stmt *asTerminator() { return 0; }
+
+ virtual void accept(StmtVisitor *) = 0;
+ virtual Exp *asExp() { return 0; }
+ virtual Move *asMove() { return 0; }
+ virtual Enter *asEnter() { return 0; }
+ virtual Leave *asLeave() { return 0; }
+ virtual Jump *asJump() { return 0; }
+ virtual CJump *asCJump() { return 0; }
+ virtual Ret *asRet() { return 0; }
+ virtual Try *asTry() { return 0; }
+ virtual void dump(QTextStream &out, Mode mode = HIR) = 0;
+
+ void destroyData() {
+ delete d;
+ d = 0;
+ }
+};
+
+struct Exp: Stmt {
+ Expr *expr;
+
+ void init(Expr *expr)
+ {
+ this->expr = expr;
+ }
+
+ virtual void accept(StmtVisitor *v) { v->visitExp(this); }
+ virtual Exp *asExp() { return this; }
+
+ virtual void dump(QTextStream &out, Mode);
+};
+
+struct Move: Stmt {
+ Expr *target; // LHS - Temp, Name, Member or Subscript
+ Expr *source;
+ AluOp op;
+
+ void init(Expr *target, Expr *source, AluOp op)
+ {
+ this->target = target;
+ this->source = source;
+ this->op = op;
+ }
+
+ virtual void accept(StmtVisitor *v) { v->visitMove(this); }
+ virtual Move *asMove() { return this; }
+
+ virtual void dump(QTextStream &out, Mode);
+};
+
+struct Enter: Stmt {
+ Expr *expr;
+
+ void init(Expr *expr)
+ {
+ this->expr = expr;
+ }
+
+ virtual void accept(StmtVisitor *v) { v->visitEnter(this); }
+ virtual Enter *asEnter() { return this; }
+
+ virtual void dump(QTextStream &out, Mode);
+};
+
+struct Leave: Stmt {
+ void init() {}
+
+ virtual void accept(StmtVisitor *v) { v->visitLeave(this); }
+ virtual Leave *asLeave() { return this; }
+
+ virtual void dump(QTextStream &out, Mode);
+};
+
+struct Jump: Stmt {
+ BasicBlock *target;
+
+ void init(BasicBlock *target)
+ {
+ this->target = target;
+ }
+
+ virtual Stmt *asTerminator() { return this; }
+
+ virtual void accept(StmtVisitor *v) { v->visitJump(this); }
+ virtual Jump *asJump() { return this; }
+
+ virtual void dump(QTextStream &out, Mode mode);
+};
+
+struct CJump: Stmt {
+ Expr *cond; // Temp, Binop
+ BasicBlock *iftrue;
+ BasicBlock *iffalse;
+
+ void init(Expr *cond, BasicBlock *iftrue, BasicBlock *iffalse)
+ {
+ this->cond = cond;
+ this->iftrue = iftrue;
+ this->iffalse = iffalse;
+ }
+
+ virtual Stmt *asTerminator() { return this; }
+
+ virtual void accept(StmtVisitor *v) { v->visitCJump(this); }
+ virtual CJump *asCJump() { return this; }
+
+ virtual void dump(QTextStream &out, Mode mode);
+};
+
+struct Ret: Stmt {
+ Temp *expr;
+
+ void init(Temp *expr)
+ {
+ this->expr = expr;
+ }
+
+ virtual Stmt *asTerminator() { return this; }
+
+ virtual void accept(StmtVisitor *v) { v->visitRet(this); }
+ virtual Ret *asRet() { return this; }
+
+ virtual void dump(QTextStream &out, Mode);
+};
+
+struct Try: Stmt {
+ BasicBlock *tryBlock;
+ BasicBlock *catchBlock;
+ QString exceptionVarName;
+ Temp *exceptionVar; // place to store the caught exception, for use when re-throwing
+
+ void init(BasicBlock *tryBlock, BasicBlock *catchBlock, const QString &exceptionVarName, Temp *exceptionVar)
+ {
+ this->tryBlock = tryBlock;
+ this->catchBlock = catchBlock;
+ this->exceptionVarName = exceptionVarName;
+ this->exceptionVar = exceptionVar;
+ }
+
+ virtual Stmt *asTerminator() { return this; }
+
+ virtual void accept(StmtVisitor *v) { v->visitTry(this); }
+ virtual Try *asTry() { return this; }
+
+ virtual void dump(QTextStream &out, Mode mode);
+};
+
+struct Q_V4_EXPORT Module {
+ MemoryPool pool;
+ QVector<Function *> functions;
+ Function *rootFunction;
+
+ Function *newFunction(const QString &name, Function *outer);
+
+ Module() : rootFunction(0) {}
+ ~Module();
+};
+
+struct Function {
+ Module *module;
+ MemoryPool *pool;
+ const QString *name;
+ QVector<BasicBlock *> basicBlocks;
+ int tempCount;
+ int maxNumberOfArguments;
+ QSet<QString> strings;
+ QList<const QString *> formals;
+ QList<const QString *> locals;
+ QVector<Function *> nestedFunctions;
+ Function *outer;
+
+ int insideWithOrCatch;
+
+ uint hasDirectEval: 1;
+ uint usesArgumentsObject : 1;
+ uint isStrict: 1;
+ uint isNamedExpression : 1;
+ uint hasTry: 1;
+ uint hasWith: 1;
+ uint unused : 26;
+
+ template <typename _Tp> _Tp *New() { return new (pool->allocate(sizeof(_Tp))) _Tp(); }
+
+ Function(Module *module, Function *outer, const QString &name)
+ : module(module)
+ , pool(&module->pool)
+ , tempCount(0)
+ , maxNumberOfArguments(0)
+ , outer(outer)
+ , insideWithOrCatch(0)
+ , hasDirectEval(false)
+ , usesArgumentsObject(false)
+ , isStrict(false)
+ , isNamedExpression(false)
+ , hasTry(false)
+ , hasWith(false)
+ , unused(0)
+ { this->name = newString(name); }
+
+ ~Function();
+
+ enum BasicBlockInsertMode {
+ InsertBlock,
+ DontInsertBlock
+ };
+
+ BasicBlock *newBasicBlock(BasicBlockInsertMode mode = InsertBlock);
+ const QString *newString(const QString &text);
+
+ void RECEIVE(const QString &name) { formals.append(newString(name)); }
+ void LOCAL(const QString &name) { locals.append(newString(name)); }
+
+ inline BasicBlock *insertBasicBlock(BasicBlock *block) { basicBlocks.append(block); return block; }
+
+ void dump(QTextStream &out, Stmt::Mode mode = Stmt::HIR);
+
+ void removeSharedExpressions();
+
+ int indexOfArgument(const QStringRef &string) const;
+};
+
+struct BasicBlock {
+ Function *function;
+ QVector<Stmt *> statements;
+ QVector<BasicBlock *> in;
+ QVector<BasicBlock *> out;
+ QBitArray liveIn;
+ QBitArray liveOut;
+ int index;
+ int offset;
+
+ BasicBlock(Function *function): function(function), index(-1), offset(-1) {}
+ ~BasicBlock() {}
+
+ template <typename Instr> inline Instr i(Instr i) { statements.append(i); return i; }
+
+ inline bool isEmpty() const {
+ return statements.isEmpty();
+ }
+
+ inline Stmt *terminator() const {
+ if (! statements.isEmpty() && statements.at(statements.size() - 1)->asTerminator() != 0)
+ return statements.at(statements.size() - 1);
+ return 0;
+ }
+
+ inline bool isTerminated() const {
+ if (terminator() != 0)
+ return true;
+ return false;
+ }
+
+ unsigned newTemp();
+
+ Temp *TEMP(int index, uint scope = 0);
+
+ Expr *CONST(Type type, double value);
+ Expr *STRING(const QString *value);
+ Expr *REGEXP(const QString *value, int flags);
+
+ Name *NAME(const QString &id, quint32 line, quint32 column);
+ Name *NAME(Name::Builtin builtin, quint32 line, quint32 column);
+
+ Name *GLOBALNAME(const QString &id, quint32 line, quint32 column);
+
+ Closure *CLOSURE(Function *function);
+
+ Expr *UNOP(AluOp op, Temp *expr);
+ Expr *BINOP(AluOp op, Expr *left, Expr *right);
+ Expr *CALL(Expr *base, ExprList *args = 0);
+ Expr *NEW(Expr *base, ExprList *args = 0);
+ Expr *SUBSCRIPT(Temp *base, Temp *index);
+ Expr *MEMBER(Temp *base, const QString *name);
+
+ Stmt *EXP(Expr *expr);
+ Stmt *ENTER(Expr *expr);
+ Stmt *LEAVE();
+
+ Stmt *MOVE(Expr *target, Expr *source, AluOp op = V4IR::OpInvalid);
+
+ Stmt *JUMP(BasicBlock *target);
+ Stmt *CJUMP(Expr *cond, BasicBlock *iftrue, BasicBlock *iffalse);
+ Stmt *RET(Temp *expr);
+ Stmt *TRY(BasicBlock *tryBlock, BasicBlock *catchBlock, const QString &exceptionVarName, Temp *exceptionVar);
+
+ void dump(QTextStream &out, Stmt::Mode mode = Stmt::HIR);
+};
+
+class CloneExpr: protected V4IR::ExprVisitor
+{
+public:
+ explicit CloneExpr(V4IR::BasicBlock *block = 0);
+
+ void setBasicBlock(V4IR::BasicBlock *block);
+
+ template <typename _Expr>
+ _Expr *operator()(_Expr *expr)
+ {
+ return clone(expr);
+ }
+
+ template <typename _Expr>
+ _Expr *clone(_Expr *expr)
+ {
+ Expr *c = expr;
+ qSwap(cloned, c);
+ expr->accept(this);
+ qSwap(cloned, c);
+ return static_cast<_Expr *>(c);
+ }
+
+protected:
+ V4IR::ExprList *clone(V4IR::ExprList *list);
+
+ virtual void visitConst(Const *);
+ virtual void visitString(String *);
+ virtual void visitRegExp(RegExp *);
+ virtual void visitName(Name *);
+ virtual void visitTemp(Temp *);
+ virtual void visitClosure(Closure *);
+ virtual void visitUnop(Unop *);
+ virtual void visitBinop(Binop *);
+ virtual void visitCall(Call *);
+ virtual void visitNew(New *);
+ virtual void visitSubscript(Subscript *);
+ virtual void visitMember(Member *);
+
+private:
+ V4IR::BasicBlock *block;
+ V4IR::Expr *cloned;
+};
+
+} // end of namespace IR
+
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4IR_P_H
diff --git a/src/qml/qml/v4vm/qv4jsonobject.cpp b/src/qml/qml/v4vm/qv4jsonobject.cpp
new file mode 100644
index 0000000000..cb4df70970
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4jsonobject.cpp
@@ -0,0 +1,936 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include <qv4jsonobject.h>
+#include <qv4objectproto.h>
+#include <qv4numberobject.h>
+#include <qv4stringobject.h>
+#include <qv4booleanobject.h>
+#include <qv4objectiterator.h>
+#include <qjsondocument.h>
+#include <qstack.h>
+#include <qstringlist.h>
+
+#include <wtf/MathExtras.h>
+
+namespace QQmlJS {
+namespace VM {
+
+//#define PARSER_DEBUG
+#ifdef PARSER_DEBUG
+static int indent = 0;
+#define BEGIN qDebug() << QByteArray(4*indent++, ' ').constData()
+#define END --indent
+#define DEBUG qDebug() << QByteArray(4*indent, ' ').constData()
+#else
+#define BEGIN if (1) ; else qDebug()
+#define END do {} while (0)
+#define DEBUG if (1) ; else qDebug()
+#endif
+
+
+class Parser
+{
+public:
+ Parser(ExecutionContext *context, const QChar *json, int length);
+
+ Value parse(QJsonParseError *error);
+
+private:
+ inline bool eatSpace();
+ inline QChar nextToken();
+
+ Value parseObject();
+ Value parseArray();
+ bool parseMember(Object *o);
+ bool parseString(QString *string);
+ bool parseValue(Value *val);
+ bool parseNumber(Value *val);
+
+ ExecutionContext *context;
+ const QChar *head;
+ const QChar *json;
+ const QChar *end;
+
+ int nestingLevel;
+ QJsonParseError::ParseError lastError;
+};
+
+static const int nestingLimit = 1024;
+
+
+Parser::Parser(ExecutionContext *context, const QChar *json, int length)
+ : context(context), head(json), json(json), nestingLevel(0), lastError(QJsonParseError::NoError)
+{
+ end = json + length;
+}
+
+
+
+/*
+
+begin-array = ws %x5B ws ; [ left square bracket
+
+begin-object = ws %x7B ws ; { left curly bracket
+
+end-array = ws %x5D ws ; ] right square bracket
+
+end-object = ws %x7D ws ; } right curly bracket
+
+name-separator = ws %x3A ws ; : colon
+
+value-separator = ws %x2C ws ; , comma
+
+Insignificant whitespace is allowed before or after any of the six
+structural characters.
+
+ws = *(
+ %x20 / ; Space
+ %x09 / ; Horizontal tab
+ %x0A / ; Line feed or New line
+ %x0D ; Carriage return
+ )
+
+*/
+
+enum {
+ Space = 0x20,
+ Tab = 0x09,
+ LineFeed = 0x0a,
+ Return = 0x0d,
+ BeginArray = 0x5b,
+ BeginObject = 0x7b,
+ EndArray = 0x5d,
+ EndObject = 0x7d,
+ NameSeparator = 0x3a,
+ ValueSeparator = 0x2c,
+ Quote = 0x22
+};
+
+bool Parser::eatSpace()
+{
+ while (json < end) {
+ if (*json > Space)
+ break;
+ if (*json != Space &&
+ *json != Tab &&
+ *json != LineFeed &&
+ *json != Return)
+ break;
+ ++json;
+ }
+ return (json < end);
+}
+
+QChar Parser::nextToken()
+{
+ if (!eatSpace())
+ return 0;
+ QChar token = *json++;
+ switch (token.unicode()) {
+ case BeginArray:
+ case BeginObject:
+ case NameSeparator:
+ case ValueSeparator:
+ case EndArray:
+ case EndObject:
+ eatSpace();
+ case Quote:
+ break;
+ default:
+ token = 0;
+ break;
+ }
+ return token;
+}
+
+/*
+ JSON-text = object / array
+*/
+Value Parser::parse(QJsonParseError *error)
+{
+#ifdef PARSER_DEBUG
+ indent = 0;
+ qDebug() << ">>>>> parser begin";
+#endif
+
+ eatSpace();
+
+ Value v;
+ if (!parseValue(&v)) {
+#ifdef PARSER_DEBUG
+ qDebug() << ">>>>> parser error";
+#endif
+ if (lastError == QJsonParseError::NoError)
+ lastError = QJsonParseError::IllegalValue;
+ error->offset = json - head;
+ error->error = lastError;
+ return Value::undefinedValue();
+ }
+
+ // some input left...
+ if (eatSpace()) {
+ lastError = QJsonParseError::IllegalValue;
+ error->offset = json - head;
+ error->error = lastError;
+ return Value::undefinedValue();
+ }
+
+ END;
+ error->offset = 0;
+ error->error = QJsonParseError::NoError;
+ return v;
+}
+
+/*
+ object = begin-object [ member *( value-separator member ) ]
+ end-object
+*/
+
+Value Parser::parseObject()
+{
+ if (++nestingLevel > nestingLimit) {
+ lastError = QJsonParseError::DeepNesting;
+ return Value::undefinedValue();
+ }
+
+ BEGIN << "parseObject pos=" << json;
+
+ Object *o = context->engine->newObject();
+ Value objectVal = Value::fromObject(o);
+
+ QChar token = nextToken();
+ while (token == Quote) {
+ if (!parseMember(o))
+ return Value::undefinedValue();
+ token = nextToken();
+ if (token != ValueSeparator)
+ break;
+ token = nextToken();
+ if (token == EndObject) {
+ lastError = QJsonParseError::MissingObject;
+ return Value::undefinedValue();
+ }
+ }
+
+ DEBUG << "end token=" << token;
+ if (token != EndObject) {
+ lastError = QJsonParseError::UnterminatedObject;
+ return Value::undefinedValue();
+ }
+
+ END;
+
+ --nestingLevel;
+ return objectVal;
+}
+
+/*
+ member = string name-separator value
+*/
+bool Parser::parseMember(Object *o)
+{
+ BEGIN << "parseMember";
+
+ QString key;
+ if (!parseString(&key))
+ return false;
+ QChar token = nextToken();
+ if (token != NameSeparator) {
+ lastError = QJsonParseError::MissingNameSeparator;
+ return false;
+ }
+ Value val;
+ if (!parseValue(&val))
+ return false;
+
+ Property *p = o->insertMember(context->engine->newIdentifier(key), Attr_Data);
+ p->value = val;
+
+ END;
+ return true;
+}
+
+/*
+ array = begin-array [ value *( value-separator value ) ] end-array
+*/
+Value Parser::parseArray()
+{
+ BEGIN << "parseArray";
+ ArrayObject *array = context->engine->newArrayObject(context);
+
+ if (++nestingLevel > nestingLimit) {
+ lastError = QJsonParseError::DeepNesting;
+ return Value::undefinedValue();
+ }
+
+ if (!eatSpace()) {
+ lastError = QJsonParseError::UnterminatedArray;
+ return Value::undefinedValue();
+ }
+ if (*json == EndArray) {
+ nextToken();
+ } else {
+ uint index = 0;
+ while (1) {
+ Value val;
+ if (!parseValue(&val))
+ return Value::undefinedValue();
+ array->arraySet(index, val);
+ QChar token = nextToken();
+ if (token == EndArray)
+ break;
+ else if (token != ValueSeparator) {
+ if (!eatSpace())
+ lastError = QJsonParseError::UnterminatedArray;
+ else
+ lastError = QJsonParseError::MissingValueSeparator;
+ return Value::undefinedValue();
+ }
+ ++index;
+ }
+ }
+
+ DEBUG << "size =" << array->arrayLength();
+ END;
+
+ --nestingLevel;
+ return Value::fromObject(array);
+}
+
+/*
+value = false / null / true / object / array / number / string
+
+*/
+
+bool Parser::parseValue(Value *val)
+{
+ BEGIN << "parse Value" << *json;
+
+ switch ((json++)->unicode()) {
+ case 'n':
+ if (end - json < 4) {
+ lastError = QJsonParseError::IllegalValue;
+ return false;
+ }
+ if (*json++ == 'u' &&
+ *json++ == 'l' &&
+ *json++ == 'l') {
+ *val = Value::nullValue();
+ DEBUG << "value: null";
+ END;
+ return true;
+ }
+ lastError = QJsonParseError::IllegalValue;
+ return false;
+ case 't':
+ if (end - json < 4) {
+ lastError = QJsonParseError::IllegalValue;
+ return false;
+ }
+ if (*json++ == 'r' &&
+ *json++ == 'u' &&
+ *json++ == 'e') {
+ *val = Value::fromBoolean(true);
+ DEBUG << "value: true";
+ END;
+ return true;
+ }
+ lastError = QJsonParseError::IllegalValue;
+ return false;
+ case 'f':
+ if (end - json < 5) {
+ lastError = QJsonParseError::IllegalValue;
+ return false;
+ }
+ if (*json++ == 'a' &&
+ *json++ == 'l' &&
+ *json++ == 's' &&
+ *json++ == 'e') {
+ *val = Value::fromBoolean(false);
+ DEBUG << "value: false";
+ END;
+ return true;
+ }
+ lastError = QJsonParseError::IllegalValue;
+ return false;
+ case Quote: {
+ QString value;
+ if (!parseString(&value))
+ return false;
+ DEBUG << "value: string";
+ END;
+ *val = Value::fromString(context, value);
+ return true;
+ }
+ case BeginArray: {
+ *val = parseArray();
+ if (val->isUndefined())
+ return false;
+ DEBUG << "value: array";
+ END;
+ return true;
+ }
+ case BeginObject: {
+ *val = parseObject();
+ if (val->isUndefined())
+ return false;
+ DEBUG << "value: object";
+ END;
+ return true;
+ }
+ case EndArray:
+ lastError = QJsonParseError::MissingObject;
+ return false;
+ default:
+ --json;
+ if (!parseNumber(val))
+ return false;
+ DEBUG << "value: number";
+ END;
+ }
+
+ return true;
+}
+
+
+
+
+
+/*
+ number = [ minus ] int [ frac ] [ exp ]
+ decimal-point = %x2E ; .
+ digit1-9 = %x31-39 ; 1-9
+ e = %x65 / %x45 ; e E
+ exp = e [ minus / plus ] 1*DIGIT
+ frac = decimal-point 1*DIGIT
+ int = zero / ( digit1-9 *DIGIT )
+ minus = %x2D ; -
+ plus = %x2B ; +
+ zero = %x30 ; 0
+
+*/
+
+bool Parser::parseNumber(Value *val)
+{
+ BEGIN << "parseNumber" << *json;
+
+ const QChar *start = json;
+ bool isInt = true;
+
+ // minus
+ if (json < end && *json == '-')
+ ++json;
+
+ // int = zero / ( digit1-9 *DIGIT )
+ if (json < end && *json == '0') {
+ ++json;
+ } else {
+ while (json < end && *json >= '0' && *json <= '9')
+ ++json;
+ }
+
+ // frac = decimal-point 1*DIGIT
+ if (json < end && *json == '.') {
+ isInt = false;
+ ++json;
+ while (json < end && *json >= '0' && *json <= '9')
+ ++json;
+ }
+
+ // exp = e [ minus / plus ] 1*DIGIT
+ if (json < end && (*json == 'e' || *json == 'E')) {
+ isInt = false;
+ ++json;
+ if (json < end && (*json == '-' || *json == '+'))
+ ++json;
+ while (json < end && *json >= '0' && *json <= '9')
+ ++json;
+ }
+
+ QString number(start, json - start);
+ DEBUG << "numberstring" << number;
+
+ if (isInt) {
+ bool ok;
+ int n = number.toInt(&ok);
+ if (ok && n < (1<<25) && n > -(1<<25)) {
+ *val = Value::fromInt32(n);
+ END;
+ return true;
+ }
+ }
+
+ bool ok;
+ double d;
+ d = number.toDouble(&ok);
+
+ if (!ok) {
+ lastError = QJsonParseError::IllegalNumber;
+ return false;
+ }
+
+ * val = Value::fromDouble(d);
+
+ END;
+ return true;
+}
+
+/*
+
+ string = quotation-mark *char quotation-mark
+
+ char = unescaped /
+ escape (
+ %x22 / ; " quotation mark U+0022
+ %x5C / ; \ reverse solidus U+005C
+ %x2F / ; / solidus U+002F
+ %x62 / ; b backspace U+0008
+ %x66 / ; f form feed U+000C
+ %x6E / ; n line feed U+000A
+ %x72 / ; r carriage return U+000D
+ %x74 / ; t tab U+0009
+ %x75 4HEXDIG ) ; uXXXX U+XXXX
+
+ escape = %x5C ; \
+
+ quotation-mark = %x22 ; "
+
+ unescaped = %x20-21 / %x23-5B / %x5D-10FFFF
+ */
+static inline bool addHexDigit(QChar digit, uint *result)
+{
+ ushort d = digit.unicode();
+ *result <<= 4;
+ if (d >= '0' && d <= '9')
+ *result |= (d - '0');
+ else if (d >= 'a' && d <= 'f')
+ *result |= (d - 'a') + 10;
+ else if (d >= 'A' && d <= 'F')
+ *result |= (d - 'A') + 10;
+ else
+ return false;
+ return true;
+}
+
+static inline bool scanEscapeSequence(const QChar *&json, const QChar *end, uint *ch)
+{
+ ++json;
+ if (json >= end)
+ return false;
+
+ DEBUG << "scan escape";
+ uint escaped = (json++)->unicode();
+ switch (escaped) {
+ case '"':
+ *ch = '"'; break;
+ case '\\':
+ *ch = '\\'; break;
+ case '/':
+ *ch = '/'; break;
+ case 'b':
+ *ch = 0x8; break;
+ case 'f':
+ *ch = 0xc; break;
+ case 'n':
+ *ch = 0xa; break;
+ case 'r':
+ *ch = 0xd; break;
+ case 't':
+ *ch = 0x9; break;
+ case 'u': {
+ *ch = 0;
+ if (json > end - 4)
+ return false;
+ for (int i = 0; i < 4; ++i) {
+ if (!addHexDigit(*json, ch))
+ return false;
+ ++json;
+ }
+ if (*ch <= 0x1f)
+ return false;
+ return true;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
+
+bool Parser::parseString(QString *string)
+{
+ BEGIN << "parse string stringPos=" << json;
+
+ while (json < end) {
+ if (*json == '"')
+ break;
+ else if (*json == '\\') {
+ uint ch = 0;
+ if (!scanEscapeSequence(json, end, &ch)) {
+ lastError = QJsonParseError::IllegalEscapeSequence;
+ return false;
+ }
+ qDebug() << "scanEscape" << hex << ch;
+ if (QChar::requiresSurrogates(ch)) {
+ *string += QChar::highSurrogate(ch);
+ *string += QChar::lowSurrogate(ch);
+ } else {
+ *string += QChar(ch);
+ }
+ } else {
+ if (json->unicode() <= 0x1f) {
+ lastError = QJsonParseError::IllegalEscapeSequence;
+ return false;
+ }
+ *string += *json;
+ ++json;
+ }
+ }
+ ++json;
+
+ if (json > end) {
+ lastError = QJsonParseError::UnterminatedString;
+ return false;
+ }
+
+ END;
+ return true;
+}
+
+
+struct Stringify
+{
+ ExecutionContext *ctx;
+ FunctionObject *replacerFunction;
+ QVector<String *> propertyList;
+ QString gap;
+ QString indent;
+
+ QStack<Object *> stack;
+
+ Stringify(ExecutionContext *ctx) : ctx(ctx), replacerFunction(0) {}
+
+ QString Str(const QString &key, Value value);
+ QString JA(ArrayObject *a);
+ QString JO(Object *o);
+
+ QString makeMember(const QString &key, Value v);
+};
+
+static QString quote(const QString &str)
+{
+ QString product = "\"";
+ for (int i = 0; i < str.length(); ++i) {
+ QChar c = str.at(i);
+ switch (c.unicode()) {
+ case '"':
+ product += "\\\"";
+ break;
+ case '\\':
+ product += "\\\\";
+ break;
+ case '\b':
+ product += "\\b";
+ break;
+ case '\f':
+ product += "\\f";
+ break;
+ case '\n':
+ product += "\\n";
+ break;
+ case '\r':
+ product += "\\r";
+ break;
+ case '\t':
+ product += "\\t";
+ break;
+ default:
+ if (c.unicode() <= 0x1f) {
+ product += "\\u00";
+ product += c.unicode() > 0xf ? '1' : '0';
+ product += "0123456789abcdef"[c.unicode() & 0xf];
+ } else {
+ product += c;
+ }
+ }
+ }
+ product += '"';
+ return product;
+}
+
+QString Stringify::Str(const QString &key, Value value)
+{
+ QString result;
+
+ if (Object *o = value.asObject()) {
+ FunctionObject *toJSON = o->get(ctx, ctx->engine->newString(QStringLiteral("toJSON"))).asFunctionObject();
+ if (toJSON) {
+ Value arg = Value::fromString(ctx, key);
+ value = toJSON->call(ctx, value, &arg, 1);
+ }
+ }
+
+ if (replacerFunction) {
+ Object *holder = ctx->engine->newObject();
+ Value holderValue = Value::fromObject(holder);
+ holder->put(ctx, QString(), value);
+ Value args[2];
+ args[0] = Value::fromString(ctx, key);
+ args[1] = value;
+ value = replacerFunction->call(ctx, holderValue, args, 2);
+ }
+
+ if (Object *o = value.asObject()) {
+ if (NumberObject *n = o->asNumberObject())
+ value = n->value;
+ else if (StringObject *so = o->asStringObject())
+ value = so->value;
+ else if (BooleanObject *b =o->asBooleanObject())
+ value = b->value;
+ }
+
+ if (value.isNull())
+ return QStringLiteral("null");
+ if (value.isBoolean())
+ return value.booleanValue() ? QStringLiteral("true") : QStringLiteral("false");
+ if (value.isString())
+ return quote(value.stringValue()->toQString());
+
+ if (value.isNumber()) {
+ double d = value.toNumber();
+ return std::isfinite(d) ? value.toString(ctx)->toQString() : QStringLiteral("null");
+ }
+
+ if (Object *o = value.asObject()) {
+ if (!o->asFunctionObject()) {
+ if (o->asArrayObject())
+ return JA(static_cast<ArrayObject *>(o));
+ else
+ return JO(o);
+ }
+ }
+
+ return QString();
+}
+
+QString Stringify::makeMember(const QString &key, Value v)
+{
+ QString strP = Str(key, v);
+ if (!strP.isEmpty()) {
+ QString member = quote(key) + ':';
+ if (!gap.isEmpty())
+ member += ' ';
+ member += strP;
+ return member;
+ }
+ return QString();
+}
+
+QString Stringify::JO(Object *o)
+{
+ if (stack.contains(o))
+ ctx->throwTypeError();
+
+ QString result;
+ stack.push(o);
+ QString stepback = indent;
+ indent += gap;
+
+ QStringList partial;
+ if (propertyList.isEmpty()) {
+ ObjectIterator it(ctx, o, ObjectIterator::EnumberableOnly);
+
+ while (1) {
+ String *name;
+ uint index;
+ PropertyAttributes attrs;
+ Property *pd = it.next(&name, &index, &attrs);
+ if (!pd)
+ break;
+ Value v = o->getValue(ctx, pd, attrs);
+ QString key;
+ if (name)
+ key = name->toQString();
+ else
+ key = QString::number(index);
+ QString member = makeMember(key, v);
+ if (!member.isEmpty())
+ partial += member;
+ }
+ } else {
+ for (int i = 0; i < propertyList.size(); ++i) {
+ bool exists;
+ Value v = o->get(ctx, propertyList.at(i), &exists);
+ if (!exists)
+ continue;
+ QString member = makeMember(propertyList.at(i)->toQString(), v);
+ if (!member.isEmpty())
+ partial += member;
+ }
+ }
+
+ if (partial.isEmpty()) {
+ result = QStringLiteral("{}");
+ } else if (gap.isEmpty()) {
+ result = "{" + partial.join(",") + "}";
+ } else {
+ QString separator = ",\n" + indent;
+ result = "{\n" + indent + partial.join(separator) + "\n" + stepback + "}";
+ }
+
+ indent = stepback;
+ stack.pop();
+ return result;
+}
+
+QString Stringify::JA(ArrayObject *a)
+{
+ if (stack.contains(a))
+ ctx->throwTypeError();
+
+ QString result;
+ stack.push(a);
+ QString stepback = indent;
+ indent += gap;
+
+ QStringList partial;
+ uint len = a->arrayLength();
+ for (uint i = 0; i < len; ++i) {
+ bool exists;
+ Value v = a->getIndexed(ctx, i, &exists);
+ if (!exists) {
+ partial += QStringLiteral("null");
+ continue;
+ }
+ QString strP = Str(QString::number(i), v);
+ if (!strP.isEmpty())
+ partial += strP;
+ else
+ partial += QStringLiteral("null");
+ }
+
+ if (partial.isEmpty()) {
+ result = QStringLiteral("[]");
+ } else if (gap.isEmpty()) {
+ result = "[" + partial.join(",") + "]";
+ } else {
+ QString separator = ",\n" + indent;
+ result = "[\n" + indent + partial.join(separator) + "\n" + stepback + "]";
+ }
+
+ indent = stepback;
+ stack.pop();
+ return result;
+}
+
+
+JsonObject::JsonObject(ExecutionContext *context)
+ : Object(context->engine)
+{
+ type = Type_JSONObject;
+ prototype = context->engine->objectPrototype;
+
+ defineDefaultProperty(context, QStringLiteral("parse"), method_parse, 2);
+ defineDefaultProperty(context, QStringLiteral("stringify"), method_stringify, 3);
+}
+
+
+Value JsonObject::method_parse(SimpleCallContext *ctx)
+{
+ QString jtext = ctx->argument(0).toString(ctx)->toQString();
+
+ DEBUG << "parsing source = " << jtext;
+ Parser parser(ctx, jtext.constData(), jtext.length());
+ QJsonParseError error;
+ Value result = parser.parse(&error);
+ if (error.error != QJsonParseError::NoError) {
+ DEBUG << "parse error" << error.errorString();
+ ctx->throwSyntaxError(0);
+ }
+
+ return result;
+}
+
+Value JsonObject::method_stringify(SimpleCallContext *ctx)
+{
+ Stringify stringify(ctx);
+
+ Object *o = ctx->argument(1).asObject();
+ if (o) {
+ stringify.replacerFunction = o->asFunctionObject();
+ if (o->isArrayObject()) {
+ uint arrayLen = o->arrayLength();
+ for (uint i = 0; i < arrayLen; ++i) {
+ Value v = o->getIndexed(ctx, i);
+ if (v.asNumberObject() || v.asStringObject() || v.isNumber())
+ v = __qmljs_to_string(v, ctx);
+ if (v.isString()) {
+ String *s = v.stringValue();
+ if (!stringify.propertyList.contains(s))
+ stringify.propertyList.append(s);
+ }
+ }
+ }
+ }
+
+ Value s = ctx->argument(2);
+ if (NumberObject *n = s.asNumberObject())
+ s = n->value;
+ else if (StringObject *so = s.asStringObject())
+ s = so->value;
+
+ if (s.isNumber()) {
+ stringify.gap = QString(qMin(10, (int)s.toInteger()), ' ');
+ } else if (s.isString()) {
+ stringify.gap = s.stringValue()->toQString().left(10);
+ }
+
+
+ QString result = stringify.Str(QString(), ctx->argument(0));
+ if (result.isEmpty())
+ return Value::undefinedValue();
+ return Value::fromString(ctx, result);
+}
+
+
+
+}
+}
diff --git a/src/qml/qml/v4vm/qv4jsonobject.h b/src/qml/qml/v4vm/qv4jsonobject.h
new file mode 100644
index 0000000000..dba4786c2b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4jsonobject.h
@@ -0,0 +1,65 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4JSONOBJECTS_H
+#define QV4JSONOBJECTS_H
+
+#include <qv4object.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct JsonObject : Object {
+ JsonObject(ExecutionContext *context);
+
+ static Value method_parse(SimpleCallContext *ctx);
+ static Value method_stringify(SimpleCallContext *ctx);
+
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
+
diff --git a/src/qml/qml/v4vm/qv4lookup.cpp b/src/qml/qml/v4vm/qv4lookup.cpp
new file mode 100644
index 0000000000..38a11a99de
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4lookup.cpp
@@ -0,0 +1,332 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include "qv4lookup.h"
+#include "qv4functionobject.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+void Lookup::lookupPropertyGeneric(QQmlJS::VM::Lookup *l, ExecutionContext *ctx, QQmlJS::VM::Value *result, const QQmlJS::VM::Value &object)
+{
+ if (Object *o = object.asObject()) {
+ PropertyAttributes attrs;
+ Property *p = l->lookup(o, &attrs);
+ if (p) {
+ if (attrs.isData()) {
+ if (l->level == 0)
+ l->lookupProperty = lookupProperty0;
+ else if (l->level == 1)
+ l->lookupProperty = lookupProperty1;
+ else if (l->level == 2)
+ l->lookupProperty = lookupProperty2;
+ if (result)
+ *result = p->value;
+ return;
+ } else {
+ if (l->level == 0)
+ l->lookupProperty = lookupPropertyAccessor0;
+ else if (l->level == 1)
+ l->lookupProperty = lookupPropertyAccessor1;
+ else if (l->level == 2)
+ l->lookupProperty = lookupPropertyAccessor2;
+ if (result)
+ *result = p->value;
+ Value res = o->getValue(ctx, p, attrs);
+ if (result)
+ *result = res;
+ return;
+ }
+ } else if (result) {
+ *result = Value::undefinedValue();
+ }
+ } else {
+ Value res;
+ if (Managed *m = object.asManaged()) {
+ res = m->get(ctx, l->name);
+ } else {
+ o = __qmljs_convert_to_object(ctx, object);
+ res = o->get(ctx, l->name);
+ }
+ if (result)
+ *result = res;
+ }
+}
+
+void Lookup::lookupProperty0(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object)
+{
+ if (Object *o = object.asObject()) {
+ if (l->classList[0] == o->internalClass) {
+ if (result)
+ *result = o->memberData[l->index].value;
+ return;
+ }
+ }
+ l->lookupProperty = lookupPropertyGeneric;
+ lookupPropertyGeneric(l, ctx, result, object);
+}
+
+void Lookup::lookupProperty1(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object)
+{
+ if (Object *o = object.asObject()) {
+ if (l->classList[0] == o->internalClass &&
+ l->classList[1] == o->prototype->internalClass) {
+ if (result)
+ *result = o->prototype->memberData[l->index].value;
+ return;
+ }
+ }
+ l->lookupProperty = lookupPropertyGeneric;
+ lookupPropertyGeneric(l, ctx, result, object);
+}
+
+void Lookup::lookupProperty2(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object)
+{
+ if (Object *o = object.asObject()) {
+ if (l->classList[0] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[1] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[2] == o->internalClass) {
+ if (result)
+ *result = o->memberData[l->index].value;
+ return;
+ }
+ }
+ }
+ }
+ l->lookupProperty = lookupPropertyGeneric;
+ lookupPropertyGeneric(l, ctx, result, object);
+}
+
+void Lookup::lookupPropertyAccessor0(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object)
+{
+ if (Object *o = object.asObject()) {
+ if (l->classList[0] == o->internalClass) {
+ Value res;
+ FunctionObject *getter = o->memberData[l->index].getter();
+ if (!getter)
+ res = Value::undefinedValue();
+ else
+ res = getter->call(ctx, object, 0, 0);
+ if (result)
+ *result = res;
+ return;
+ }
+ }
+ l->lookupProperty = lookupPropertyGeneric;
+ lookupPropertyGeneric(l, ctx, result, object);
+}
+
+void Lookup::lookupPropertyAccessor1(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object)
+{
+ if (Object *o = object.asObject()) {
+ if (l->classList[0] == o->internalClass &&
+ l->classList[1] == o->prototype->internalClass) {
+ Value res;
+ FunctionObject *getter = o->prototype->memberData[l->index].getter();
+ if (!getter)
+ res = Value::undefinedValue();
+ else
+ res = getter->call(ctx, object, 0, 0);
+ if (result)
+ *result = res;
+ return;
+ }
+ }
+ l->lookupProperty = lookupPropertyGeneric;
+ lookupPropertyGeneric(l, ctx, result, object);
+}
+
+void Lookup::lookupPropertyAccessor2(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object)
+{
+ if (Object *o = object.asObject()) {
+ if (l->classList[0] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[1] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[2] == o->internalClass) {
+ Value res;
+ FunctionObject *getter = o->memberData[l->index].getter();
+ if (!getter)
+ res = Value::undefinedValue();
+ else
+ res = getter->call(ctx, object, 0, 0);
+ if (result)
+ *result = res;
+ return;
+ }
+ }
+ }
+ }
+ l->lookupProperty = lookupPropertyGeneric;
+ lookupPropertyGeneric(l, ctx, result, object);
+}
+
+
+void Lookup::lookupGlobalGeneric(Lookup *l, ExecutionContext *ctx, Value *result)
+{
+ Object *o = ctx->engine->globalObject;
+ PropertyAttributes attrs;
+ Property *p = l->lookup(o, &attrs);
+ if (p) {
+ if (attrs.isData()) {
+ if (l->level == 0)
+ l->lookupGlobal = lookupGlobal0;
+ else if (l->level == 1)
+ l->lookupGlobal = lookupGlobal1;
+ else if (l->level == 2)
+ l->lookupGlobal = lookupGlobal2;
+ *result = p->value;
+ return;
+ } else {
+ if (l->level == 0)
+ l->lookupGlobal = lookupGlobalAccessor0;
+ else if (l->level == 1)
+ l->lookupGlobal = lookupGlobalAccessor1;
+ else if (l->level == 2)
+ l->lookupGlobal = lookupGlobalAccessor2;
+ Value res = o->getValue(ctx, p, attrs);
+ if (result)
+ *result = res;
+ return;
+ }
+ }
+ ctx->throwReferenceError(Value::fromString(l->name));
+}
+
+void Lookup::lookupGlobal0(Lookup *l, ExecutionContext *ctx, Value *result)
+{
+ Object *o = ctx->engine->globalObject;
+ if (l->classList[0] == o->internalClass) {
+ *result = o->memberData[l->index].value;
+ return;
+ }
+ l->lookupGlobal = lookupGlobalGeneric;
+ lookupGlobalGeneric(l, ctx, result);
+}
+
+void Lookup::lookupGlobal1(Lookup *l, ExecutionContext *ctx, Value *result)
+{
+ Object *o = ctx->engine->globalObject;
+ if (l->classList[0] == o->internalClass &&
+ l->classList[1] == o->prototype->internalClass) {
+ *result = o->prototype->memberData[l->index].value;
+ return;
+ }
+ l->lookupGlobal = lookupGlobalGeneric;
+ lookupGlobalGeneric(l, ctx, result);
+}
+
+void Lookup::lookupGlobal2(Lookup *l, ExecutionContext *ctx, Value *result)
+{
+ Object *o = ctx->engine->globalObject;
+ if (l->classList[0] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[1] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[2] == o->internalClass) {
+                *result = o->memberData[l->index].value;
+ return;
+ }
+ }
+ }
+ l->lookupGlobal = lookupGlobalGeneric;
+ lookupGlobalGeneric(l, ctx, result);
+}
+
+void Lookup::lookupGlobalAccessor0(Lookup *l, ExecutionContext *ctx, Value *result)
+{
+ Object *o = ctx->engine->globalObject;
+ if (l->classList[0] == o->internalClass) {
+ FunctionObject *getter = o->memberData[l->index].getter();
+ if (!getter)
+ *result = Value::undefinedValue();
+ else
+ *result = getter->call(ctx, Value::undefinedValue(), 0, 0);
+ return;
+ }
+ l->lookupGlobal = lookupGlobalGeneric;
+ lookupGlobalGeneric(l, ctx, result);
+}
+
+void Lookup::lookupGlobalAccessor1(Lookup *l, ExecutionContext *ctx, Value *result)
+{
+ Object *o = ctx->engine->globalObject;
+ if (l->classList[0] == o->internalClass &&
+ l->classList[1] == o->prototype->internalClass) {
+ FunctionObject *getter = o->prototype->memberData[l->index].getter();
+ if (!getter)
+ *result = Value::undefinedValue();
+ else
+ *result = getter->call(ctx, Value::undefinedValue(), 0, 0);
+ return;
+ }
+ l->lookupGlobal = lookupGlobalGeneric;
+ lookupGlobalGeneric(l, ctx, result);
+}
+
+void Lookup::lookupGlobalAccessor2(Lookup *l, ExecutionContext *ctx, Value *result)
+{
+ Object *o = ctx->engine->globalObject;
+ if (l->classList[0] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[1] == o->internalClass) {
+ o = o->prototype;
+ if (l->classList[2] == o->internalClass) {
+ FunctionObject *getter = o->memberData[l->index].getter();
+ if (!getter)
+ *result = Value::undefinedValue();
+ else
+ *result = getter->call(ctx, Value::undefinedValue(), 0, 0);
+ return;
+ }
+ }
+ }
+ l->lookupGlobal = lookupGlobalGeneric;
+ lookupGlobalGeneric(l, ctx, result);
+}
+
+}
+}
+
+QT_END_NAMESPACE
diff --git a/src/qml/qml/v4vm/qv4lookup.h b/src/qml/qml/v4vm/qv4lookup.h
new file mode 100644
index 0000000000..38546cd65b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4lookup.h
@@ -0,0 +1,144 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4LOOKUP_H
+#define QV4LOOKUP_H
+
+#include "qv4global.h"
+#include "qv4runtime.h"
+#include "qv4engine.h"
+#include "qv4context.h"
+#include "qv4object.h"
+#include "qv4internalclass.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct Lookup {
+ enum { Size = 3 };
+ union {
+ void (*lookupProperty)(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+ void (*lookupGlobal)(Lookup *l, ExecutionContext *ctx, Value *result);
+ };
+ InternalClass *classList[Size];
+ int level;
+ uint index;
+ String *name;
+
+ static void lookupPropertyGeneric(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+ static void lookupProperty0(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+ static void lookupProperty1(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+ static void lookupProperty2(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+ static void lookupPropertyAccessor0(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+ static void lookupPropertyAccessor1(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+ static void lookupPropertyAccessor2(Lookup *l, ExecutionContext *ctx, Value *result, const Value &object);
+
+ static void lookupGlobalGeneric(Lookup *l, ExecutionContext *ctx, Value *result);
+ static void lookupGlobal0(Lookup *l, ExecutionContext *ctx, Value *result);
+ static void lookupGlobal1(Lookup *l, ExecutionContext *ctx, Value *result);
+ static void lookupGlobal2(Lookup *l, ExecutionContext *ctx, Value *result);
+ static void lookupGlobalAccessor0(Lookup *l, ExecutionContext *ctx, Value *result);
+ static void lookupGlobalAccessor1(Lookup *l, ExecutionContext *ctx, Value *result);
+ static void lookupGlobalAccessor2(Lookup *l, ExecutionContext *ctx, Value *result);
+
+ Property *lookup(Object *obj, PropertyAttributes *attrs) {
+ int i = 0;
+ while (i < level && obj && obj->internalClass == classList[i]) {
+ obj = obj->prototype;
+ ++i;
+ }
+
+        if (index != UINT_MAX && obj && obj->internalClass == classList[i]) {
+ *attrs = obj->internalClass->propertyData.at(index);
+ return obj->memberData + index;
+ }
+
+ while (i < Size && obj) {
+ classList[i] = obj->internalClass;
+
+ index = obj->internalClass->find(name);
+ if (index != UINT_MAX) {
+ level = i;
+ *attrs = obj->internalClass->propertyData.at(index);
+ return obj->memberData + index;
+ }
+
+ obj = obj->prototype;
+ ++i;
+ }
+ level = i;
+
+ while (obj) {
+ index = obj->internalClass->find(name);
+ if (index != UINT_MAX) {
+ *attrs = obj->internalClass->propertyData.at(index);
+ return obj->memberData + index;
+ }
+
+ obj = obj->prototype;
+ }
+ return 0;
+ }
+
+ Property *setterLookup(Object *o, PropertyAttributes *attrs) {
+ if (o->internalClass == classList[0]) {
+ *attrs = o->internalClass->propertyData[index];
+ return o->memberData + index;
+ }
+
+ uint idx = o->internalClass->find(name);
+ if (idx != UINT_MAX) {
+ classList[0] = o->internalClass;
+ index = idx;
+ *attrs = o->internalClass->propertyData[index];
+ return o->memberData + index;
+ }
+ return 0;
+ }
+};
+
+}
+}
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4managed.cpp b/src/qml/qml/v4vm/qv4managed.cpp
new file mode 100644
index 0000000000..ab87b9dd9f
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4managed.cpp
@@ -0,0 +1,187 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4managed.h"
+#include "qv4mm.h"
+#include "qv4errorobject.h"
+
+using namespace QQmlJS::VM;
+
+const ManagedVTable Managed::static_vtbl =
+{
+ call,
+ construct,
+ 0 /*markObjects*/,
+ destroy,
+ hasInstance,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ "Managed",
+};
+
+
+void *Managed::operator new(size_t size, MemoryManager *mm)
+{
+ assert(mm);
+
+ return mm->allocManaged(size);
+}
+
+void Managed::operator delete(void *ptr)
+{
+ if (!ptr)
+ return;
+
+ Managed *m = static_cast<Managed *>(ptr);
+ m->vtbl = 0;
+ m->_data = 0;
+ m->~Managed();
+}
+
+void Managed::operator delete(void *ptr, MemoryManager *mm)
+{
+ Q_UNUSED(mm);
+
+ operator delete(ptr);
+}
+
+QString Managed::className() const
+{
+ const char *s = 0;
+ switch (Type(type)) {
+ case Type_Invalid:
+ case Type_String:
+ return QString();
+ case Type_Object:
+ s = "Object";
+ break;
+ case Type_ArrayObject:
+ s = "Array";
+ break;
+ case Type_FunctionObject:
+ s = "Function";
+ break;
+ case Type_BooleanObject:
+ s = "Boolean";
+ break;
+ case Type_NumberObject:
+ s = "Number";
+ break;
+ case Type_StringObject:
+ s = "String";
+ break;
+ case Type_DateObject:
+ s = "Date";
+ break;
+ case Type_RegExpObject:
+ s = "RegExp";
+ break;
+ case Type_ErrorObject:
+ switch (ErrorObject::ErrorType(subtype)) {
+ case ErrorObject::Error:
+ s = "Error";
+ break;
+ case ErrorObject::EvalError:
+ s = "EvalError";
+ break;
+ case ErrorObject::RangeError:
+ s = "RangeError";
+ break;
+ case ErrorObject::ReferenceError:
+ s = "ReferenceError";
+ break;
+ case ErrorObject::SyntaxError:
+ s = "SyntaxError";
+ break;
+ case ErrorObject::TypeError:
+ s = "TypeError";
+ break;
+ case ErrorObject::URIError:
+ s = "URIError";
+ break;
+ }
+ break;
+ case Type_ArgumentsObject:
+ s = "Arguments";
+ break;
+ case Type_JSONObject:
+ s = "JSON";
+ break;
+ case Type_MathObject:
+ s = "Math";
+ break;
+ case Type_ForeachIteratorObject:
+ s = "__ForeachIterator";
+ break;
+ }
+ return QString::fromLatin1(s);
+}
+
+bool Managed::hasInstance(Managed *, ExecutionContext *ctx, const Value &)
+{
+ ctx->throwTypeError();
+}
+
+Value Managed::construct(Managed *, ExecutionContext *context, Value *, int)
+{
+ context->throwTypeError();
+}
+
+Value Managed::call(Managed *, ExecutionContext *context, const Value &, Value *, int)
+{
+ context->throwTypeError();
+}
+
+Value Managed::get(ExecutionContext *ctx, String *name, bool *hasProperty)
+{
+ return vtbl->get(this, ctx, name, hasProperty);
+}
+
+Value Managed::getIndexed(ExecutionContext *ctx, uint index, bool *hasProperty)
+{
+ return vtbl->getIndexed(this, ctx, index, hasProperty);
+}
diff --git a/src/qml/qml/v4vm/qv4managed.h b/src/qml/qml/v4vm/qv4managed.h
new file mode 100644
index 0000000000..844c88772b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4managed.h
@@ -0,0 +1,247 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QMLJS_MANAGED_H
+#define QMLJS_MANAGED_H
+
+#include <QtCore/QString>
+#include <QtCore/QVector>
+#include <QtCore/QDebug>
+#include "qv4global.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+class MemoryManager;
+struct String;
+struct Object;
+struct ObjectPrototype;
+struct ExecutionContext;
+struct ScriptFunction;
+
+struct BooleanObject;
+struct NumberObject;
+struct StringObject;
+struct ArrayObject;
+struct DateObject;
+struct FunctionObject;
+struct RegExpObject;
+struct ErrorObject;
+struct ArgumentsObject;
+struct JSONObject;
+struct ForeachIteratorObject;
+struct Managed;
+struct Value;
+struct RegExp;
+
+struct ManagedVTable
+{
+ Value (*call)(Managed *, ExecutionContext *context, const Value &thisObject, Value *args, int argc);
+ Value (*construct)(Managed *, ExecutionContext *context, Value *args, int argc);
+ void (*markObjects)(Managed *);
+ void (*destroy)(Managed *);
+ bool (*hasInstance)(Managed *, ExecutionContext *ctx, const Value &value);
+ Value (*get)(Managed *, ExecutionContext *ctx, String *name, bool *hasProperty);
+ Value (*getIndexed)(Managed *, ExecutionContext *ctx, uint index, bool *hasProperty);
+ void (*put)(Managed *, ExecutionContext *ctx, String *name, const Value &value);
+ void (*putIndexed)(Managed *, ExecutionContext *ctx, uint index, const Value &value);
+ PropertyAttributes (*query)(Managed *, ExecutionContext *ctx, String *name);
+ PropertyAttributes (*queryIndexed)(Managed *, ExecutionContext *ctx, uint index);
+ bool (*deleteProperty)(Managed *m, ExecutionContext *ctx, String *name);
+ bool (*deleteIndexedProperty)(Managed *m, ExecutionContext *ctx, uint index);
+ const char *className;
+};
+
+#define DEFINE_MANAGED_VTABLE(classname) \
+const ManagedVTable classname::static_vtbl = \
+{ \
+ call, \
+ construct, \
+ markObjects, \
+ destroy, \
+ hasInstance, \
+ get, \
+ getIndexed, \
+ put, \
+ putIndexed, \
+ query, \
+ queryIndexed, \
+ deleteProperty, \
+ deleteIndexedProperty, \
+ #classname \
+}
+
+
+struct Q_V4_EXPORT Managed
+{
+private:
+ void *operator new(size_t);
+ Managed(const Managed &other);
+ void operator = (const Managed &other);
+
+protected:
+ Managed()
+ : vtbl(&static_vtbl), _data(0)
+ { inUse = 1; extensible = 1; }
+
+public:
+ void *operator new(size_t size, MemoryManager *mm);
+ void operator delete(void *ptr);
+ void operator delete(void *ptr, MemoryManager *mm);
+
+ inline void mark() {
+ if (markBit)
+ return;
+ markBit = 1;
+ if (vtbl->markObjects)
+ vtbl->markObjects(this);
+ }
+
+ enum Type {
+ Type_Invalid,
+ Type_String,
+ Type_Object,
+ Type_ArrayObject,
+ Type_FunctionObject,
+ Type_BooleanObject,
+ Type_NumberObject,
+ Type_StringObject,
+ Type_DateObject,
+ Type_RegExpObject,
+ Type_ErrorObject,
+ Type_ArgumentsObject,
+ Type_JSONObject,
+ Type_MathObject,
+ Type_ForeachIteratorObject,
+ Type_RegExp
+ };
+
+ String *asString() { return reinterpret_cast<String *>(this); }
+ Object *asObject() { return reinterpret_cast<Object *>(this); }
+ ArrayObject *asArrayObject() { return type == Type_ArrayObject ? reinterpret_cast<ArrayObject *>(this) : 0; }
+ FunctionObject *asFunctionObject() { return type == Type_FunctionObject ? reinterpret_cast<FunctionObject *>(this) : 0; }
+ BooleanObject *asBooleanObject() { return type == Type_BooleanObject ? reinterpret_cast<BooleanObject *>(this) : 0; }
+ NumberObject *asNumberObject() { return type == Type_NumberObject ? reinterpret_cast<NumberObject *>(this) : 0; }
+ StringObject *asStringObject() { return type == Type_StringObject ? reinterpret_cast<StringObject *>(this) : 0; }
+ DateObject *asDateObject() { return type == Type_DateObject ? reinterpret_cast<DateObject *>(this) : 0; }
+ RegExpObject *asRegExpObject() { return type == Type_RegExpObject ? reinterpret_cast<RegExpObject *>(this) : 0; }
+ ErrorObject *asErrorObject() { return type == Type_ErrorObject ? reinterpret_cast<ErrorObject *>(this) : 0; }
+ ArgumentsObject *asArgumentsObject() { return type == Type_ArgumentsObject ? reinterpret_cast<ArgumentsObject *>(this) : 0; }
+ JSONObject *asJSONObject() { return type == Type_JSONObject ? reinterpret_cast<JSONObject *>(this) : 0; }
+ ForeachIteratorObject *asForeachIteratorObject() { return type == Type_ForeachIteratorObject ? reinterpret_cast<ForeachIteratorObject *>(this) : 0; }
+ RegExp *asRegExp() { return type == Type_RegExp ? reinterpret_cast<RegExp *>(this) : 0; }
+
+ bool isArrayObject() const { return type == Type_ArrayObject; }
+ bool isStringObject() const { return type == Type_StringObject; }
+
+ QString className() const;
+
+ Managed **nextFreeRef() {
+ return reinterpret_cast<Managed **>(this);
+ }
+ Managed *nextFree() {
+ return *reinterpret_cast<Managed **>(this);
+ }
+ void setNextFree(Managed *m) {
+ *reinterpret_cast<Managed **>(this) = m;
+ }
+
+ inline bool hasInstance(ExecutionContext *ctx, const Value &v) {
+ return vtbl->hasInstance(this, ctx, v);
+ }
+ Value construct(ExecutionContext *context, Value *args, int argc);
+ Value call(ExecutionContext *context, const Value &thisObject, Value *args, int argc);
+ Value get(ExecutionContext *ctx, String *name, bool *hasProperty = 0);
+ Value getIndexed(ExecutionContext *ctx, uint index, bool *hasProperty = 0);
+ void put(ExecutionContext *ctx, String *name, const Value &value)
+ { vtbl->put(this, ctx, name, value); }
+ void putIndexed(ExecutionContext *ctx, uint index, const Value &value)
+ { vtbl->putIndexed(this, ctx, index, value); }
+ bool deleteProperty(ExecutionContext *ctx, String *name)
+ { return vtbl->deleteProperty(this, ctx, name); }
+ bool deleteIndexedProperty(ExecutionContext *ctx, uint index)
+ { return vtbl->deleteIndexedProperty(this, ctx, index); }
+
+ static void destroy(Managed *that) { that->_data = 0; }
+ static bool hasInstance(Managed *that, ExecutionContext *ctx, const Value &value);
+ static Value construct(Managed *, ExecutionContext *context, Value *, int);
+ static Value call(Managed *, ExecutionContext *, const Value &, Value *, int);
+
+ uint internalType() const {
+ return type;
+ }
+
+protected:
+
+ static const ManagedVTable static_vtbl;
+
+ const ManagedVTable *vtbl;
+
+ union {
+ uint _data;
+ struct {
+ uint markBit : 1;
+ uint inUse : 1;
+ uint extensible : 1; // used by Object
+ uint isNonStrictArgumentsObject : 1;
+ uint isBuiltinFunction : 1; // used by FunctionObject
+ uint needsActivation : 1; // used by FunctionObject
+ uint usesArgumentsObject : 1; // used by FunctionObject
+ uint strictMode : 1; // used by FunctionObject
+ uint type : 5;
+ mutable uint subtype : 3;
+ uint externalComparison : 1;
+ uint unused : 15;
+ };
+ };
+
+private:
+ friend class MemoryManager;
+ friend struct Identifiers;
+};
+
+}
+}
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4math.h b/src/qml/qml/v4vm/qv4math.h
new file mode 100644
index 0000000000..0699c0c971
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4math.h
@@ -0,0 +1,120 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QMLJS_MATH_H
+#define QMLJS_MATH_H
+
+#ifndef QMLJS_LLVM_RUNTIME
+# include <QtCore/qnumeric.h>
+#endif // QMLJS_LLVM_RUNTIME
+#include <cmath>
+
+#if !defined(QMLJS_LLVM_RUNTIME) && COMPILER(GCC) && (CPU(X86_64) || CPU(X86))
+#define QMLJS_INLINE_MATH
+#define QMLJS_READONLY __attribute((const))
+#endif
+
+#if defined(QMLJS_INLINE_MATH)
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+static inline QMLJS_READONLY Value add_int32(int a, int b)
+{
+ quint8 overflow = 0;
+ int aa = a;
+
+ asm ("addl %2, %1\n"
+ "seto %0"
+ : "=q" (overflow), "=r" (aa)
+ : "r" (b), "1" (aa)
+ : "cc"
+ );
+ if (!overflow)
+ return Value::fromInt32(aa);
+ return Value::fromDouble((double)a + (double)b);
+}
+
+static inline QMLJS_READONLY Value sub_int32(int a, int b)
+{
+ quint8 overflow = 0;
+ int aa = a;
+
+ asm ("subl %2, %1\n"
+ "seto %0"
+ : "=q" (overflow), "=r" (aa)
+ : "r" (b), "1" (aa)
+ : "cc"
+ );
+ if (!overflow)
+ return Value::fromInt32(aa);
+ return Value::fromDouble((double)a - (double)b);
+}
+
+static inline QMLJS_READONLY Value mul_int32(int a, int b)
+{
+ quint8 overflow = 0;
+ int aa = a;
+
+ asm ("imul %2, %1\n"
+ "setc %0"
+ : "=q" (overflow), "=r" (aa)
+ : "r" (b), "1" (aa)
+ : "cc"
+ );
+ if (!overflow)
+ return Value::fromInt32(aa);
+ return Value::fromDouble((double)a * (double)b);
+}
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // defined(QMLJS_INLINE_MATH)
+
+#ifdef QMLJS_READONLY
+#undef QMLJS_READONLY
+#endif
+
+#endif // QMLJS_MATH_H
diff --git a/src/qml/qml/v4vm/qv4mathobject.cpp b/src/qml/qml/v4vm/qv4mathobject.cpp
new file mode 100644
index 0000000000..d1017ebad6
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4mathobject.cpp
@@ -0,0 +1,311 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4mathobject.h"
+#include "qv4objectproto.h"
+
+#include <cmath>
+#include <qmath.h>
+#include <qnumeric.h>
+
+using namespace QQmlJS::VM;
+
+static const double qt_PI = 2.0 * ::asin(1.0);
+
+MathObject::MathObject(ExecutionContext *ctx)
+ : Object(ctx->engine)
+{
+ type = Type_MathObject;
+ prototype = ctx->engine->objectPrototype;
+
+ defineReadonlyProperty(ctx->engine, QStringLiteral("E"), Value::fromDouble(::exp(1.0)));
+ defineReadonlyProperty(ctx->engine, QStringLiteral("LN2"), Value::fromDouble(::log(2.0)));
+ defineReadonlyProperty(ctx->engine, QStringLiteral("LN10"), Value::fromDouble(::log(10.0)));
+ defineReadonlyProperty(ctx->engine, QStringLiteral("LOG2E"), Value::fromDouble(1.0/::log(2.0)));
+ defineReadonlyProperty(ctx->engine, QStringLiteral("LOG10E"), Value::fromDouble(1.0/::log(10.0)));
+ defineReadonlyProperty(ctx->engine, QStringLiteral("PI"), Value::fromDouble(qt_PI));
+ defineReadonlyProperty(ctx->engine, QStringLiteral("SQRT1_2"), Value::fromDouble(::sqrt(0.5)));
+ defineReadonlyProperty(ctx->engine, QStringLiteral("SQRT2"), Value::fromDouble(::sqrt(2.0)));
+
+ defineDefaultProperty(ctx, QStringLiteral("abs"), method_abs, 1);
+ defineDefaultProperty(ctx, QStringLiteral("acos"), method_acos, 1);
+    defineDefaultProperty(ctx, QStringLiteral("asin"), method_asin, 1);
+ defineDefaultProperty(ctx, QStringLiteral("atan"), method_atan, 1);
+ defineDefaultProperty(ctx, QStringLiteral("atan2"), method_atan2, 2);
+ defineDefaultProperty(ctx, QStringLiteral("ceil"), method_ceil, 1);
+ defineDefaultProperty(ctx, QStringLiteral("cos"), method_cos, 1);
+ defineDefaultProperty(ctx, QStringLiteral("exp"), method_exp, 1);
+ defineDefaultProperty(ctx, QStringLiteral("floor"), method_floor, 1);
+ defineDefaultProperty(ctx, QStringLiteral("log"), method_log, 1);
+ defineDefaultProperty(ctx, QStringLiteral("max"), method_max, 2);
+ defineDefaultProperty(ctx, QStringLiteral("min"), method_min, 2);
+ defineDefaultProperty(ctx, QStringLiteral("pow"), method_pow, 2);
+ defineDefaultProperty(ctx, QStringLiteral("random"), method_random, 0);
+ defineDefaultProperty(ctx, QStringLiteral("round"), method_round, 1);
+ defineDefaultProperty(ctx, QStringLiteral("sin"), method_sin, 1);
+ defineDefaultProperty(ctx, QStringLiteral("sqrt"), method_sqrt, 1);
+ defineDefaultProperty(ctx, QStringLiteral("tan"), method_tan, 1);
+}
+
+/* copies the sign from y to x and returns the result */
+static double copySign(double x, double y)
+{
+ uchar *xch = (uchar *)&x;
+ uchar *ych = (uchar *)&y;
+ if (QSysInfo::ByteOrder == QSysInfo::BigEndian)
+ xch[0] = (xch[0] & 0x7f) | (ych[0] & 0x80);
+ else
+ xch[7] = (xch[7] & 0x7f) | (ych[7] & 0x80);
+ return x;
+}
+
+Value MathObject::method_abs(SimpleCallContext *context)
+{
+ if (!context->argumentCount)
+ return Value::fromDouble(qSNaN());
+
+ if (context->arguments[0].isInteger()) {
+ int i = context->arguments[0].integerValue();
+ return Value::fromInt32(i < 0 ? - i : i);
+ }
+
+ double v = context->arguments[0].toNumber();
+ if (v == 0) // 0 | -0
+ return Value::fromDouble(0);
+
+ return Value::fromDouble(v < 0 ? -v : v);
+}
+
+Value MathObject::method_acos(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : 2;
+ if (v > 1)
+ return Value::fromDouble(qSNaN());
+
+ return Value::fromDouble(::acos(v));
+}
+
+Value MathObject::method_asin(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : 2;
+ if (v > 1)
+ return Value::fromDouble(qSNaN());
+ else
+ return Value::fromDouble(::asin(v));
+}
+
+Value MathObject::method_atan(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ if (v == 0.0)
+ return Value::fromDouble(v);
+ else
+ return Value::fromDouble(::atan(v));
+}
+
+Value MathObject::method_atan2(SimpleCallContext *context)
+{
+ double v1 = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ double v2 = context->argumentCount > 1 ? context->arguments[1].toNumber() : qSNaN();
+
+ if ((v1 < 0) && qIsFinite(v1) && qIsInf(v2) && (copySign(1.0, v2) == 1.0))
+ return Value::fromDouble(copySign(0, -1.0));
+
+ if ((v1 == 0.0) && (v2 == 0.0)) {
+ if ((copySign(1.0, v1) == 1.0) && (copySign(1.0, v2) == -1.0)) {
+ return Value::fromDouble(qt_PI);
+ } else if ((copySign(1.0, v1) == -1.0) && (copySign(1.0, v2) == -1.0)) {
+ return Value::fromDouble(-qt_PI);
+ }
+ }
+ return Value::fromDouble(::atan2(v1, v2));
+}
+
+Value MathObject::method_ceil(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ if (v < 0.0 && v > -1.0)
+ return Value::fromDouble(copySign(0, -1.0));
+ else
+ return Value::fromDouble(::ceil(v));
+}
+
+Value MathObject::method_cos(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ return Value::fromDouble(::cos(v));
+}
+
+Value MathObject::method_exp(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ if (qIsInf(v)) {
+ if (copySign(1.0, v) == -1.0)
+ return Value::fromDouble(0);
+ else
+ return Value::fromDouble(qInf());
+ } else {
+ return Value::fromDouble(::exp(v));
+ }
+}
+
+Value MathObject::method_floor(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ return Value::fromDouble(::floor(v));
+}
+
+Value MathObject::method_log(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ if (v < 0)
+ return Value::fromDouble(qSNaN());
+ else
+ return Value::fromDouble(::log(v));
+}
+
+Value MathObject::method_max(SimpleCallContext *context)
+{
+ double mx = -qInf();
+ for (unsigned i = 0; i < context->argumentCount; ++i) {
+ double x = context->arguments[i].toNumber();
+ if (x > mx || isnan(x))
+ mx = x;
+ }
+ return Value::fromDouble(mx);
+}
+
+Value MathObject::method_min(SimpleCallContext *context)
+{
+ double mx = qInf();
+ for (unsigned i = 0; i < context->argumentCount; ++i) {
+ double x = context->arguments[i].toNumber();
+ if ((x == 0 && mx == x && copySign(1.0, x) == -1.0)
+ || (x < mx) || isnan(x)) {
+ mx = x;
+ }
+ }
+ return Value::fromDouble(mx);
+}
+
+Value MathObject::method_pow(SimpleCallContext *context)
+{
+ double x = context->argumentCount > 0 ? context->arguments[0].toNumber() : qSNaN();
+ double y = context->argumentCount > 1 ? context->arguments[1].toNumber() : qSNaN();
+
+ if (isnan(y))
+ return Value::fromDouble(qSNaN());
+
+ if (y == 0) {
+ return Value::fromDouble(1);
+ } else if (((x == 1) || (x == -1)) && isinf(y)) {
+ return Value::fromDouble(qSNaN());
+ } else if (((x == 0) && copySign(1.0, x) == 1.0) && (y < 0)) {
+ return Value::fromDouble(qInf());
+ } else if ((x == 0) && copySign(1.0, x) == -1.0) {
+ if (y < 0) {
+ if (::fmod(-y, 2.0) == 1.0)
+ return Value::fromDouble(-qInf());
+ else
+ return Value::fromDouble(qInf());
+ } else if (y > 0) {
+ if (::fmod(y, 2.0) == 1.0)
+ return Value::fromDouble(copySign(0, -1.0));
+ else
+ return Value::fromDouble(0);
+ }
+ }
+
+#ifdef Q_OS_AIX
+ else if (qIsInf(x) && copySign(1.0, x) == -1.0) {
+ if (y > 0) {
+ if (::fmod(y, 2.0) == 1.0)
+ return Value::fromDouble(-qInf());
+ else
+ return Value::fromDouble(qInf());
+ } else if (y < 0) {
+ if (::fmod(-y, 2.0) == 1.0)
+ return Value::fromDouble(copySign(0, -1.0));
+ else
+ return Value::fromDouble(0);
+ }
+ }
+#endif
+ else {
+ return Value::fromDouble(::pow(x, y));
+ }
+ // ###
+ return Value::fromDouble(qSNaN());
+}
+
+Value MathObject::method_random(SimpleCallContext *)
+{
+ return Value::fromDouble(qrand() / (double) RAND_MAX);
+}
+
+Value MathObject::method_round(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ v = copySign(::floor(v + 0.5), v);
+ return Value::fromDouble(v);
+}
+
+Value MathObject::method_sin(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ return Value::fromDouble(::sin(v));
+}
+
+Value MathObject::method_sqrt(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ return Value::fromDouble(::sqrt(v));
+}
+
+Value MathObject::method_tan(SimpleCallContext *context)
+{
+ double v = context->argumentCount ? context->arguments[0].toNumber() : qSNaN();
+ if (v == 0.0)
+ return Value::fromDouble(v);
+ else
+ return Value::fromDouble(::tan(v));
+}
+
diff --git a/src/qml/qml/v4vm/qv4mathobject.h b/src/qml/qml/v4vm/qv4mathobject.h
new file mode 100644
index 0000000000..68ca87d38b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4mathobject.h
@@ -0,0 +1,80 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4MATHOBJECT_H
+#define QV4MATHOBJECT_H
+
+#include "qv4object.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct MathObject: Object
+{
+ MathObject(ExecutionContext *ctx);
+
+ static Value method_abs(SimpleCallContext *context);
+ static Value method_acos(SimpleCallContext *context);
+ static Value method_asin(SimpleCallContext *context);
+ static Value method_atan(SimpleCallContext *context);
+ static Value method_atan2(SimpleCallContext *context);
+ static Value method_ceil(SimpleCallContext *context);
+ static Value method_cos(SimpleCallContext *context);
+ static Value method_exp(SimpleCallContext *context);
+ static Value method_floor(SimpleCallContext *context);
+ static Value method_log(SimpleCallContext *context);
+ static Value method_max(SimpleCallContext *context);
+ static Value method_min(SimpleCallContext *context);
+ static Value method_pow(SimpleCallContext *context);
+ static Value method_random(SimpleCallContext *context);
+ static Value method_round(SimpleCallContext *context);
+ static Value method_sin(SimpleCallContext *context);
+ static Value method_sqrt(SimpleCallContext *context);
+ static Value method_tan(SimpleCallContext *context);
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4MATHOBJECT_H
diff --git a/src/qml/qml/v4vm/qv4mm.cpp b/src/qml/qml/v4vm/qv4mm.cpp
new file mode 100644
index 0000000000..19e0e3fd68
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4mm.cpp
@@ -0,0 +1,493 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4engine.h"
+#include "qv4object.h"
+#include "qv4objectproto.h"
+#include "qv4mm.h"
+#include "PageAllocation.h"
+#include "StdLibExtras.h"
+
+#include <QTime>
+#include <QVector>
+#include <QHash>
+#include <QMap>
+
+#include <iostream>
+#include <cstdlib>
+#include "qv4alloca_p.h"
+
+#ifdef V4_USE_VALGRIND
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+#endif
+
+using namespace QQmlJS::VM;
+using namespace WTF;
+
+static const std::size_t CHUNK_SIZE = 1024*32;
+
+struct MemoryManager::Data
+{
+ bool enableGC;
+ bool gcBlocked;
+ bool scribble;
+ bool aggressiveGC;
+ ExecutionEngine *engine;
+ quintptr *stackTop;
+
+ enum { MaxItemSize = 256 };
+ Managed *smallItems[MaxItemSize/16];
+ uint nChunks[MaxItemSize/16];
+ uint availableItems[MaxItemSize/16];
+ uint allocCount[MaxItemSize/16];
+ struct Chunk {
+ PageAllocation memory;
+ int chunkSize;
+ };
+
+ QVector<Chunk> heapChunks;
+ QHash<Managed *, uint> protectedObject;
+
+ // statistics:
+#ifdef DETAILED_MM_STATS
+ QVector<unsigned> allocSizeCounters;
+#endif // DETAILED_MM_STATS
+
+ Data(bool enableGC)
+ : enableGC(enableGC)
+ , gcBlocked(false)
+ , engine(0)
+ , stackTop(0)
+ {
+ memset(smallItems, 0, sizeof(smallItems));
+ memset(nChunks, 0, sizeof(nChunks));
+ memset(availableItems, 0, sizeof(availableItems));
+ memset(allocCount, 0, sizeof(allocCount));
+ scribble = !qgetenv("MM_SCRIBBLE").isEmpty();
+ aggressiveGC = !qgetenv("MM_AGGRESSIVE_GC").isEmpty();
+ }
+
+ ~Data()
+ {
+ for (QVector<Chunk>::iterator i = heapChunks.begin(), ei = heapChunks.end(); i != ei; ++i)
+ i->memory.deallocate();
+ }
+};
+
+#define SCRIBBLE(obj, c, size) \
+ if (m_d->scribble) \
+ ::memset((void *)(obj + 1), c, size - sizeof(Managed));
+
+
+namespace QQmlJS { namespace VM {
+
+bool operator<(const MemoryManager::Data::Chunk &a, const MemoryManager::Data::Chunk &b)
+{
+ return a.memory.base() < b.memory.base();
+}
+
+} } // namespace QQmlJS::VM
+
+MemoryManager::MemoryManager()
+ : m_d(new Data(true))
+ , m_contextList(0)
+{
+ setEnableGC(true);
+#ifdef V4_USE_VALGRIND
+ VALGRIND_CREATE_MEMPOOL(this, 0, true);
+#endif
+
+#if USE(PTHREADS)
+# if OS(DARWIN)
+ void *st = pthread_get_stackaddr_np(pthread_self());
+ m_d->stackTop = static_cast<quintptr *>(st);
+# else
+ void* stackBottom = 0;
+ pthread_attr_t attr;
+ pthread_getattr_np(pthread_self(), &attr);
+ size_t stackSize = 0;
+ pthread_attr_getstack(&attr, &stackBottom, &stackSize);
+ pthread_attr_destroy(&attr);
+
+ m_d->stackTop = static_cast<quintptr *>(stackBottom) + stackSize/sizeof(quintptr);
+# endif
+#elif OS(WINDOWS)
+# if COMPILER(MSVC)
+ PNT_TIB tib = (PNT_TIB)NtCurrentTeb();
+ m_d->stackTop = static_cast<quintptr*>(tib->StackBase);
+# else
+# error "Unsupported compiler: no way to get the top-of-stack."
+# endif
+#else
+# error "Unsupported platform: no way to get the top-of-stack."
+#endif
+
+}
+
+Managed *MemoryManager::alloc(std::size_t size)
+{
+ if (m_d->aggressiveGC)
+ runGC();
+#ifdef DETAILED_MM_STATS
+ willAllocate(size);
+#endif // DETAILED_MM_STATS
+
+ assert(size >= 16);
+ assert(size % 16 == 0);
+
+ size_t pos = size >> 4;
+ ++m_d->allocCount[pos];
+
+ // fits into a small bucket
+ assert(size < MemoryManager::Data::MaxItemSize);
+
+ Managed *m = m_d->smallItems[pos];
+ if (m)
+ goto found;
+
+ // try to free up space, otherwise allocate
+ if (m_d->allocCount[pos] > (m_d->availableItems[pos] >> 1) && !m_d->aggressiveGC) {
+ runGC();
+ m = m_d->smallItems[pos];
+ if (m)
+ goto found;
+ }
+
+ // no free item available, allocate a new chunk
+ {
+ // allocate larger chunks at a time to avoid excessive GC, but cap at 64M chunks
+ uint shift = ++m_d->nChunks[pos];
+ if (shift > 10)
+ shift = 10;
+ std::size_t allocSize = CHUNK_SIZE*(1 << shift)*size;
+ allocSize = roundUpToMultipleOf(WTF::pageSize(), allocSize);
+ Data::Chunk allocation;
+ allocation.memory = PageAllocation::allocate(allocSize, OSAllocator::JSGCHeapPages);
+ allocation.chunkSize = size;
+ m_d->heapChunks.append(allocation);
+ qSort(m_d->heapChunks);
+ char *chunk = (char *)allocation.memory.base();
+ char *end = chunk + allocation.memory.size() - size;
+ memset(chunk, 0, allocation.memory.size());
+ Managed **last = &m_d->smallItems[pos];
+ while (chunk <= end) {
+ Managed *o = reinterpret_cast<Managed *>(chunk);
+ o->_data = 0;
+ *last = o;
+ last = o->nextFreeRef();
+ chunk += size;
+ }
+ *last = 0;
+ m = m_d->smallItems[pos];
+ m_d->availableItems[pos] += allocation.memory.size()/size - 1;
+#ifdef V4_USE_VALGRIND
+ VALGRIND_MAKE_MEM_NOACCESS(allocation.memory, allocation.chunkSize);
+#endif
+ }
+
+ found:
+#ifdef V4_USE_VALGRIND
+ VALGRIND_MEMPOOL_ALLOC(this, m, size);
+#endif
+
+ m_d->smallItems[pos] = m->nextFree();
+ return m;
+}
+
+void MemoryManager::mark()
+{
+ m_d->engine->markObjects();
+
+ for (QHash<Managed *, uint>::const_iterator it = m_d->protectedObject.begin(); it != m_d->protectedObject.constEnd(); ++it)
+ it.key()->mark();
+
+ // push all caller saved registers to the stack, so we can find the objects living in these registers
+#if COMPILER(MSVC)
+# if CPU(X86_64)
+ HANDLE thread = GetCurrentThread();
+ WOW64_CONTEXT ctxt;
+ /*bool success =*/ Wow64GetThreadContext(thread, &ctxt);
+# elif CPU(X86)
+ HANDLE thread = GetCurrentThread();
+ CONTEXT ctxt;
+ /*bool success =*/ GetThreadContext(thread, &ctxt);
+# endif // CPU
+#elif COMPILER(CLANG) || COMPILER(GCC)
+# if CPU(X86_64)
+ quintptr regs[5];
+ asm(
+ "mov %%rbp, %0\n"
+ "mov %%r12, %1\n"
+ "mov %%r13, %2\n"
+ "mov %%r14, %3\n"
+ "mov %%r15, %4\n"
+ : "=m" (regs[0]), "=m" (regs[1]), "=m" (regs[2]), "=m" (regs[3]), "=m" (regs[4])
+ :
+ :
+ );
+# endif // CPU
+#endif // COMPILER
+
+ collectFromStack();
+}
+
+std::size_t MemoryManager::sweep()
+{
+ std::size_t freedCount = 0;
+
+ for (QVector<Data::Chunk>::iterator i = m_d->heapChunks.begin(), ei = m_d->heapChunks.end(); i != ei; ++i)
+ freedCount += sweep(reinterpret_cast<char*>(i->memory.base()), i->memory.size(), i->chunkSize);
+
+ ExecutionContext *ctx = m_contextList;
+ ExecutionContext **n = &m_contextList;
+ while (ctx) {
+ ExecutionContext *next = ctx->next;
+ if (!ctx->marked) {
+ free(ctx);
+ *n = next;
+ } else {
+ ctx->marked = false;
+ n = &ctx->next;
+ }
+ ctx = next;
+ }
+
+ return freedCount;
+}
+
+std::size_t MemoryManager::sweep(char *chunkStart, std::size_t chunkSize, size_t size)
+{
+// qDebug("chunkStart @ %p, size=%x, pos=%x (%x)", chunkStart, size, size>>4, m_d->smallItems[size >> 4]);
+ std::size_t freedCount = 0;
+
+ Managed **f = &m_d->smallItems[size >> 4];
+
+#ifdef V4_USE_VALGRIND
+ VALGRIND_DISABLE_ERROR_REPORTING;
+#endif
+ for (char *chunk = chunkStart, *chunkEnd = chunk + chunkSize - size; chunk <= chunkEnd; chunk += size) {
+ Managed *m = reinterpret_cast<Managed *>(chunk);
+// qDebug("chunk @ %p, size = %lu, in use: %s, mark bit: %s",
+// chunk, m->size, (m->inUse ? "yes" : "no"), (m->markBit ? "true" : "false"));
+
+ assert((intptr_t) chunk % 16 == 0);
+
+ if (m->inUse) {
+ if (m->markBit) {
+ m->markBit = 0;
+ } else {
+// qDebug() << "-- collecting it." << m << *f << m->nextFree();
+#ifdef V4_USE_VALGRIND
+ VALGRIND_ENABLE_ERROR_REPORTING;
+#endif
+ m->vtbl->destroy(m);
+
+ m->setNextFree(*f);
+#ifdef V4_USE_VALGRIND
+ VALGRIND_DISABLE_ERROR_REPORTING;
+ VALGRIND_MEMPOOL_FREE(this, m);
+#endif
+ *f = m;
+ SCRIBBLE(m, 0x99, size);
+ ++freedCount;
+ }
+ }
+ }
+#ifdef V4_USE_VALGRIND
+ VALGRIND_ENABLE_ERROR_REPORTING;
+#endif
+
+ return freedCount;
+}
+
+bool MemoryManager::isGCBlocked() const
+{
+ return m_d->gcBlocked;
+}
+
+void MemoryManager::setGCBlocked(bool blockGC)
+{
+ m_d->gcBlocked = blockGC;
+}
+
+void MemoryManager::runGC()
+{
+ if (!m_d->enableGC || m_d->gcBlocked) {
+// qDebug() << "Not running GC.";
+ return;
+ }
+
+// QTime t; t.start();
+
+// qDebug() << ">>>>>>>>runGC";
+
+ mark();
+// std::cerr << "GC: marked " << marks
+// << " objects in " << t.elapsed()
+// << "ms" << std::endl;
+
+// t.restart();
+ /*std::size_t freedCount =*/ sweep();
+// std::cerr << "GC: sweep freed " << freedCount
+// << " objects in " << t.elapsed()
+// << "ms" << std::endl;
+ memset(m_d->allocCount, 0, sizeof(m_d->allocCount));
+}
+
+void MemoryManager::setEnableGC(bool enableGC)
+{
+ m_d->enableGC = enableGC;
+}
+
+MemoryManager::~MemoryManager()
+{
+ sweep();
+}
+
+void MemoryManager::protect(Managed *m)
+{
+ ++m_d->protectedObject[m];
+}
+
+void MemoryManager::unprotect(Managed *m)
+{
+ if (!--m_d->protectedObject[m])
+ m_d->protectedObject.remove(m);
+}
+
+static inline void add(QVector<Managed *> &values, const Value &v)
+{
+ if (Object *o = v.asObject())
+ values.append(o);
+}
+
+void MemoryManager::setExecutionEngine(ExecutionEngine *engine)
+{
+ m_d->engine = engine;
+}
+
+void MemoryManager::dumpStats() const
+{
+#ifdef DETAILED_MM_STATS
+ std::cerr << "=================" << std::endl;
+ std::cerr << "Allocation stats:" << std::endl;
+ std::cerr << "Requests for each chunk size:" << std::endl;
+ for (int i = 0; i < m_d->allocSizeCounters.size(); ++i) {
+ if (unsigned count = m_d->allocSizeCounters[i]) {
+ std::cerr << "\t" << (i << 4) << " bytes chunks: " << count << std::endl;
+ }
+ }
+#endif // DETAILED_MM_STATS
+}
+
+ExecutionEngine *MemoryManager::engine() const
+{
+ return m_d->engine;
+}
+
+#ifdef DETAILED_MM_STATS
+void MemoryManager::willAllocate(std::size_t size)
+{
+ unsigned alignedSize = (size + 15) >> 4;
+ QVector<unsigned> &counters = m_d->allocSizeCounters;
+ if ((unsigned) counters.size() < alignedSize + 1)
+ counters.resize(alignedSize + 1);
+ counters[alignedSize]++;
+}
+
+#endif // DETAILED_MM_STATS
+
+void MemoryManager::collectFromStack() const
+{
+ quintptr valueOnStack = 0;
+
+ if (!m_d->heapChunks.count())
+ return;
+
+ quintptr *current = (&valueOnStack) + 1;
+// qDebug() << "collectFromStack";// << top << current << &valueOnStack;
+
+#ifdef V4_USE_VALGRIND
+ VALGRIND_MAKE_MEM_DEFINED(current, (m_d->stackTop - current)*sizeof(quintptr));
+#endif
+
+ char** heapChunkBoundaries = (char**)alloca(m_d->heapChunks.count() * 2 * sizeof(char*));
+ char** heapChunkBoundariesEnd = heapChunkBoundaries + 2 * m_d->heapChunks.count();
+ int i = 0;
+ for (QVector<Data::Chunk>::Iterator it = m_d->heapChunks.begin(), end =
+ m_d->heapChunks.end(); it != end; ++it) {
+ heapChunkBoundaries[i++] = reinterpret_cast<char*>(it->memory.base()) - 1;
+ heapChunkBoundaries[i++] = reinterpret_cast<char*>(it->memory.base()) + it->memory.size() - it->chunkSize;
+ }
+ assert(i == m_d->heapChunks.count() * 2);
+
+ for (; current < m_d->stackTop; ++current) {
+ char* genericPtr =
+#if QT_POINTER_SIZE == 8
+ reinterpret_cast<char *>((*current) & ~(quint64(Value::Type_Mask) << Value::Tag_Shift));
+#else
+ reinterpret_cast<char *>(*current);
+#endif
+
+ if (genericPtr < *heapChunkBoundaries || genericPtr > *(heapChunkBoundariesEnd - 1))
+ continue;
+ int index = qLowerBound(heapChunkBoundaries, heapChunkBoundariesEnd, genericPtr) - heapChunkBoundaries;
+ // An odd index means the pointer is _before_ the end of a heap chunk and therefore valid.
+ assert(index >= 0 && index < m_d->heapChunks.count() * 2);
+ if (index & 1) {
+ int size = m_d->heapChunks.at(index >> 1).chunkSize;
+ Managed *m = reinterpret_cast<Managed *>(genericPtr);
+// qDebug() << " inside" << size;
+
+ if (((quintptr)m - (quintptr)heapChunkBoundaries[index-1] - 1 ) % size)
+ // wrongly aligned value, skip it
+ continue;
+
+ if (!m->inUse)
+ // Skip pointers to already freed objects, they are bogus as well
+ continue;
+
+// qDebug() << " marking";
+ m->mark();
+ }
+ }
+}
diff --git a/src/qml/qml/v4vm/qv4mm.h b/src/qml/qml/v4vm/qv4mm.h
new file mode 100644
index 0000000000..a41df835c3
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4mm.h
@@ -0,0 +1,155 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4GC_H
+#define QV4GC_H
+
+#include "qv4global.h"
+#include "qv4context.h"
+
+#include <QScopedPointer>
+
+//#define DETAILED_MM_STATS
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct ExecutionEngine;
+struct ExecutionContext;
+struct Managed;
+
+class Q_V4_EXPORT MemoryManager
+{
+ MemoryManager(const MemoryManager &);
+ MemoryManager &operator=(const MemoryManager&);
+
+public:
+ struct Data;
+
+ class GCBlocker
+ {
+ public:
+ GCBlocker(MemoryManager *mm)
+ : mm(mm)
+ , wasBlocked(mm->isGCBlocked())
+ {
+ mm->setGCBlocked(true);
+ }
+
+ ~GCBlocker()
+ {
+ mm->setGCBlocked(wasBlocked);
+ }
+
+ private:
+ MemoryManager *mm;
+ bool wasBlocked;
+ };
+
+public:
+ MemoryManager();
+ ~MemoryManager();
+
+ void protect(Managed *m);
+ void unprotect(Managed *m);
+
+ // TODO: this is only for 64bit (and x86 with SSE/AVX), so exend it for other architectures to be slightly more efficient (meaning, align on 8-byte boundaries).
+ // Note: all occurances of "16" in alloc/dealloc are also due to the alignment.
+ static inline std::size_t align(std::size_t size)
+ { return (size + 15) & ~0xf; }
+
+ inline Managed *allocManaged(std::size_t size)
+ {
+ size = align(size);
+ Managed *o = alloc(size);
+ return o;
+ }
+
+ ExecutionContext *allocContext(uint size);
+
+ bool isGCBlocked() const;
+ void setGCBlocked(bool blockGC);
+ void runGC();
+
+ void setEnableGC(bool enableGC);
+ void setExecutionEngine(ExecutionEngine *engine);
+
+ void dumpStats() const;
+
+protected:
+ /// expects size to be aligned
+ // TODO: try to inline
+ Managed *alloc(std::size_t size);
+
+ ExecutionEngine *engine() const;
+
+#ifdef DETAILED_MM_STATS
+ void willAllocate(std::size_t size);
+#endif // DETAILED_MM_STATS
+
+private:
+ void collectFromStack() const;
+ void mark();
+ std::size_t sweep();
+ std::size_t sweep(char *chunkStart, std::size_t chunkSize, size_t size);
+
+protected:
+ QScopedPointer<Data> m_d;
+ ExecutionContext *m_contextList;
+};
+
+inline ExecutionContext *MemoryManager::allocContext(uint size)
+{
+ ExecutionContext *newContext = (ExecutionContext *)malloc(size);
+ newContext->next = m_contextList;
+ m_contextList = newContext;
+ return newContext;
+}
+
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4GC_H
diff --git a/src/qml/qml/v4vm/qv4numberobject.cpp b/src/qml/qml/v4vm/qv4numberobject.cpp
new file mode 100644
index 0000000000..f32c8b4f97
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4numberobject.cpp
@@ -0,0 +1,237 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4numberobject.h"
+#include <QtCore/qnumeric.h>
+#include <QtCore/qmath.h>
+#include <QtCore/QDebug>
+#include <cassert>
+
+
+using namespace QQmlJS::VM;
+
+DEFINE_MANAGED_VTABLE(NumberCtor);
+
+NumberCtor::NumberCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+Value NumberCtor::construct(Managed *, ExecutionContext *ctx, Value *args, int argc)
+{
+ double d = argc ? args[0].toNumber() : 0.;
+ return Value::fromObject(ctx->engine->newNumberObject(Value::fromDouble(d)));
+}
+
+Value NumberCtor::call(Managed *m, ExecutionContext *parentCtx, const Value &thisObject, Value *argv, int argc)
+{
+ double d = argc ? argv[0].toNumber() : 0.;
+ return Value::fromDouble(d);
+}
+
+void NumberPrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(1));
+
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine, QStringLiteral("NaN"), Value::fromDouble(qSNaN()));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine, QStringLiteral("NEGATIVE_INFINITY"), Value::fromDouble(-qInf()));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine, QStringLiteral("POSITIVE_INFINITY"), Value::fromDouble(qInf()));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine, QStringLiteral("MAX_VALUE"), Value::fromDouble(1.7976931348623158e+308));
+
+#ifdef __INTEL_COMPILER
+# pragma warning( push )
+# pragma warning(disable: 239)
+#endif
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine, QStringLiteral("MIN_VALUE"), Value::fromDouble(5e-324));
+#ifdef __INTEL_COMPILER
+# pragma warning( pop )
+#endif
+
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleString"), method_toLocaleString);
+ defineDefaultProperty(ctx, QStringLiteral("valueOf"), method_valueOf);
+ defineDefaultProperty(ctx, QStringLiteral("toFixed"), method_toFixed, 1);
+ defineDefaultProperty(ctx, QStringLiteral("toExponential"), method_toExponential);
+ defineDefaultProperty(ctx, QStringLiteral("toPrecision"), method_toPrecision);
+}
+
+Value NumberPrototype::method_toString(SimpleCallContext *ctx)
+{
+ double num;
+ if (ctx->thisObject.isNumber()) {
+ num = ctx->thisObject.asDouble();
+ } else {
+ NumberObject *thisObject = ctx->thisObject.asNumberObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+ num = thisObject->value.asDouble();
+ }
+
+ Value arg = ctx->argument(0);
+ if (!arg.isUndefined()) {
+ int radix = arg.toInt32();
+ if (radix < 2 || radix > 36) {
+ ctx->throwError(QString::fromLatin1("Number.prototype.toString: %0 is not a valid radix")
+ .arg(radix));
+ return Value::undefinedValue();
+ }
+
+ if (isnan(num)) {
+ return Value::fromString(ctx, QStringLiteral("NaN"));
+ } else if (qIsInf(num)) {
+ return Value::fromString(ctx, QLatin1String(num < 0 ? "-Infinity" : "Infinity"));
+ }
+
+ if (radix != 10) {
+ QString str;
+ bool negative = false;
+ if (num < 0) {
+ negative = true;
+ num = -num;
+ }
+ double frac = num - ::floor(num);
+ num = Value::toInteger(num);
+ do {
+ char c = (char)::fmod(num, radix);
+ c = (c < 10) ? (c + '0') : (c - 10 + 'a');
+ str.prepend(QLatin1Char(c));
+ num = ::floor(num / radix);
+ } while (num != 0);
+ if (frac != 0) {
+ str.append(QLatin1Char('.'));
+ do {
+ frac = frac * radix;
+ char c = (char)::floor(frac);
+ c = (c < 10) ? (c + '0') : (c - 10 + 'a');
+ str.append(QLatin1Char(c));
+ frac = frac - ::floor(frac);
+ } while (frac != 0);
+ }
+ if (negative)
+ str.prepend(QLatin1Char('-'));
+ return Value::fromString(ctx, str);
+ }
+ }
+
+ String *str = Value::fromDouble(num).toString(ctx);
+ return Value::fromString(str);
+}
+
+Value NumberPrototype::method_toLocaleString(SimpleCallContext *ctx)
+{
+ NumberObject *thisObject = ctx->thisObject.asNumberObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+
+ String *str = thisObject->value.toString(ctx);
+ return Value::fromString(str);
+}
+
+Value NumberPrototype::method_valueOf(SimpleCallContext *ctx)
+{
+ NumberObject *thisObject = ctx->thisObject.asNumberObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+
+ return thisObject->value;
+}
+
+Value NumberPrototype::method_toFixed(SimpleCallContext *ctx)
+{
+ NumberObject *thisObject = ctx->thisObject.asNumberObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+
+ double fdigits = 0;
+
+ if (ctx->argumentCount > 0)
+ fdigits = ctx->argument(0).toInteger();
+
+ if (isnan(fdigits))
+ fdigits = 0;
+
+ if (fdigits < 0 || fdigits > 20)
+ ctx->throwRangeError(ctx->thisObject);
+
+ double v = thisObject->value.asDouble();
+ QString str;
+ if (isnan(v))
+ str = QString::fromLatin1("NaN");
+ else if (qIsInf(v))
+ str = QString::fromLatin1(v < 0 ? "-Infinity" : "Infinity");
+ else if (v < 1.e21)
+ str = QString::number(v, 'f', int (fdigits));
+ else
+ return __qmljs_string_from_number(ctx, v);
+ return Value::fromString(ctx, str);
+}
+
+Value NumberPrototype::method_toExponential(SimpleCallContext *ctx)
+{
+ NumberObject *thisObject = ctx->thisObject.asNumberObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+
+ double fdigits = 0;
+
+ if (ctx->argumentCount > 0)
+ fdigits = ctx->argument(0).toInteger();
+
+ QString z = QString::number(thisObject->value.asDouble(), 'e', int (fdigits));
+ return Value::fromString(ctx, z);
+}
+
+Value NumberPrototype::method_toPrecision(SimpleCallContext *ctx)
+{
+ NumberObject *thisObject = ctx->thisObject.asNumberObject();
+ if (!thisObject)
+ ctx->throwTypeError();
+
+ double fdigits = 0;
+
+ if (ctx->argumentCount > 0)
+ fdigits = ctx->argument(0).toInteger();
+
+ return Value::fromString(ctx, QString::number(thisObject->value.asDouble(), 'g', int (fdigits)));
+}
diff --git a/src/qml/qml/v4vm/qv4numberobject.h b/src/qml/qml/v4vm/qv4numberobject.h
new file mode 100644
index 0000000000..d8be4790da
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4numberobject.h
@@ -0,0 +1,83 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4NUMBEROBJECT_H
+#define QV4NUMBEROBJECT_H
+
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <QtCore/qnumeric.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct NumberCtor: FunctionObject
+{
+ NumberCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *that, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct NumberPrototype: NumberObject
+{
+ NumberPrototype(ExecutionEngine *engine): NumberObject(engine, Value::fromDouble(0)) {}
+ void init(ExecutionContext *ctx, const Value &ctor);
+
+ static Value method_toString(SimpleCallContext *ctx);
+ static Value method_toLocaleString(SimpleCallContext *ctx);
+ static Value method_valueOf(SimpleCallContext *ctx);
+ static Value method_toFixed(SimpleCallContext *ctx);
+ static Value method_toExponential(SimpleCallContext *ctx);
+ static Value method_toPrecision(SimpleCallContext *ctx);
+};
+
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4ECMAOBJECTS_P_H
diff --git a/src/qml/qml/v4vm/qv4object.cpp b/src/qml/qml/v4vm/qv4object.cpp
new file mode 100644
index 0000000000..5091ceb095
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4object.cpp
@@ -0,0 +1,1177 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4object.h"
+#include "qv4jsir_p.h"
+#include "qv4isel_p.h"
+#include "qv4objectproto.h"
+#include "qv4stringobject.h"
+#include "qv4argumentsobject.h"
+#include "qv4mm.h"
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include "private/qlocale_tools_p.h"
+
+#include <QtCore/qmath.h>
+#include <QtCore/QDebug>
+#include <cassert>
+#include <typeinfo>
+#include <iostream>
+#include "qv4alloca_p.h"
+
+using namespace QQmlJS::VM;
+
+DEFINE_MANAGED_VTABLE(Object);
+
+Object::Object(ExecutionEngine *engine)
+ : prototype(0)
+ , internalClass(engine->emptyClass)
+ , memberDataAlloc(InlinePropertySize), memberData(inlineProperties)
+ , arrayOffset(0), arrayDataLen(0), arrayAlloc(0), arrayAttributes(0), arrayData(0), sparseArray(0)
+ , externalResource(0)
+{
+ vtbl = &static_vtbl;
+ type = Type_Object;
+}
+
+Object::Object(ExecutionContext *context)
+ : prototype(0)
+ , internalClass(context->engine->emptyClass)
+ , memberDataAlloc(InlinePropertySize), memberData(inlineProperties)
+ , arrayOffset(0), arrayDataLen(0), arrayAlloc(0), arrayAttributes(0), arrayData(0), sparseArray(0)
+ , externalResource(0)
+{
+ vtbl = &static_vtbl;
+ type = Type_Object;
+}
+
+Object::~Object()
+{
+ delete externalResource;
+ if (memberData != inlineProperties)
+ delete [] memberData;
+ delete [] (arrayData - (sparseArray ? 0 : arrayOffset));
+ if (arrayAttributes)
+ delete [] (arrayAttributes - (sparseArray ? 0 : arrayOffset));
+ delete sparseArray;
+ _data = 0;
+}
+
+void Object::destroy(Managed *that)
+{
+ static_cast<Object *>(that)->~Object();
+}
+
+void Object::put(ExecutionContext *ctx, const QString &name, const Value &value)
+{
+ put(ctx, ctx->engine->newString(name), value);
+}
+
+Value Object::getValue(const Value &thisObject, ExecutionContext *ctx, const Property *p, PropertyAttributes attrs)
+{
+ if (!attrs.isAccessor())
+ return p->value;
+ FunctionObject *getter = p->getter();
+ if (!getter)
+ return Value::undefinedValue();
+
+ return getter->call(ctx, thisObject, 0, 0);
+}
+
+void Object::putValue(ExecutionContext *ctx, Property *pd, PropertyAttributes attrs, const Value &value)
+{
+ if (attrs.isAccessor()) {
+ if (pd->set) {
+ Value args[1];
+ args[0] = value;
+ pd->set->call(ctx, Value::fromObject(this), args, 1);
+ return;
+ }
+ goto reject;
+ }
+
+ if (!attrs.isWritable())
+ goto reject;
+
+ pd->value = value;
+ return;
+
+ reject:
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+
+}
+
+void Object::inplaceBinOp(ExecutionContext *ctx, BinOp op, String *name, const Value &rhs)
+{
+ Value v = get(ctx, name);
+ Value result;
+ op(ctx, &result, v, rhs);
+ put(ctx, name, result);
+}
+
+void Object::inplaceBinOp(ExecutionContext *ctx, BinOp op, const Value &index, const Value &rhs)
+{
+ uint idx = index.asArrayIndex();
+ if (idx < UINT_MAX) {
+ bool hasProperty = false;
+ Value v = getIndexed(ctx, idx, &hasProperty);
+ Value result;
+ op(ctx, &result, v, rhs);
+ putIndexed(ctx, idx, result);
+ return;
+ }
+ String *name = index.toString(ctx);
+ assert(name);
+ inplaceBinOp(ctx, op, name, rhs);
+}
+
+void Object::defineDefaultProperty(String *name, Value value)
+{
+ Property *pd = insertMember(name, Attr_Data|Attr_NotEnumerable);
+ pd->value = value;
+}
+
+void Object::defineDefaultProperty(ExecutionContext *context, const QString &name, Value value)
+{
+ defineDefaultProperty(context->engine->newIdentifier(name), value);
+}
+
+void Object::defineDefaultProperty(ExecutionContext *context, const QString &name, Value (*code)(SimpleCallContext *), int argumentCount)
+{
+ Q_UNUSED(argumentCount);
+ String *s = context->engine->newIdentifier(name);
+ FunctionObject* function = context->engine->newBuiltinFunction(context, s, code);
+ function->defineReadonlyProperty(context->engine->id_length, Value::fromInt32(argumentCount));
+ defineDefaultProperty(s, Value::fromObject(function));
+}
+
+void Object::defineReadonlyProperty(ExecutionEngine *engine, const QString &name, Value value)
+{
+ defineReadonlyProperty(engine->newIdentifier(name), value);
+}
+
+void Object::defineReadonlyProperty(String *name, Value value)
+{
+ Property *pd = insertMember(name, Attr_ReadOnly);
+ pd->value = value;
+}
+
+void Object::markObjects(Managed *that)
+{
+ Object *o = static_cast<Object *>(that);
+ if (o->prototype)
+ o->prototype->mark();
+
+ for (int i = 0; i < o->internalClass->size; ++i) {
+ const Property &pd = o->memberData[i];
+ if (o->internalClass->propertyData[i].isData()) {
+ if (Managed *m = pd.value.asManaged())
+ m->mark();
+ } else {
+ if (pd.getter())
+ pd.getter()->mark();
+ if (pd.setter())
+ pd.setter()->mark();
+ }
+ }
+ o->markArrayObjects();
+}
+
+Property *Object::insertMember(String *s, PropertyAttributes attributes)
+{
+ uint idx;
+ internalClass = internalClass->addMember(s, attributes, &idx);
+
+ if (idx >= memberDataAlloc) {
+ memberDataAlloc = qMax((uint)8, 2*memberDataAlloc);
+ Property *newMemberData = new Property[memberDataAlloc];
+ memcpy(newMemberData, memberData, sizeof(Property)*idx);
+ if (memberData != inlineProperties)
+ delete [] memberData;
+ memberData = newMemberData;
+ }
+ return memberData + idx;
+}
+
+// Section 8.12.1
+Property *Object::__getOwnProperty__(String *name, PropertyAttributes *attrs)
+{
+ uint idx = name->asArrayIndex();
+ if (idx != UINT_MAX)
+ return __getOwnProperty__(idx, attrs);
+
+ uint member = internalClass->find(name);
+ if (member < UINT_MAX) {
+ if (attrs)
+ *attrs = internalClass->propertyData[member];
+ return memberData + member;
+ }
+
+ if (attrs)
+ *attrs = Attr_Invalid;
+ return 0;
+}
+
+Property *Object::__getOwnProperty__(uint index, PropertyAttributes *attrs)
+{
+ uint pidx = propertyIndexFromArrayIndex(index);
+ if (pidx < UINT_MAX) {
+ Property *p = arrayData + pidx;
+ if (!arrayAttributes || arrayAttributes[pidx].isData()) {
+ if (attrs)
+ *attrs = arrayAttributes ? arrayAttributes[pidx] : PropertyAttributes(Attr_Data);
+ return p;
+ } else if (arrayAttributes[pidx].isAccessor()) {
+ if (attrs)
+ *attrs = arrayAttributes ? arrayAttributes[pidx] : PropertyAttributes(Attr_Accessor);
+ return p;
+ }
+ }
+ if (isStringObject()) {
+ if (attrs)
+ *attrs = Attr_NotConfigurable|Attr_NotWritable;
+ return static_cast<StringObject *>(this)->getIndex(index);
+ }
+
+ if (attrs)
+ *attrs = Attr_Invalid;
+ return 0;
+}
+
+// Section 8.12.2
+Property *Object::__getPropertyDescriptor__(String *name, PropertyAttributes *attrs) const
+{
+ uint idx = name->asArrayIndex();
+ if (idx != UINT_MAX)
+ return __getPropertyDescriptor__(idx);
+
+
+ const Object *o = this;
+ while (o) {
+ uint idx = o->internalClass->find(name);
+ if (idx < UINT_MAX) {
+ if (attrs)
+ *attrs = o->internalClass->propertyData[idx];
+ return o->memberData + idx;
+ }
+
+ o = o->prototype;
+ }
+ if (attrs)
+ *attrs = Attr_Invalid;
+ return 0;
+}
+
+Property *Object::__getPropertyDescriptor__(uint index, PropertyAttributes *attrs) const
+{
+ const Object *o = this;
+ while (o) {
+ uint pidx = o->propertyIndexFromArrayIndex(index);
+ if (pidx < UINT_MAX) {
+ Property *p = o->arrayData + pidx;
+ if (!o->arrayAttributes || !o->arrayAttributes[pidx].isGeneric()) {
+ if (attrs)
+ *attrs = o->arrayAttributes ? o->arrayAttributes[pidx] : PropertyAttributes(Attr_Data);
+ return p;
+ }
+ }
+ if (o->isStringObject()) {
+ Property *p = static_cast<const StringObject *>(o)->getIndex(index);
+ if (p) {
+ if (attrs)
+ *attrs = (Attr_NotWritable|Attr_NotConfigurable);
+ return p;
+ }
+ }
+ o = o->prototype;
+ }
+ if (attrs)
+ *attrs = Attr_Invalid;
+ return 0;
+}
+
+Value Object::get(Managed *m, ExecutionContext *ctx, String *name, bool *hasProperty)
+{
+ return static_cast<Object *>(m)->internalGet(ctx, name, hasProperty);
+}
+
+Value Object::getIndexed(Managed *m, ExecutionContext *ctx, uint index, bool *hasProperty)
+{
+ return static_cast<Object *>(m)->internalGetIndexed(ctx, index, hasProperty);
+}
+
+void Object::put(Managed *m, ExecutionContext *ctx, String *name, const Value &value)
+{
+ static_cast<Object *>(m)->internalPut(ctx, name, value);
+}
+
+void Object::putIndexed(Managed *m, ExecutionContext *ctx, uint index, const Value &value)
+{
+ static_cast<Object *>(m)->internalPutIndexed(ctx, index, value);
+}
+
+PropertyAttributes Object::query(Managed *m, ExecutionContext *ctx, String *name)
+{
+ uint idx = name->asArrayIndex();
+ if (idx != UINT_MAX)
+ return queryIndexed(m, ctx, idx);
+
+ const Object *o = static_cast<Object *>(m);
+ while (o) {
+ uint idx = o->internalClass->find(name);
+ if (idx < UINT_MAX)
+ return o->internalClass->propertyData[idx];
+
+ o = o->prototype;
+ }
+ return Attr_Invalid;
+}
+
+PropertyAttributes Object::queryIndexed(Managed *m, ExecutionContext *ctx, uint index)
+{
+ const Object *o = static_cast<Object *>(m);
+ while (o) {
+ uint pidx = o->propertyIndexFromArrayIndex(index);
+ if (pidx < UINT_MAX) {
+ if (o->arrayAttributes)
+ return o->arrayAttributes[pidx];
+ return Attr_Data;
+ }
+ if (o->isStringObject()) {
+ Property *p = static_cast<const StringObject *>(o)->getIndex(index);
+ if (p)
+ return Attr_Data;
+ }
+ o = o->prototype;
+ }
+ return Attr_Invalid;
+}
+
+bool Object::deleteProperty(Managed *m, ExecutionContext *ctx, String *name)
+{
+ return static_cast<Object *>(m)->internalDeleteProperty(ctx, name);
+}
+
+bool Object::deleteIndexedProperty(Managed *m, ExecutionContext *ctx, uint index)
+{
+ return static_cast<Object *>(m)->internalDeleteIndexedProperty(ctx, index);
+}
+
+
+// Section 8.12.3
+Value Object::internalGet(ExecutionContext *ctx, String *name, bool *hasProperty)
+{
+ uint idx = name->asArrayIndex();
+ if (idx != UINT_MAX)
+ return getIndexed(ctx, idx, hasProperty);
+
+ name->makeIdentifier(ctx);
+
+ if (name->isEqualTo(ctx->engine->id___proto__)) {
+ if (hasProperty)
+ *hasProperty = true;
+ return Value::fromObject(prototype);
+ }
+
+ Object *o = this;
+ while (o) {
+ uint idx = o->internalClass->find(name);
+ if (idx < UINT_MAX) {
+ if (hasProperty)
+ *hasProperty = true;
+ return getValue(ctx, o->memberData + idx, o->internalClass->propertyData.at(idx));
+ }
+
+ o = o->prototype;
+ }
+
+ if (hasProperty)
+ *hasProperty = false;
+ return Value::undefinedValue();
+}
+
+Value Object::internalGetIndexed(ExecutionContext *ctx, uint index, bool *hasProperty)
+{
+ Property *pd = 0;
+ PropertyAttributes attrs = Attr_Data;
+ Object *o = this;
+ while (o) {
+ uint pidx = o->propertyIndexFromArrayIndex(index);
+ if (pidx < UINT_MAX) {
+ if (!o->arrayAttributes || !o->arrayAttributes[pidx].isGeneric()) {
+ pd = o->arrayData + pidx;
+ if (o->arrayAttributes)
+ attrs = o->arrayAttributes[pidx];
+ break;
+ }
+ }
+ if (o->isStringObject()) {
+ pd = static_cast<StringObject *>(o)->getIndex(index);
+ if (pd) {
+ attrs = (Attr_NotWritable|Attr_NotConfigurable);
+ break;
+ }
+ }
+ o = o->prototype;
+ }
+
+ if (pd) {
+ if (hasProperty)
+ *hasProperty = true;
+ return getValue(ctx, pd, attrs);
+ }
+
+ if (hasProperty)
+ *hasProperty = false;
+ return Value::undefinedValue();
+}
+
+
+// Section 8.12.5
+void Object::internalPut(ExecutionContext *ctx, String *name, const Value &value)
+{
+ uint idx = name->asArrayIndex();
+ if (idx != UINT_MAX)
+ return putIndexed(ctx, idx, value);
+
+ name->makeIdentifier(ctx);
+
+ uint member = internalClass->find(name);
+ Property *pd = 0;
+ PropertyAttributes attrs;
+ if (member < UINT_MAX) {
+ pd = memberData + member;
+ attrs = internalClass->propertyData[member];
+ }
+
+ // clause 1
+ if (pd) {
+ if (attrs.isAccessor()) {
+ if (pd->setter())
+ goto cont;
+ goto reject;
+ } else if (!attrs.isWritable())
+ goto reject;
+ else if (isArrayObject() && name->isEqualTo(ctx->engine->id_length)) {
+ bool ok;
+ uint l = value.asArrayLength(&ok);
+ if (!ok)
+ ctx->throwRangeError(value);
+ ok = setArrayLength(l);
+ if (!ok)
+ goto reject;
+ } else {
+ pd->value = value;
+ }
+ return;
+ } else if (!prototype) {
+ if (!extensible)
+ goto reject;
+ } else {
+ // clause 4
+ if ((pd = prototype->__getPropertyDescriptor__(name, &attrs))) {
+ if (attrs.isAccessor()) {
+ if (!pd->setter())
+ goto reject;
+ } else if (!extensible || !attrs.isWritable()) {
+ goto reject;
+ }
+ } else if (!extensible) {
+ goto reject;
+ }
+ }
+
+ cont:
+
+ // Clause 5
+ if (pd && attrs.isAccessor()) {
+ assert(pd->setter() != 0);
+
+ Value args[1];
+ args[0] = value;
+ pd->setter()->call(ctx, Value::fromObject(this), args, 1);
+ return;
+ }
+
+ {
+ Property *p = insertMember(name, Attr_Data);
+ p->value = value;
+ return;
+ }
+
+ reject:
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+}
+
+void Object::internalPutIndexed(ExecutionContext *ctx, uint index, const Value &value)
+{
+ Property *pd = 0;
+ PropertyAttributes attrs;
+
+ uint pidx = propertyIndexFromArrayIndex(index);
+ if (pidx < UINT_MAX) {
+ if (arrayAttributes && arrayAttributes[pidx].isGeneric()) {
+ pidx = UINT_MAX;
+ } else {
+ pd = arrayData + pidx;
+ attrs = arrayAttributes ? arrayAttributes[pidx] : PropertyAttributes(Attr_Data);
+ }
+ }
+
+ if (!pd && isStringObject()) {
+ pd = static_cast<StringObject *>(this)->getIndex(index);
+ if (pd)
+ // not writable
+ goto reject;
+ }
+
+ // clause 1
+ if (pd) {
+ if (attrs.isAccessor()) {
+ if (pd->setter())
+ goto cont;
+ goto reject;
+ } else if (!attrs.isWritable())
+ goto reject;
+ else
+ pd->value = value;
+ return;
+ } else if (!prototype) {
+ if (!extensible)
+ goto reject;
+ } else {
+ // clause 4
+ if ((pd = prototype->__getPropertyDescriptor__(index, &attrs))) {
+ if (attrs.isAccessor()) {
+ if (!pd->setter())
+ goto reject;
+ } else if (!extensible || !attrs.isWritable()) {
+ goto reject;
+ }
+ } else if (!extensible) {
+ goto reject;
+ }
+ }
+
+ cont:
+
+ // Clause 5
+ if (pd && attrs.isAccessor()) {
+ assert(pd->setter() != 0);
+
+ Value args[1];
+ args[0] = value;
+ pd->setter()->call(ctx, Value::fromObject(this), args, 1);
+ return;
+ }
+
+ arraySet(index, value);
+ return;
+
+ reject:
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+}
+
+// Section 8.12.7
+bool Object::internalDeleteProperty(ExecutionContext *ctx, String *name)
+{
+ uint idx = name->asArrayIndex();
+ if (idx != UINT_MAX)
+ return deleteIndexedProperty(ctx, idx);
+
+ name->makeIdentifier(ctx);
+
+ uint memberIdx = internalClass->find(name);
+ if (memberIdx != UINT_MAX) {
+ if (internalClass->propertyData[memberIdx].isConfigurable()) {
+ internalClass->removeMember(this, name->identifier);
+ memmove(memberData + memberIdx, memberData + memberIdx + 1, (internalClass->size - memberIdx)*sizeof(Property));
+ return true;
+ }
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+ return false;
+ }
+
+ return true;
+}
+
+bool Object::internalDeleteIndexedProperty(ExecutionContext *ctx, uint index)
+{
+ uint pidx = propertyIndexFromArrayIndex(index);
+ if (pidx == UINT_MAX)
+ return true;
+ if (arrayAttributes && arrayAttributes[pidx].isGeneric())
+ return true;
+
+ if (!arrayAttributes || arrayAttributes[pidx].isConfigurable()) {
+ arrayData[pidx].value = Value::undefinedValue();
+ if (!arrayAttributes)
+ ensureArrayAttributes();
+ arrayAttributes[pidx].clear();
+ if (sparseArray) {
+ arrayData[pidx].value.int_32 = arrayFreeList;
+ arrayFreeList = pidx;
+ }
+ return true;
+ }
+
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+ return false;
+}
+
+// Section 8.12.9
+bool Object::__defineOwnProperty__(ExecutionContext *ctx, String *name, const Property &p, PropertyAttributes attrs)
+{
+ uint idx = name->asArrayIndex();
+ if (idx != UINT_MAX)
+ return __defineOwnProperty__(ctx, idx, p, attrs);
+
+ name->makeIdentifier(ctx);
+
+ Property *current;
+ PropertyAttributes *cattrs;
+
+ if (isArrayObject() && name->isEqualTo(ctx->engine->id_length)) {
+ assert(ArrayObject::LengthPropertyIndex == internalClass->find(ctx->engine->id_length));
+ Property *lp = memberData + ArrayObject::LengthPropertyIndex;
+ cattrs = internalClass->propertyData.data() + ArrayObject::LengthPropertyIndex;
+ if (attrs.isEmpty() || p.isSubset(attrs, *lp, *cattrs))
+ return true;
+ if (!cattrs->isWritable() || attrs.type() == PropertyAttributes::Accessor || attrs.isConfigurable() || attrs.isEnumerable())
+ goto reject;
+ bool succeeded = true;
+ if (attrs.type() == PropertyAttributes::Data) {
+ bool ok;
+ uint l = p.value.asArrayLength(&ok);
+ if (!ok)
+ ctx->throwRangeError(p.value);
+ succeeded = setArrayLength(l);
+ }
+ if (attrs.hasWritable() && !attrs.isWritable())
+ cattrs->setWritable(false);
+ if (!succeeded)
+ goto reject;
+ return true;
+ }
+
+ // Clause 1
+ {
+ uint member = internalClass->find(name);
+ current = (member < UINT_MAX) ? memberData + member : 0;
+ cattrs = internalClass->propertyData.data() + member;
+ }
+
+ if (!current) {
+ // clause 3
+ if (!extensible)
+ goto reject;
+ // clause 4
+ Property *pd = insertMember(name, attrs);
+ *pd = p;
+ pd->fullyPopulated(&attrs);
+ return true;
+ }
+
+ return __defineOwnProperty__(ctx, current, name, p, attrs);
+reject:
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+ return false;
+}
+
+bool Object::__defineOwnProperty__(ExecutionContext *ctx, uint index, const Property &p, PropertyAttributes attrs)
+{
+ Property *current = 0;
+
+ // 15.4.5.1, 4b
+ if (isArrayObject() && index >= arrayLength() && !internalClass->propertyData[ArrayObject::LengthPropertyIndex].isWritable())
+ goto reject;
+
+ if (isNonStrictArgumentsObject)
+ return static_cast<ArgumentsObject *>(this)->defineOwnProperty(ctx, index, p, attrs);
+
+ // Clause 1
+ {
+ uint pidx = propertyIndexFromArrayIndex(index);
+ if (pidx < UINT_MAX && (!arrayAttributes || !arrayAttributes[pidx].isGeneric()))
+ current = arrayData + pidx;
+ if (!current && isStringObject())
+ current = static_cast<StringObject *>(this)->getIndex(index);
+ }
+
+ if (!current) {
+ // clause 3
+ if (!extensible)
+ goto reject;
+ // clause 4
+ Property *pd = arrayInsert(index, attrs);
+ *pd = p;
+ pd->fullyPopulated(&attrs);
+ return true;
+ }
+
+ return __defineOwnProperty__(ctx, current, 0 /*member*/, p, attrs);
+reject:
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+ return false;
+}
+
+bool Object::__defineOwnProperty__(ExecutionContext *ctx, Property *current, String *member, const Property &p, PropertyAttributes attrs)
+{
+ // clause 5
+ if (attrs.isEmpty())
+ return true;
+
+ PropertyAttributes cattrs = Attr_Data;
+ if (member)
+ cattrs = internalClass->propertyData[current - memberData];
+ else if (arrayAttributes)
+ cattrs = arrayAttributes[current - arrayData];
+
+ // clause 6
+ if (p.isSubset(attrs, *current, cattrs))
+ return true;
+
+ // clause 7
+ if (!cattrs.isConfigurable()) {
+ if (attrs.isConfigurable())
+ goto reject;
+ if (attrs.hasEnumerable() && attrs.isEnumerable() != cattrs.isEnumerable())
+ goto reject;
+ }
+
+ // clause 8
+ if (attrs.isGeneric())
+ goto accept;
+
+ // clause 9
+ if (cattrs.isData() != attrs.isData()) {
+ // 9a
+ if (!cattrs.isConfigurable())
+ goto reject;
+ if (cattrs.isData()) {
+ // 9b
+ cattrs.setType(PropertyAttributes::Accessor);
+ cattrs.clearWritable();
+ current->setGetter(0);
+ current->setSetter(0);
+ } else {
+ // 9c
+ cattrs.setType(PropertyAttributes::Data);
+ cattrs.setWritable(false);
+ current->value = Value::undefinedValue();
+ }
+ } else if (cattrs.isData() && attrs.isData()) { // clause 10
+ if (!cattrs.isConfigurable() && !cattrs.isWritable()) {
+ if (attrs.isWritable() || !current->value.sameValue(p.value))
+ goto reject;
+ }
+    } else { // clause 11
+ assert(cattrs.isAccessor() && attrs.isAccessor());
+ if (!cattrs.isConfigurable()) {
+ if (p.getter() && !(current->getter() == p.getter() || (!current->getter() && (quintptr)p.getter() == 0x1)))
+ goto reject;
+ if (p.setter() && !(current->setter() == p.setter() || (!current->setter() && (quintptr)p.setter() == 0x1)))
+ goto reject;
+ }
+ }
+
+ accept:
+
+ current->merge(cattrs, p, attrs);
+ if (member) {
+ internalClass = internalClass->changeMember(member, cattrs);
+ } else {
+ if (cattrs != Attr_Data)
+ ensureArrayAttributes();
+ if (arrayAttributes)
+ arrayAttributes[current - arrayData] = cattrs;
+ }
+ return true;
+ reject:
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+ return false;
+}
+
+
+bool Object::__defineOwnProperty__(ExecutionContext *ctx, const QString &name, const Property &p, PropertyAttributes attrs)
+{
+ return __defineOwnProperty__(ctx, ctx->engine->newString(name), p, attrs);
+}
+
+
+void Object::copyArrayData(Object *other)
+{
+ arrayReserve(other->arrayDataLen);
+ arrayDataLen = other->arrayDataLen;
+ memcpy(arrayData, other->arrayData, arrayDataLen*sizeof(Property));
+ arrayOffset = 0;
+ if (other->sparseArray) {
+ sparseArray = new SparseArray(*other->sparseArray);
+ arrayFreeList = other->arrayFreeList;
+ }
+ if (isArrayObject())
+ setArrayLengthUnchecked(other->arrayLength());
+}
+
+
+Value Object::arrayIndexOf(Value v, uint fromIndex, uint endIndex, ExecutionContext *ctx, Object *o)
+{
+ bool protoHasArray = false;
+ Object *p = o;
+ while ((p = p->prototype))
+ if (p->arrayDataLen)
+ protoHasArray = true;
+
+ if (protoHasArray || o->arrayAttributes) {
+ // lets be safe and slow
+ for (uint i = fromIndex; i < endIndex; ++i) {
+ bool exists;
+ Value value = o->getIndexed(ctx, i, &exists);
+ if (exists && __qmljs_strict_equal(value, v))
+ return Value::fromDouble(i);
+ }
+ } else if (sparseArray) {
+ for (SparseArrayNode *n = sparseArray->lowerBound(fromIndex); n != sparseArray->end() && n->key() < endIndex; n = n->nextNode()) {
+ Value value = o->getValue(ctx, arrayData + n->value, arrayAttributes ? arrayAttributes[n->value] : Attr_Data);
+ if (__qmljs_strict_equal(value, v))
+ return Value::fromDouble(n->key());
+ }
+ } else {
+ if ((int) endIndex > arrayDataLen)
+ endIndex = arrayDataLen;
+ Property *pd = arrayData;
+ Property *end = pd + endIndex;
+ pd += fromIndex;
+ while (pd < end) {
+ if (!arrayAttributes || !arrayAttributes[pd - arrayData].isGeneric()) {
+ Value value = o->getValue(ctx, pd, arrayAttributes ? arrayAttributes[pd - arrayData] : Attr_Data);
+ if (__qmljs_strict_equal(value, v))
+ return Value::fromDouble(pd - arrayData);
+ }
+ ++pd;
+ }
+ }
+ return Value::fromInt32(-1);
+}
+
+void Object::arrayConcat(const ArrayObject *other)
+{
+ int newLen = arrayDataLen + other->arrayLength();
+ if (other->sparseArray)
+ initSparse();
+ // ### copy attributes as well!
+ if (sparseArray) {
+ if (other->sparseArray) {
+ for (const SparseArrayNode *it = other->sparseArray->begin(); it != other->sparseArray->end(); it = it->nextNode())
+ arraySet(arrayDataLen + it->key(), other->arrayData + it->value);
+ } else {
+ int oldSize = arrayDataLen;
+ arrayReserve(oldSize + other->arrayLength());
+ memcpy(arrayData + oldSize, other->arrayData, other->arrayLength()*sizeof(Property));
+ if (arrayAttributes)
+ std::fill(arrayAttributes + oldSize, arrayAttributes + oldSize + other->arrayLength(), PropertyAttributes(Attr_Data));
+ for (uint i = 0; i < other->arrayLength(); ++i) {
+ SparseArrayNode *n = sparseArray->insert(arrayDataLen + i);
+ n->value = oldSize + i;
+ }
+ }
+ } else {
+ int oldSize = arrayLength();
+ arrayReserve(oldSize + other->arrayDataLen);
+ if (oldSize > arrayDataLen) {
+ ensureArrayAttributes();
+ std::fill(arrayAttributes + arrayDataLen, arrayAttributes + oldSize, PropertyAttributes());
+ }
+ arrayDataLen = oldSize + other->arrayDataLen;
+ if (other->arrayAttributes) {
+ for (int i = 0; i < arrayDataLen; ++i) {
+ bool exists;
+ arrayData[oldSize + i].value = const_cast<ArrayObject *>(other)->getIndexed(internalClass->engine->current, i, &exists);
+ if (arrayAttributes)
+ arrayAttributes[oldSize + i] = Attr_Data;
+ if (!exists) {
+ ensureArrayAttributes();
+ arrayAttributes[oldSize + i].clear();
+ }
+ }
+ } else {
+ memcpy(arrayData + oldSize, other->arrayData, other->arrayDataLen*sizeof(Property));
+ if (arrayAttributes)
+ std::fill(arrayAttributes + oldSize, arrayAttributes + oldSize + other->arrayDataLen, PropertyAttributes(Attr_Data));
+ }
+ }
+ setArrayLengthUnchecked(newLen);
+}
+
+void Object::arraySort(ExecutionContext *context, Object *thisObject, const Value &comparefn, uint len)
+{
+ if (!arrayDataLen)
+ return;
+
+ if (sparseArray) {
+ context->throwUnimplemented("Object::sort unimplemented for sparse arrays");
+ return;
+ }
+
+ if (len > arrayDataLen)
+ len = arrayDataLen;
+
+ // The spec says the sorting goes through a series of get,put and delete operations.
+ // this implies that the attributes don't get sorted around.
+ // behavior of accessor properties is implementation defined. We simply turn them all
+ // into data properties and then sort. This is in line with the sentence above.
+ if (arrayAttributes) {
+ for (uint i = 0; i < len; i++) {
+ if (arrayAttributes[i].isGeneric()) {
+ while (--len > i)
+ if (!arrayAttributes[len].isGeneric())
+ break;
+ arrayData[i].value = getValue(context, arrayData + len, arrayAttributes[len]);
+ arrayAttributes[i] = Attr_Data;
+ arrayAttributes[len].clear();
+ } else if (arrayAttributes[i].isAccessor()) {
+ arrayData[i].value = getValue(context, arrayData + i, arrayAttributes[i]);
+ arrayAttributes[i] = Attr_Data;
+ }
+ }
+ }
+
+ ArrayElementLessThan lessThan(context, thisObject, comparefn);
+
+ Property *begin = arrayData;
+ std::sort(begin, begin + len, lessThan);
+}
+
+
+void Object::initSparse()
+{
+ if (!sparseArray) {
+ sparseArray = new SparseArray;
+ for (int i = 0; i < arrayDataLen; ++i) {
+ if (!arrayAttributes || !arrayAttributes[i].isGeneric()) {
+ SparseArrayNode *n = sparseArray->insert(i);
+ n->value = i + arrayOffset;
+ }
+ }
+
+ uint off = arrayOffset;
+ if (!arrayOffset) {
+ arrayFreeList = arrayDataLen;
+ } else {
+ arrayFreeList = 0;
+ arrayData -= off;
+ arrayAlloc += off;
+ int o = off;
+ for (int i = 0; i < o - 1; ++i) {
+ arrayData[i].value = Value::fromInt32(i + 1);
+ }
+ arrayData[o - 1].value = Value::fromInt32(arrayDataLen + off);
+ }
+ for (int i = arrayDataLen + off; i < arrayAlloc; ++i) {
+ arrayData[i].value = Value::fromInt32(i + 1);
+ }
+ }
+}
+
+void Object::arrayReserve(uint n)
+{
+ if (n < 8)
+ n = 8;
+ if (n >= arrayAlloc) {
+ uint off;
+ if (sparseArray) {
+ assert(arrayFreeList == arrayAlloc);
+ // ### FIXME
+ arrayDataLen = arrayAlloc;
+ off = 0;
+ } else {
+ off = arrayOffset;
+ }
+ arrayAlloc = qMax(n, 2*arrayAlloc);
+ Property *newArrayData = new Property[arrayAlloc];
+ if (arrayData) {
+ memcpy(newArrayData, arrayData, sizeof(Property)*arrayDataLen);
+ delete [] (arrayData - off);
+ }
+ arrayData = newArrayData;
+ if (sparseArray) {
+ for (uint i = arrayFreeList; i < arrayAlloc; ++i) {
+ arrayData[i].value = Value::deletedValue();
+ arrayData[i].value = Value::fromInt32(i + 1);
+ }
+ } else {
+ arrayOffset = 0;
+ }
+
+ if (arrayAttributes) {
+ PropertyAttributes *newAttrs = new PropertyAttributes[arrayAlloc];
+ memcpy(newAttrs, arrayAttributes, sizeof(PropertyAttributes)*arrayDataLen);
+ delete [] (arrayAttributes - off);
+
+ arrayAttributes = newAttrs;
+ if (sparseArray) {
+ for (uint i = arrayFreeList; i < arrayAlloc; ++i)
+ arrayAttributes[i] = Attr_Invalid;
+ }
+ }
+ }
+}
+
+void Object::ensureArrayAttributes()
+{
+ if (arrayAttributes)
+ return;
+
+ arrayAttributes = new PropertyAttributes[arrayAlloc];
+ for (uint i = 0; i < arrayDataLen; ++i)
+ arrayAttributes[i] = Attr_Data;
+ for (uint i = arrayDataLen; i < arrayAlloc; ++i)
+ arrayAttributes[i] = Attr_Invalid;
+}
+
+
+bool Object::setArrayLength(uint newLen) {
+ assert(isArrayObject());
+ const Property *lengthProperty = memberData + ArrayObject::LengthPropertyIndex;
+ if (lengthProperty && !internalClass->propertyData[ArrayObject::LengthPropertyIndex].isWritable())
+ return false;
+ uint oldLen = arrayLength();
+ bool ok = true;
+ if (newLen < oldLen) {
+ if (sparseArray) {
+ SparseArrayNode *begin = sparseArray->lowerBound(newLen);
+ if (begin != sparseArray->end()) {
+ SparseArrayNode *it = sparseArray->end()->previousNode();
+ while (1) {
+ Property &pd = arrayData[it->value];
+ if (arrayAttributes) {
+ if (!arrayAttributes[it->value].isConfigurable()) {
+ ok = false;
+ newLen = it->key() + 1;
+ break;
+ } else {
+ arrayAttributes[it->value].clear();
+ }
+ }
+ pd.value.tag = Value::_Deleted_Type;
+ pd.value.int_32 = arrayFreeList;
+ arrayFreeList = it->value;
+ bool brk = (it == begin);
+ SparseArrayNode *prev = it->previousNode();
+ sparseArray->erase(it);
+ if (brk)
+ break;
+ it = prev;
+ }
+ }
+ } else {
+ Property *it = arrayData + arrayDataLen;
+ const Property *begin = arrayData + newLen;
+ while (--it >= begin) {
+ if (arrayAttributes) {
+ if (!arrayAttributes[it - arrayData].isEmpty() && !arrayAttributes[it - arrayData].isConfigurable()) {
+ ok = false;
+ newLen = it - arrayData + 1;
+ break;
+ } else {
+ arrayAttributes[it - arrayData].clear();
+ }
+ it->value = Value::deletedValue();
+ }
+ }
+ arrayDataLen = newLen;
+ }
+ } else {
+ if (newLen >= 0x100000)
+ initSparse();
+ }
+ setArrayLengthUnchecked(newLen);
+ return ok;
+}
+
+void Object::markArrayObjects() const
+{
+ for (uint i = 0; i < arrayDataLen; ++i) {
+ const Property &pd = arrayData[i];
+ if (!arrayAttributes || arrayAttributes[i].isData()) {
+ if (Managed *m = pd.value.asManaged())
+ m->mark();
+ } else if (arrayAttributes[i].isAccessor()) {
+ if (pd.getter())
+ pd.getter()->mark();
+ if (pd.setter())
+ pd.setter()->mark();
+ }
+ }
+}
+
+void ArrayObject::init(ExecutionContext *context)
+{
+ type = Type_ArrayObject;
+ internalClass = context->engine->arrayClass;
+
+ memberData = new Property[4];
+ memberData[LengthPropertyIndex].value = Value::fromInt32(0);
+}
+
+
+DEFINE_MANAGED_VTABLE(ForEachIteratorObject);
+
+void ForEachIteratorObject::markObjects(Managed *that)
+{
+ ForEachIteratorObject *o = static_cast<ForEachIteratorObject *>(that);
+ Object::markObjects(that);
+ if (o->it.object)
+ o->it.object->mark();
+}
diff --git a/src/qml/qml/v4vm/qv4object.h b/src/qml/qml/v4vm/qv4object.h
new file mode 100644
index 0000000000..d1bac5f03f
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4object.h
@@ -0,0 +1,417 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QMLJS_OBJECTS_H
+#define QMLJS_OBJECTS_H
+
+#include "qv4global.h"
+#include "qv4runtime.h"
+#include "qv4engine.h"
+#include "qv4context.h"
+#include "qv4sparsearray.h"
+#include "qv4string.h"
+#include "qv4codegen_p.h"
+#include "qv4isel_p.h"
+#include "qv4managed.h"
+#include "qv4property.h"
+#include "qv4internalclass.h"
+#include "qv4objectiterator.h"
+
+#include <QtCore/QString>
+#include <QtCore/QHash>
+#include <QtCore/QScopedPointer>
+#include <cstdio>
+#include <cassert>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct Value;
+struct Function;
+struct Lookup;
+struct Object;
+struct ObjectIterator;
+struct BooleanObject;
+struct NumberObject;
+struct StringObject;
+struct ArrayObject;
+struct DateObject;
+struct FunctionObject;
+struct RegExpObject;
+struct ErrorObject;
+struct ArgumentsObject;
+struct ExecutionContext;
+struct CallContext;
+struct ExecutionEngine;
+class MemoryManager;
+
+struct ObjectPrototype;
+struct StringPrototype;
+struct NumberPrototype;
+struct BooleanPrototype;
+struct ArrayPrototype;
+struct FunctionPrototype;
+struct DatePrototype;
+struct RegExpPrototype;
+struct ErrorPrototype;
+struct EvalErrorPrototype;
+struct RangeErrorPrototype;
+struct ReferenceErrorPrototype;
+struct SyntaxErrorPrototype;
+struct TypeErrorPrototype;
+struct URIErrorPrototype;
+
+
+struct Q_V4_EXPORT Object: Managed {
+
+ class ExternalResource {
+ public:
+ virtual ~ExternalResource() {}
+ };
+
+ Object *prototype;
+ InternalClass *internalClass;
+ uint memberDataAlloc;
+ Property *memberData;
+
+ union {
+ uint arrayFreeList;
+ uint arrayOffset;
+ };
+ uint arrayDataLen;
+ uint arrayAlloc;
+ PropertyAttributes *arrayAttributes;
+ Property *arrayData;
+ SparseArray *sparseArray;
+ ExternalResource *externalResource;
+
+ enum {
+ InlinePropertySize = 4
+ };
+ Property inlineProperties[InlinePropertySize];
+
+ Object(ExecutionEngine *engine);
+ Object(ExecutionContext *context);
+ ~Object();
+
+ Property *__getOwnProperty__(String *name, PropertyAttributes *attrs = 0);
+ Property *__getOwnProperty__(uint index, PropertyAttributes *attrs = 0);
+
+ Property *__getPropertyDescriptor__(String *name, PropertyAttributes *attrs = 0) const;
+ Property *__getPropertyDescriptor__(uint index, PropertyAttributes *attrs = 0) const;
+
+ bool __hasProperty__(String *name) const {
+ return __getPropertyDescriptor__(name);
+ }
+ bool __hasProperty__(uint index) const {
+ return __getPropertyDescriptor__(index);
+ }
+
+ bool __defineOwnProperty__(ExecutionContext *ctx, Property *current, String *member, const Property &p, PropertyAttributes attrs);
+ bool __defineOwnProperty__(ExecutionContext *ctx, String *name, const Property &p, PropertyAttributes attrs);
+ bool __defineOwnProperty__(ExecutionContext *ctx, uint index, const Property &p, PropertyAttributes attrs);
+ bool __defineOwnProperty__(ExecutionContext *ctx, const QString &name, const Property &p, PropertyAttributes attrs);
+
+ //
+ // helpers
+ //
+ void put(ExecutionContext *ctx, const QString &name, const Value &value);
+
+ static Value getValue(const Value &thisObject, ExecutionContext *ctx, const Property *p, PropertyAttributes attrs);
+ Value getValue(ExecutionContext *ctx, const Property *p, PropertyAttributes attrs) const {
+ return getValue(Value::fromObject(const_cast<Object *>(this)), ctx, p, attrs);
+ }
+
+ void putValue(ExecutionContext *ctx, Property *pd, PropertyAttributes attrs, const Value &value);
+
+ void inplaceBinOp(ExecutionContext *ctx, BinOp op, String *name, const Value &rhs);
+ void inplaceBinOp(ExecutionContext *ctx, BinOp op, const Value &index, const Value &rhs);
+
+ /* The spec default: Writable: true, Enumerable: false, Configurable: true */
+ void defineDefaultProperty(String *name, Value value);
+ void defineDefaultProperty(ExecutionContext *context, const QString &name, Value value);
+ void defineDefaultProperty(ExecutionContext *context, const QString &name, Value (*code)(SimpleCallContext *), int count = 0);
+ /* Fixed: Writable: false, Enumerable: false, Configurable: false */
+ void defineReadonlyProperty(ExecutionEngine *engine, const QString &name, Value value);
+ void defineReadonlyProperty(String *name, Value value);
+
+ Property *insertMember(String *s, PropertyAttributes attributes);
+
+ // Array handling
+
+ uint allocArrayValue() {
+ uint idx = arrayFreeList;
+ if (arrayAlloc <= arrayFreeList)
+ arrayReserve(arrayAlloc + 1);
+ arrayFreeList = arrayData[arrayFreeList].value.uint_32;
+ if (arrayAttributes)
+ arrayAttributes[idx].setType(PropertyAttributes::Data);
+ return idx;
+ }
+
+ uint allocArrayValue(Value v) {
+ uint idx = allocArrayValue();
+ Property *pd = &arrayData[idx];
+ pd->value = v;
+ return idx;
+ }
+ void freeArrayValue(int idx) {
+ Property &pd = arrayData[idx];
+ pd.value.tag = Value::_Deleted_Type;
+ pd.value.int_32 = arrayFreeList;
+ arrayFreeList = idx;
+ if (arrayAttributes)
+ arrayAttributes[idx].clear();
+ }
+
+ void getArrayHeadRoom() {
+ assert(!sparseArray && !arrayOffset);
+ arrayOffset = qMax(arrayDataLen >> 2, (uint)16);
+ Property *newArray = new Property[arrayOffset + arrayAlloc];
+ memcpy(newArray + arrayOffset, arrayData, arrayDataLen*sizeof(Property));
+ delete [] arrayData;
+ arrayData = newArray + arrayOffset;
+ if (arrayAttributes) {
+ PropertyAttributes *newAttrs = new PropertyAttributes[arrayOffset + arrayAlloc];
+ memcpy(newAttrs + arrayOffset, arrayAttributes, arrayDataLen*sizeof(PropertyAttributes));
+ delete [] arrayAttributes;
+ arrayAttributes = newAttrs + arrayOffset;
+ }
+ }
+
+public:
+ void copyArrayData(Object *other);
+ void initSparse();
+
+ uint arrayLength() const;
+ bool setArrayLength(uint newLen);
+
+ void setArrayLengthUnchecked(uint l);
+
+ Property *arrayInsert(uint index, PropertyAttributes attributes = Attr_Data) {
+
+ Property *pd;
+ if (!sparseArray && (index < 0x1000 || index < arrayDataLen + (arrayDataLen >> 2))) {
+ if (index >= arrayAlloc)
+ arrayReserve(index + 1);
+ if (index >= arrayDataLen) {
+ ensureArrayAttributes();
+ for (uint i = arrayDataLen; i < index; ++i)
+ arrayAttributes[i].clear();
+ arrayDataLen = index + 1;
+ }
+ pd = arrayData + index;
+ } else {
+ initSparse();
+ SparseArrayNode *n = sparseArray->insert(index);
+ if (n->value == UINT_MAX)
+ n->value = allocArrayValue();
+ pd = arrayData + n->value;
+ }
+ if (index >= arrayLength())
+ setArrayLengthUnchecked(index + 1);
+ if (arrayAttributes || attributes != Attr_Data) {
+ if (!arrayAttributes)
+ ensureArrayAttributes();
+ attributes.resolve();
+ arrayAttributes[pd - arrayData] = attributes;
+ }
+ return pd;
+ }
+
+ void arraySet(uint index, const Property *pd) {
+ *arrayInsert(index) = *pd;
+ }
+
+ void arraySet(uint index, Value value) {
+ Property *pd = arrayInsert(index);
+ pd->value = value;
+ }
+
+ uint propertyIndexFromArrayIndex(uint index) const
+ {
+ if (!sparseArray) {
+ if (index >= arrayDataLen)
+ return UINT_MAX;
+ return index;
+ } else {
+ SparseArrayNode *n = sparseArray->findNode(index);
+ if (!n)
+ return UINT_MAX;
+ return n->value;
+ }
+ }
+
+ Property *arrayAt(uint index) const {
+ uint pidx = propertyIndexFromArrayIndex(index);
+ if (pidx == UINT_MAX)
+ return 0;
+ return arrayData + pidx;
+ }
+
+ Property *nonSparseArrayAt(uint index) const {
+ if (sparseArray)
+ return 0;
+ if (index >= arrayDataLen)
+ return 0;
+ return arrayData + index;
+ }
+
+ void markArrayObjects() const;
+
+ void push_back(Value v) {
+ uint idx = arrayLength();
+ if (!sparseArray) {
+ if (idx >= arrayAlloc)
+ arrayReserve(idx + 1);
+ arrayData[idx].value = v;
+ arrayDataLen = idx + 1;
+ } else {
+ uint idx = allocArrayValue(v);
+ sparseArray->push_back(idx, arrayLength());
+ }
+ setArrayLengthUnchecked(idx + 1);
+ }
+
+ SparseArrayNode *sparseArrayBegin() { return sparseArray ? sparseArray->begin() : 0; }
+ SparseArrayNode *sparseArrayEnd() { return sparseArray ? sparseArray->end() : 0; }
+
+ void arrayConcat(const ArrayObject *other);
+ void arraySort(ExecutionContext *context, Object *thisObject, const Value &comparefn, uint arrayDataLen);
+ Value arrayIndexOf(Value v, uint fromIndex, uint arrayDataLen, ExecutionContext *ctx, Object *o);
+
+ void arrayReserve(uint n);
+ void ensureArrayAttributes();
+
+ using Managed::get;
+ using Managed::getIndexed;
+ using Managed::put;
+ using Managed::putIndexed;
+ using Managed::deleteProperty;
+ using Managed::deleteIndexedProperty;
+protected:
+ static const ManagedVTable static_vtbl;
+ static void destroy(Managed *that);
+ static void markObjects(Managed *that);
+ static Value get(Managed *m, ExecutionContext *ctx, String *name, bool *hasProperty);
+ static Value getIndexed(Managed *m, ExecutionContext *ctx, uint index, bool *hasProperty);
+ static void put(Managed *m, ExecutionContext *ctx, String *name, const Value &value);
+ static void putIndexed(Managed *m, ExecutionContext *ctx, uint index, const Value &value);
+ static PropertyAttributes query(Managed *m, ExecutionContext *ctx, String *name);
+ static PropertyAttributes queryIndexed(Managed *m, ExecutionContext *ctx, uint index);
+ static bool deleteProperty(Managed *m, ExecutionContext *ctx, String *name);
+ static bool deleteIndexedProperty(Managed *m, ExecutionContext *ctx, uint index);
+
+private:
+ Value internalGet(ExecutionContext *ctx, String *name, bool *hasProperty);
+ Value internalGetIndexed(ExecutionContext *ctx, uint index, bool *hasProperty);
+ void internalPut(ExecutionContext *ctx, String *name, const Value &value);
+ void internalPutIndexed(ExecutionContext *ctx, uint index, const Value &value);
+ bool internalDeleteProperty(ExecutionContext *ctx, String *name);
+ bool internalDeleteIndexedProperty(ExecutionContext *ctx, uint index);
+
+ friend struct ObjectIterator;
+ friend struct ObjectPrototype;
+};
+
+struct ForEachIteratorObject: Object {
+ ObjectIterator it;
+ ForEachIteratorObject(ExecutionContext *ctx, Object *o)
+ : Object(ctx->engine), it(ctx, o, ObjectIterator::EnumberableOnly|ObjectIterator::WithProtoChain) {
+ vtbl = &static_vtbl;
+ type = Type_ForeachIteratorObject;
+ }
+
+ Value nextPropertyName() { return it.nextPropertyNameAsString(); }
+
+protected:
+ static const ManagedVTable static_vtbl;
+ static void markObjects(Managed *that);
+};
+
+struct BooleanObject: Object {
+ Value value;
+ BooleanObject(ExecutionEngine *engine, const Value &value): Object(engine), value(value) { type = Type_BooleanObject; }
+};
+
+struct NumberObject: Object {
+ Value value;
+ NumberObject(ExecutionEngine *engine, const Value &value): Object(engine), value(value) { type = Type_NumberObject; }
+};
+
+struct ArrayObject: Object {
+ enum {
+ LengthPropertyIndex = 0
+ };
+
+ ArrayObject(ExecutionContext *ctx) : Object(ctx->engine) { init(ctx); }
+ void init(ExecutionContext *context);
+};
+
+inline uint Object::arrayLength() const
+{
+ if (isArrayObject()) {
+ // length is always the first property of an array
+ Value v = memberData[ArrayObject::LengthPropertyIndex].value;
+ if (v.isInteger())
+ return v.integerValue();
+ return Value::toUInt32(v.doubleValue());
+ }
+ return 0;
+}
+
+inline void Object::setArrayLengthUnchecked(uint l)
+{
+ if (isArrayObject()) {
+ // length is always the first property of an array
+ Property &lengthProperty = memberData[ArrayObject::LengthPropertyIndex];
+ lengthProperty.value = Value::fromUInt32(l);
+ }
+}
+
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QMLJS_OBJECTS_H
diff --git a/src/qml/qml/v4vm/qv4objectiterator.cpp b/src/qml/qml/v4vm/qv4objectiterator.cpp
new file mode 100644
index 0000000000..44ba9efcaa
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4objectiterator.cpp
@@ -0,0 +1,185 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include "qv4objectiterator.h"
+#include "qv4object.h"
+#include "qv4stringobject.h"
+#include "qv4identifier.h"
+
+namespace QQmlJS {
+namespace VM {
+
+ObjectIterator::ObjectIterator(ExecutionContext *context, Object *o, uint flags)
+ : context(context)
+ , object(o)
+ , current(o)
+ , arrayNode(0)
+ , arrayIndex(0)
+ , memberIndex(0)
+ , flags(flags)
+{
+ if (current) {
+ if (current->asStringObject())
+ this->flags |= CurrentIsString;
+ }
+}
+
+Property *ObjectIterator::next(String **name, uint *index, PropertyAttributes *attrs)
+{
+ Property *p = 0;
+ *name = 0;
+ *index = UINT_MAX;
+ while (1) {
+ if (!current)
+ break;
+
+ if (flags & CurrentIsString) {
+ StringObject *s = static_cast<StringObject *>(current);
+ uint slen = s->value.stringValue()->toQString().length();
+ while (arrayIndex < slen) {
+ *index = arrayIndex;
+ ++arrayIndex;
+ if (attrs)
+ *attrs = s->arrayAttributes ? s->arrayAttributes[arrayIndex] : PropertyAttributes(Attr_NotWritable|Attr_NotConfigurable);
+ return s->__getOwnProperty__(*index);
+ }
+ flags &= ~CurrentIsString;
+ arrayNode = current->sparseArrayBegin();
+ // iterate until we're past the end of the string
+ while (arrayNode && arrayNode->key() < slen)
+ arrayNode = arrayNode->nextNode();
+ }
+
+ if (!arrayIndex)
+ arrayNode = current->sparseArrayBegin();
+
+ // sparse arrays
+ if (arrayNode) {
+ while (arrayNode != current->sparseArrayEnd()) {
+ int k = arrayNode->key();
+ uint pidx = arrayNode->value;
+ p = current->arrayData + pidx;
+ arrayNode = arrayNode->nextNode();
+ PropertyAttributes a = current->arrayAttributes ? current->arrayAttributes[pidx] : PropertyAttributes(Attr_Data);
+ if (!(flags & EnumberableOnly) || a.isEnumerable()) {
+ arrayIndex = k + 1;
+ *index = k;
+ if (attrs)
+ *attrs = a;
+ return p;
+ }
+ }
+ arrayNode = 0;
+ arrayIndex = UINT_MAX;
+ }
+ // dense arrays
+ while (arrayIndex < current->arrayDataLen) {
+ uint pidx = current->propertyIndexFromArrayIndex(arrayIndex);
+ p = current->arrayData + pidx;
+ PropertyAttributes a = current->arrayAttributes ? current->arrayAttributes[pidx] : PropertyAttributes(Attr_Data);
+ ++arrayIndex;
+ if ((!current->arrayAttributes || !current->arrayAttributes[pidx].isGeneric())
+ && (!(flags & EnumberableOnly) || a.isEnumerable())) {
+ *index = arrayIndex - 1;
+ if (attrs)
+ *attrs = a;
+ return p;
+ }
+ }
+
+ if (memberIndex == current->internalClass->size) {
+ if (flags & WithProtoChain)
+ current = current->prototype;
+ else
+ current = 0;
+ if (current && current->asStringObject())
+ flags |= CurrentIsString;
+ else
+ flags &= ~CurrentIsString;
+
+
+ arrayIndex = 0;
+ memberIndex = 0;
+ continue;
+ }
+ String *n = current->internalClass->nameMap.at(memberIndex);
+ assert(n);
+ // ### check that it's not a repeated attribute
+
+ p = current->memberData + memberIndex;
+ PropertyAttributes a = current->internalClass->propertyData[memberIndex];
+ ++memberIndex;
+ if (!(flags & EnumberableOnly) || a.isEnumerable()) {
+ *name = n;
+ if (attrs)
+ *attrs = a;
+ return p;
+ }
+ }
+ return 0;
+}
+
+Value ObjectIterator::nextPropertyName()
+{
+ uint index;
+ String *name;
+ next(&name, &index);
+ if (name)
+ return Value::fromString(name);
+ if (index < UINT_MAX)
+ return Value::fromDouble(index);
+ return Value::nullValue();
+}
+
+Value ObjectIterator::nextPropertyNameAsString()
+{
+ uint index;
+ String *name;
+ next(&name, &index);
+ if (name)
+ return Value::fromString(name);
+ if (index < UINT_MAX)
+ return __qmljs_to_string(Value::fromDouble(index), context);
+ return Value::nullValue();
+}
+
+}
+}
+
diff --git a/src/qml/qml/v4vm/qv4objectiterator.h b/src/qml/qml/v4vm/qv4objectiterator.h
new file mode 100644
index 0000000000..c740132661
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4objectiterator.h
@@ -0,0 +1,84 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4OBJECTITERATOR_H
+#define QV4OBJECTITERATOR_H
+
+#include "qv4value.h"
+#include <qv4internalclass.h>
+#include <qv4property.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct SparseArrayNode;
+struct Object;
+
+struct ObjectIterator
+{
+ enum Flags {
+ NoFlags = 0,
+ EnumberableOnly = 0x1,
+ WithProtoChain = 0x2,
+ CurrentIsString = 0x4
+ };
+
+ ExecutionContext *context;
+ Object *object;
+ Object *current;
+ SparseArrayNode *arrayNode;
+ uint arrayIndex;
+ uint memberIndex;
+ uint flags;
+
+ ObjectIterator(ExecutionContext *context, Object *o, uint flags);
+ Property *next(String **name, uint *index, PropertyAttributes *attributes = 0);
+ Value nextPropertyName();
+ Value nextPropertyNameAsString();
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4objectproto.cpp b/src/qml/qml/v4vm/qv4objectproto.cpp
new file mode 100644
index 0000000000..df3d4c8e43
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4objectproto.cpp
@@ -0,0 +1,565 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+
+#include "qv4objectproto.h"
+#include "qv4mm.h"
+#include <QtCore/qnumeric.h>
+#include <QtCore/qmath.h>
+#include <QtCore/QDateTime>
+#include <QtCore/QStringList>
+#include <QtCore/QDebug>
+#include <cassert>
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include <qv4isel_masm_p.h>
+
+#ifndef Q_OS_WIN
+# include <time.h>
+# ifndef Q_OS_VXWORKS
+# include <sys/time.h>
+# else
+# include "qplatformdefs.h"
+# endif
+#else
+# include <windows.h>
+#endif
+
+using namespace QQmlJS::VM;
+
+
+DEFINE_MANAGED_VTABLE(ObjectCtor);
+
+ObjectCtor::ObjectCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+Value ObjectCtor::construct(Managed *that, ExecutionContext *ctx, Value *args, int argc)
+{
+ ObjectCtor *ctor = static_cast<ObjectCtor *>(that);
+ if (!argc || args[0].isUndefined() || args[0].isNull()) {
+ Object *obj = ctx->engine->newObject();
+ Value proto = ctor->get(ctx, ctx->engine->id_prototype);
+ if (proto.isObject())
+ obj->prototype = proto.objectValue();
+ return Value::fromObject(obj);
+ }
+ return __qmljs_to_object(ctx, args[0]);
+}
+
+Value ObjectCtor::call(Managed *, ExecutionContext *ctx, const Value &/*thisObject*/, Value *args, int argc)
+{
+ if (!argc || args[0].isUndefined() || args[0].isNull())
+ return Value::fromObject(ctx->engine->newObject());
+ return __qmljs_to_object(ctx, args[0]);
+}
+
+void ObjectPrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(1));
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("getPrototypeOf"), method_getPrototypeOf, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("getOwnPropertyDescriptor"), method_getOwnPropertyDescriptor, 2);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("getOwnPropertyNames"), method_getOwnPropertyNames, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("create"), method_create, 2);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("defineProperty"), method_defineProperty, 3);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("defineProperties"), method_defineProperties, 2);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("seal"), method_seal, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("freeze"), method_freeze, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("preventExtensions"), method_preventExtensions, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("isSealed"), method_isSealed, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("isFrozen"), method_isFrozen, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("isExtensible"), method_isExtensible, 1);
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("keys"), method_keys, 1);
+
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleString"), method_toLocaleString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("valueOf"), method_valueOf, 0);
+ defineDefaultProperty(ctx, QStringLiteral("hasOwnProperty"), method_hasOwnProperty, 1);
+ defineDefaultProperty(ctx, QStringLiteral("isPrototypeOf"), method_isPrototypeOf, 1);
+ defineDefaultProperty(ctx, QStringLiteral("propertyIsEnumerable"), method_propertyIsEnumerable, 1);
+ defineDefaultProperty(ctx, QStringLiteral("__defineGetter__"), method_defineGetter, 0);
+ defineDefaultProperty(ctx, QStringLiteral("__defineSetter__"), method_defineSetter, 0);
+}
+
+Value ObjectPrototype::method_getPrototypeOf(SimpleCallContext *ctx)
+{
+ Value o = ctx->argument(0);
+ if (! o.isObject())
+ ctx->throwTypeError();
+
+ Object *p = o.objectValue()->prototype;
+ return p ? Value::fromObject(p) : Value::nullValue();
+}
+
+Value ObjectPrototype::method_getOwnPropertyDescriptor(SimpleCallContext *ctx)
+{
+ Value O = ctx->argument(0);
+ if (!O.isObject())
+ ctx->throwTypeError();
+
+ String *name = ctx->argument(1).toString(ctx);
+ PropertyAttributes attrs;
+ Property *desc = O.objectValue()->__getOwnProperty__(name, &attrs);
+ return fromPropertyDescriptor(ctx, desc, attrs);
+}
+
+Value ObjectPrototype::method_getOwnPropertyNames(SimpleCallContext *context)
+{
+ Object *O = context->argumentCount ? context->arguments[0].asObject() : 0;
+ if (!O)
+ context->throwTypeError();
+
+ ArrayObject *array = context->engine->newArrayObject(context)->asArrayObject();
+ ObjectIterator it(context, O, ObjectIterator::NoFlags);
+ while (1) {
+ Value v = it.nextPropertyNameAsString();
+ if (v.isNull())
+ break;
+ array->push_back(v);
+ }
+ return Value::fromObject(array);
+}
+
+Value ObjectPrototype::method_create(SimpleCallContext *ctx)
+{
+ Value O = ctx->argument(0);
+ if (!O.isObject() && !O.isNull())
+ ctx->throwTypeError();
+
+ Object *newObject = ctx->engine->newObject();
+ newObject->prototype = O.asObject();
+
+ Value objValue = Value::fromObject(newObject);
+ if (ctx->argumentCount > 1 && !ctx->argument(1).isUndefined()) {
+ ctx->arguments[0] = objValue;
+ method_defineProperties(ctx);
+ }
+
+ return objValue;
+}
+
+Value ObjectPrototype::method_defineProperty(SimpleCallContext *ctx)
+{
+ Value O = ctx->argument(0);
+ if (!O.isObject())
+ ctx->throwTypeError();
+
+ String *name = ctx->argument(1).toString(ctx);
+
+ Value attributes = ctx->argument(2);
+ Property pd;
+ PropertyAttributes attrs;
+ toPropertyDescriptor(ctx, attributes, &pd, &attrs);
+
+ if (!O.objectValue()->__defineOwnProperty__(ctx, name, pd, attrs))
+ ctx->throwTypeError();
+
+ return O;
+}
+
+Value ObjectPrototype::method_defineProperties(SimpleCallContext *ctx)
+{
+ Value O = ctx->argument(0);
+ if (!O.isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(1).toObject(ctx);
+
+ ObjectIterator it(ctx, o, ObjectIterator::EnumberableOnly);
+ while (1) {
+ uint index;
+ String *name;
+ PropertyAttributes attrs;
+ Property *pd = it.next(&name, &index, &attrs);
+ if (!pd)
+ break;
+ Property n;
+ PropertyAttributes nattrs;
+ toPropertyDescriptor(ctx, o->getValue(ctx, pd, attrs), &n, &nattrs);
+ bool ok;
+ if (name)
+ ok = O.objectValue()->__defineOwnProperty__(ctx, name, n, nattrs);
+ else
+ ok = O.objectValue()->__defineOwnProperty__(ctx, index, n, nattrs);
+ if (!ok)
+ ctx->throwTypeError();
+ }
+
+ return O;
+}
+
+Value ObjectPrototype::method_seal(SimpleCallContext *ctx)
+{
+ if (!ctx->argument(0).isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(0).objectValue();
+ o->extensible = false;
+
+ o->internalClass = o->internalClass->sealed();
+
+ o->ensureArrayAttributes();
+ for (uint i = 0; i < o->arrayDataLen; ++i) {
+ if (!o->arrayAttributes[i].isGeneric())
+ o->arrayAttributes[i].setConfigurable(false);
+ }
+
+ return ctx->argument(0);
+}
+
+Value ObjectPrototype::method_freeze(SimpleCallContext *ctx)
+{
+ if (!ctx->argument(0).isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(0).objectValue();
+ o->extensible = false;
+
+ o->internalClass = o->internalClass->frozen();
+
+ o->ensureArrayAttributes();
+ for (uint i = 0; i < o->arrayDataLen; ++i) {
+ if (!o->arrayAttributes[i].isGeneric())
+ o->arrayAttributes[i].setConfigurable(false);
+ if (o->arrayAttributes[i].isData())
+ o->arrayAttributes[i].setWritable(false);
+ }
+ return ctx->argument(0);
+}
+
+Value ObjectPrototype::method_preventExtensions(SimpleCallContext *ctx)
+{
+ if (!ctx->argument(0).isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(0).objectValue();
+ o->extensible = false;
+ return ctx->argument(0);
+}
+
+Value ObjectPrototype::method_isSealed(SimpleCallContext *ctx)
+{
+ if (!ctx->argument(0).isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(0).objectValue();
+ if (o->extensible)
+ return Value::fromBoolean(false);
+
+ if (o->internalClass != o->internalClass->sealed())
+ return Value::fromBoolean(false);
+
+ if (!o->arrayDataLen)
+ return Value::fromBoolean(true);
+
+ if (!o->arrayAttributes)
+ return Value::fromBoolean(false);
+
+ for (uint i = 0; i < o->arrayDataLen; ++i) {
+ if (!o->arrayAttributes[i].isGeneric())
+ if (o->arrayAttributes[i].isConfigurable())
+ return Value::fromBoolean(false);
+ }
+
+ return Value::fromBoolean(true);
+}
+
+Value ObjectPrototype::method_isFrozen(SimpleCallContext *ctx)
+{
+ if (!ctx->argument(0).isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(0).objectValue();
+ if (o->extensible)
+ return Value::fromBoolean(false);
+
+ if (o->internalClass != o->internalClass->frozen())
+ return Value::fromBoolean(false);
+
+ if (!o->arrayDataLen)
+ return Value::fromBoolean(true);
+
+ if (!o->arrayAttributes)
+ return Value::fromBoolean(false);
+
+ for (uint i = 0; i < o->arrayDataLen; ++i) {
+ if (!o->arrayAttributes[i].isGeneric())
+ if (o->arrayAttributes[i].isConfigurable() || o->arrayAttributes[i].isWritable())
+ return Value::fromBoolean(false);
+ }
+
+ return Value::fromBoolean(true);
+}
+
+Value ObjectPrototype::method_isExtensible(SimpleCallContext *ctx)
+{
+ if (!ctx->argument(0).isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(0).objectValue();
+ return Value::fromBoolean(o->extensible);
+}
+
+Value ObjectPrototype::method_keys(SimpleCallContext *ctx)
+{
+ if (!ctx->argument(0).isObject())
+ ctx->throwTypeError();
+
+ Object *o = ctx->argument(0).objectValue();
+
+ ArrayObject *a = ctx->engine->newArrayObject(ctx);
+
+ ObjectIterator it(ctx, o, ObjectIterator::EnumberableOnly);
+ while (1) {
+ uint index;
+ String *name;
+ Property *pd = it.next(&name, &index);
+ if (!pd)
+ break;
+ Value key;
+ if (name) {
+ key = Value::fromString(name);
+ } else {
+ key = Value::fromDouble(index);
+ key = __qmljs_to_string(key, ctx);
+ }
+ a->push_back(key);
+ }
+
+ return Value::fromObject(a);
+}
+
+Value ObjectPrototype::method_toString(SimpleCallContext *ctx)
+{
+ if (ctx->thisObject.isUndefined()) {
+ return Value::fromString(ctx, QStringLiteral("[object Undefined]"));
+ } else if (ctx->thisObject.isNull()) {
+ return Value::fromString(ctx, QStringLiteral("[object Null]"));
+ } else {
+ Value obj = __qmljs_to_object(ctx, ctx->thisObject);
+ QString className = obj.objectValue()->className();
+ return Value::fromString(ctx, QString::fromUtf8("[object %1]").arg(className));
+ }
+}
+
+Value ObjectPrototype::method_toLocaleString(SimpleCallContext *ctx)
+{
+ Object *o = ctx->thisObject.toObject(ctx);
+ Value ts = o->get(ctx, ctx->engine->newString(QStringLiteral("toString")));
+ FunctionObject *f = ts.asFunctionObject();
+ if (!f)
+ ctx->throwTypeError();
+ return f->call(ctx, Value::fromObject(o), 0, 0);
+}
+
+Value ObjectPrototype::method_valueOf(SimpleCallContext *ctx)
+{
+ return Value::fromObject(ctx->thisObject.toObject(ctx));
+}
+
+Value ObjectPrototype::method_hasOwnProperty(SimpleCallContext *ctx)
+{
+ String *P = ctx->argument(0).toString(ctx);
+ Object *O = ctx->thisObject.toObject(ctx);
+ bool r = O->__getOwnProperty__(P) != 0;
+ return Value::fromBoolean(r);
+}
+
+Value ObjectPrototype::method_isPrototypeOf(SimpleCallContext *ctx)
+{
+ Value V = ctx->argument(0);
+ if (! V.isObject())
+ return Value::fromBoolean(false);
+
+ Object *O = ctx->thisObject.toObject(ctx);
+ Object *proto = V.objectValue()->prototype;
+ while (proto) {
+ if (O == proto)
+ return Value::fromBoolean(true);
+ proto = proto->prototype;
+ }
+ return Value::fromBoolean(false);
+}
+
+Value ObjectPrototype::method_propertyIsEnumerable(SimpleCallContext *ctx)
+{
+ String *p = ctx->argument(0).toString(ctx);
+
+ Object *o = ctx->thisObject.toObject(ctx);
+ PropertyAttributes attrs;
+ o->__getOwnProperty__(p, &attrs);
+ return Value::fromBoolean(attrs.isEnumerable());
+}
+
+Value ObjectPrototype::method_defineGetter(SimpleCallContext *ctx)
+{
+ if (ctx->argumentCount < 2)
+ ctx->throwTypeError();
+ String *prop = ctx->argument(0).toString(ctx);
+
+ FunctionObject *f = ctx->argument(1).asFunctionObject();
+ if (!f)
+ ctx->throwTypeError();
+
+ Object *o = ctx->thisObject.toObject(ctx);
+
+ Property pd = Property::fromAccessor(f, 0);
+ o->__defineOwnProperty__(ctx, prop, pd, Attr_Accessor);
+ return Value::undefinedValue();
+}
+
+Value ObjectPrototype::method_defineSetter(SimpleCallContext *ctx)
+{
+ if (ctx->argumentCount < 2)
+ ctx->throwTypeError();
+ String *prop = ctx->argument(0).toString(ctx);
+
+ FunctionObject *f = ctx->argument(1).asFunctionObject();
+ if (!f)
+ ctx->throwTypeError();
+
+ Object *o = ctx->thisObject.toObject(ctx);
+
+ Property pd = Property::fromAccessor(0, f);
+ o->__defineOwnProperty__(ctx, prop, pd, Attr_Accessor);
+ return Value::undefinedValue();
+}
+
+void ObjectPrototype::toPropertyDescriptor(ExecutionContext *ctx, Value v, Property *desc, PropertyAttributes *attrs)
+{
+ if (!v.isObject())
+ ctx->throwTypeError();
+
+ Object *o = v.objectValue();
+
+ attrs->clear();
+ desc->setGetter(0);
+ desc->setSetter(0);
+
+ if (o->__hasProperty__(ctx->engine->id_enumerable))
+ attrs->setEnumerable(o->get(ctx, ctx->engine->id_enumerable).toBoolean());
+
+ if (o->__hasProperty__(ctx->engine->id_configurable))
+ attrs->setConfigurable(o->get(ctx, ctx->engine->id_configurable).toBoolean());
+
+ if (o->__hasProperty__(ctx->engine->id_get)) {
+ Value get = o->get(ctx, ctx->engine->id_get);
+ FunctionObject *f = get.asFunctionObject();
+ if (f) {
+ desc->setGetter(f);
+ } else if (get.isUndefined()) {
+ desc->setGetter((FunctionObject *)0x1);
+ } else {
+ ctx->throwTypeError();
+ }
+ attrs->setType(PropertyAttributes::Accessor);
+ }
+
+ if (o->__hasProperty__(ctx->engine->id_set)) {
+ Value set = o->get(ctx, ctx->engine->id_set);
+ FunctionObject *f = set.asFunctionObject();
+ if (f) {
+ desc->setSetter(f);
+ } else if (set.isUndefined()) {
+ desc->setSetter((FunctionObject *)0x1);
+ } else {
+ ctx->throwTypeError();
+ }
+ attrs->setType(PropertyAttributes::Accessor);
+ }
+
+ if (o->__hasProperty__(ctx->engine->id_writable)) {
+ if (attrs->isAccessor())
+ ctx->throwTypeError();
+ attrs->setWritable(o->get(ctx, ctx->engine->id_writable).toBoolean());
+ // writable forces it to be a data descriptor
+ desc->value = Value::undefinedValue();
+ }
+
+ if (o->__hasProperty__(ctx->engine->id_value)) {
+ if (attrs->isAccessor())
+ ctx->throwTypeError();
+ desc->value = o->get(ctx, ctx->engine->id_value);
+ attrs->setType(PropertyAttributes::Data);
+ }
+
+ if (attrs->isGeneric())
+ desc->value = Value::deletedValue();
+}
+
+
+Value ObjectPrototype::fromPropertyDescriptor(ExecutionContext *ctx, const Property *desc, PropertyAttributes attrs)
+{
+ if (!desc)
+ return Value::undefinedValue();
+
+ ExecutionEngine *engine = ctx->engine;
+// Let obj be the result of creating a new object as if by the expression new Object() where Object is the standard built-in constructor with that name.
+ Object *o = engine->newObject();
+
+ Property pd;
+ if (attrs.isData()) {
+ pd.value = desc->value;
+ o->__defineOwnProperty__(ctx, engine->newString(QStringLiteral("value")), pd, Attr_Data);
+ pd.value = Value::fromBoolean(attrs.isWritable());
+ o->__defineOwnProperty__(ctx, engine->newString(QStringLiteral("writable")), pd, Attr_Data);
+ } else {
+ pd.value = desc->getter() ? Value::fromObject(desc->getter()) : Value::undefinedValue();
+ o->__defineOwnProperty__(ctx, engine->newString(QStringLiteral("get")), pd, Attr_Data);
+ pd.value = desc->setter() ? Value::fromObject(desc->setter()) : Value::undefinedValue();
+ o->__defineOwnProperty__(ctx, engine->newString(QStringLiteral("set")), pd, Attr_Data);
+ }
+ pd.value = Value::fromBoolean(attrs.isEnumerable());
+ o->__defineOwnProperty__(ctx, engine->newString(QStringLiteral("enumerable")), pd, Attr_Data);
+ pd.value = Value::fromBoolean(attrs.isConfigurable());
+ o->__defineOwnProperty__(ctx, engine->newString(QStringLiteral("configurable")), pd, Attr_Data);
+
+ return Value::fromObject(o);
+}
diff --git a/src/qml/qml/v4vm/qv4objectproto.h b/src/qml/qml/v4vm/qv4objectproto.h
new file mode 100644
index 0000000000..0a19f45b06
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4objectproto.h
@@ -0,0 +1,104 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4ECMAOBJECTS_P_H
+#define QV4ECMAOBJECTS_P_H
+
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <QtCore/qnumeric.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct ObjectCtor: FunctionObject
+{
+ ObjectCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *that, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct ObjectPrototype: Object
+{
+ ObjectPrototype(ExecutionEngine *engine) : Object(engine) {}
+
+ void init(ExecutionContext *ctx, const Value &ctor);
+
+ static Value method_getPrototypeOf(SimpleCallContext *ctx);
+ static Value method_getOwnPropertyDescriptor(SimpleCallContext *ctx);
+ static Value method_getOwnPropertyNames(SimpleCallContext *context);
+ static Value method_create(SimpleCallContext *ctx);
+ static Value method_defineProperty(SimpleCallContext *ctx);
+ static Value method_defineProperties(SimpleCallContext *ctx);
+ static Value method_seal(SimpleCallContext *ctx);
+ static Value method_freeze(SimpleCallContext *ctx);
+ static Value method_preventExtensions(SimpleCallContext *ctx);
+ static Value method_isSealed(SimpleCallContext *ctx);
+ static Value method_isFrozen(SimpleCallContext *ctx);
+ static Value method_isExtensible(SimpleCallContext *ctx);
+ static Value method_keys(SimpleCallContext *ctx);
+
+ static Value method_toString(SimpleCallContext *ctx);
+ static Value method_toLocaleString(SimpleCallContext *ctx);
+ static Value method_valueOf(SimpleCallContext *ctx);
+ static Value method_hasOwnProperty(SimpleCallContext *ctx);
+ static Value method_isPrototypeOf(SimpleCallContext *ctx);
+ static Value method_propertyIsEnumerable(SimpleCallContext *ctx);
+
+ static Value method_defineGetter(SimpleCallContext *ctx);
+ static Value method_defineSetter(SimpleCallContext *ctx);
+
+ static void toPropertyDescriptor(ExecutionContext *ctx, Value v, Property *desc, PropertyAttributes *attrs);
+ static Value fromPropertyDescriptor(ExecutionContext *ctx, const Property *desc, PropertyAttributes attrs);
+};
+
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4ECMAOBJECTS_P_H
diff --git a/src/qml/qml/v4vm/qv4property.h b/src/qml/qml/v4vm/qv4property.h
new file mode 100644
index 0000000000..afb5ede277
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4property.h
@@ -0,0 +1,152 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4PROPERTYDESCRIPTOR_H
+#define QV4PROPERTYDESCRIPTOR_H
+
+#include "qv4global.h"
+#include "qv4value.h"
+#include "qv4internalclass.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct FunctionObject;
+
+// A property slot: either a plain data value or an accessor (getter/setter)
+// pair, discriminated externally by the associated PropertyAttributes.
+struct Property {
+ union {
+ Value value;
+ struct {
+ // NOTE(review): (FunctionObject *)0x1 appears to be used as a
+ // "present but absent" sentinel — it is normalized to 0 in
+ // fullyPopulated() and merge() below; confirm against callers.
+ FunctionObject *get;
+ FunctionObject *set;
+ };
+ };
+
+ // Section 8.10
+ // Fills in defaults so a partially specified descriptor becomes complete,
+ // then resolves the attributes to their final form.
+ inline void fullyPopulated(PropertyAttributes *attrs) {
+ if (!attrs->hasType()) {
+ value = Value::undefinedValue();
+ }
+ if (attrs->type() == PropertyAttributes::Accessor) {
+ // Accessor properties have no writable flag.
+ attrs->clearWritable();
+ if (get == (FunctionObject *)0x1)
+ get = 0;
+ if (set == (FunctionObject *)0x1)
+ set = 0;
+ }
+ attrs->resolve();
+ }
+
+ // Convenience constructors for the two property kinds.
+ static inline Property fromValue(Value v) {
+ Property pd;
+ pd.value = v;
+ return pd;
+ }
+ static inline Property fromAccessor(FunctionObject *getter, FunctionObject *setter) {
+ Property pd;
+ pd.get = getter;
+ pd.set = setter;
+ return pd;
+ }
+
+ // A descriptor with neither data nor accessor semantics (deleted marker).
+ static Property genericDescriptor() {
+ Property pd;
+ pd.value = Value::deletedValue();
+ return pd;
+ }
+
+ inline bool isSubset(const PropertyAttributes &attrs, const Property &other, PropertyAttributes otherAttrs) const;
+ inline void merge(PropertyAttributes &attrs, const Property &other, PropertyAttributes otherAttrs);
+
+ inline FunctionObject *getter() const { return get; }
+ inline FunctionObject *setter() const { return set; }
+ inline void setGetter(FunctionObject *g) { get = g; }
+ inline void setSetter(FunctionObject *s) { set = s; }
+};
+
+// Returns true when every attribute/value this descriptor specifies agrees
+// with 'other' — i.e. redefining 'other' with *this would be a no-op.
+// Unspecified (Generic / absent) fields on *this match anything.
+inline bool Property::isSubset(const PropertyAttributes &attrs, const Property &other, PropertyAttributes otherAttrs) const
+{
+ if (attrs.type() != PropertyAttributes::Generic && attrs.type() != otherAttrs.type())
+ return false;
+ if (attrs.hasEnumerable() && attrs.isEnumerable() != otherAttrs.isEnumerable())
+ return false;
+ if (attrs.hasConfigurable() && attrs.isConfigurable() != otherAttrs.isConfigurable())
+ return false;
+ if (attrs.hasWritable() && attrs.isWritable() != otherAttrs.isWritable())
+ return false;
+ // Data properties compare by SameValue; accessors by getter/setter identity.
+ if (attrs.type() == PropertyAttributes::Data && !value.sameValue(other.value))
+ return false;
+ if (attrs.type() == PropertyAttributes::Accessor) {
+ if (get != other.get)
+ return false;
+ if (set != other.set)
+ return false;
+ }
+ return true;
+}
+
+// Overlays the fields that 'other' specifies onto *this/attrs, leaving
+// unspecified fields untouched (the merge step of [[DefineOwnProperty]]).
+inline void Property::merge(PropertyAttributes &attrs, const Property &other, PropertyAttributes otherAttrs)
+{
+ if (otherAttrs.hasEnumerable())
+ attrs.setEnumerable(otherAttrs.isEnumerable());
+ if (otherAttrs.hasConfigurable())
+ attrs.setConfigurable(otherAttrs.isConfigurable());
+ if (otherAttrs.hasWritable())
+ attrs.setWritable(otherAttrs.isWritable());
+ if (otherAttrs.type() == PropertyAttributes::Accessor) {
+ attrs.setType(PropertyAttributes::Accessor);
+ // 0x1 is the "explicitly absent" sentinel (see Property declaration);
+ // it overwrites the slot with a null function.
+ if (other.get)
+ get = (other.get == (FunctionObject *)0x1) ? 0 : other.get;
+ if (other.set)
+ set = (other.set == (FunctionObject *)0x1) ? 0 : other.set;
+ } else if (otherAttrs.type() == PropertyAttributes::Data){
+ attrs.setType(PropertyAttributes::Data);
+ value = other.value;
+ }
+}
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4regexp.cpp b/src/qml/qml/v4vm/qv4regexp.cpp
new file mode 100644
index 0000000000..c0f7cee51d
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4regexp.cpp
@@ -0,0 +1,167 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4regexp.h"
+
+#include "qv4engine.h"
+
+namespace QQmlJS {
+namespace VM {
+
+// Detach every cached RegExp from this cache before it goes away, so their
+// destructors do not try to remove themselves from a dead cache.
+RegExpCache::~RegExpCache()
+{
+ for (RegExpCache::Iterator it = begin(), e = end();
+ it != e; ++it)
+ it.value()->m_cache = 0;
+ clear();
+}
+
+DEFINE_MANAGED_VTABLE(RegExp);
+
+// Runs the compiled pattern against 'string' beginning at 'start'.
+// matchOffsets receives (begin,end) offset pairs, one per capture slot;
+// returns the match position or JSC::Yarr::offsetNoMatch on failure or if
+// compilation failed (m_byteCode empty).
+uint RegExp::match(const QString &string, int start, uint *matchOffsets) const
+{
+ if (!isValid())
+ return JSC::Yarr::offsetNoMatch;
+
+ return JSC::Yarr::interpret(m_byteCode.get(), WTF::String(string).characters16(), string.length(), start, matchOffsets);
+}
+
+// Cached factory: returns the engine's existing RegExp for this
+// (pattern, flags) triple when available, otherwise compiles a new one and
+// registers it in the engine-wide cache (created lazily here).
+RegExp* RegExp::create(ExecutionEngine* engine, const QString& pattern, bool ignoreCase, bool multiline)
+{
+ RegExpCacheKey key(pattern, ignoreCase, multiline);
+
+ RegExpCache *cache = engine->regExpCache;
+ if (cache) {
+ if (RegExp *result = cache->value(key))
+ return result;
+ }
+
+ RegExp *result = new (engine->memoryManager) RegExp(engine, pattern, ignoreCase, multiline);
+
+ if (!cache)
+ cache = engine->regExpCache = new RegExpCache;
+
+ result->m_cache = cache;
+ cache->insert(key, result);
+
+ return result;
+}
+
+// Compiles 'pattern' with the Yarr engine. On a syntax error the object is
+// left with a null m_byteCode, which isValid() reports as invalid — callers
+// are expected to check rather than rely on an exception here.
+RegExp::RegExp(ExecutionEngine* engine, const QString &pattern, bool ignoreCase, bool multiline)
+ : m_pattern(pattern)
+ , m_cache(0)
+ , m_subPatternCount(0)
+ , m_ignoreCase(ignoreCase)
+ , m_multiLine(multiline)
+{
+ vtbl = &static_vtbl;
+ type = Type_RegExpObject;
+
+ if (!engine)
+ return;
+ const char* error = 0;
+ JSC::Yarr::YarrPattern yarrPattern(WTF::String(pattern), ignoreCase, multiline, &error);
+ if (error)
+ return;
+ m_subPatternCount = yarrPattern.m_numSubpatterns;
+ m_byteCode = JSC::Yarr::byteCompile(yarrPattern, &engine->bumperPointerAllocator);
+}
+
+// Unregisters from the engine cache (if still attached; the cache clears
+// m_cache first when it is the one being destroyed).
+RegExp::~RegExp()
+{
+ if (m_cache) {
+ RegExpCacheKey key(this);
+ m_cache->remove(key);
+ }
+ _data = 0;
+}
+
+// Managed vtable hook: dispatches to the real destructor.
+void RegExp::destroy(Managed *that)
+{
+ static_cast<RegExp*>(that)->~RegExp();
+}
+
+// GC hook: a RegExp holds no references to other managed objects.
+void RegExp::markObjects(Managed *that)
+{
+}
+
+// The Managed property-access vtable entries below are deliberate no-ops:
+// a VM::RegExp is an internal compiled-pattern holder, not a script-visible
+// object (that role belongs to RegExpObject).
+Value RegExp::get(Managed *m, ExecutionContext *ctx, String *name, bool *hasProperty)
+{
+ return Value::undefinedValue();
+}
+
+Value RegExp::getIndexed(Managed *m, ExecutionContext *ctx, uint index, bool *hasProperty)
+{
+ return Value::undefinedValue();
+}
+
+void RegExp::put(Managed *m, ExecutionContext *ctx, String *name, const Value &value)
+{
+}
+
+void RegExp::putIndexed(Managed *m, ExecutionContext *ctx, uint index, const Value &value)
+{
+}
+
+PropertyAttributes RegExp::query(Managed *m, ExecutionContext *ctx, String *name)
+{
+ return Attr_Invalid;
+}
+
+PropertyAttributes RegExp::queryIndexed(Managed *m, ExecutionContext *ctx, uint index)
+{
+ return Attr_Invalid;
+}
+
+bool RegExp::deleteProperty(Managed *m, ExecutionContext *ctx, String *name)
+{
+ return false;
+}
+
+bool RegExp::deleteIndexedProperty(Managed *m, ExecutionContext *ctx, uint index)
+{
+ return false;
+}
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+
diff --git a/src/qml/qml/v4vm/qv4regexp.h b/src/qml/qml/v4vm/qv4regexp.h
new file mode 100644
index 0000000000..b0c95843f3
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4regexp.h
@@ -0,0 +1,149 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4REGEXP_H
+#define QV4REGEXP_H
+
+#include <QString>
+#include <QVector>
+
+#include <wtf/RefPtr.h>
+#include <wtf/FastAllocBase.h>
+#include <wtf/BumpPointerAllocator.h>
+
+#include <limits.h>
+
+#include <yarr/Yarr.h>
+#include <yarr/YarrInterpreter.h>
+
+#include "qv4managed.h"
+#include "qv4engine.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct ExecutionEngine;
+
+// Lookup key for the engine's RegExp cache: pattern text plus the two flags
+// that affect compilation.
+struct RegExpCacheKey
+{
+ RegExpCacheKey(const QString &pattern, bool ignoreCase, bool multiLine)
+ : pattern(pattern)
+ , ignoreCase(ignoreCase)
+ , multiLine(multiLine)
+ { }
+ explicit inline RegExpCacheKey(const RegExp *re);
+
+ bool operator==(const RegExpCacheKey &other) const
+ { return pattern == other.pattern && ignoreCase == other.ignoreCase && multiLine == other.multiLine; }
+ bool operator!=(const RegExpCacheKey &other) const
+ { return !operator==(other); }
+
+ QString pattern;
+ uint ignoreCase : 1;
+ uint multiLine : 1;
+};
+
+// Hash on the pattern only; flag collisions are resolved by operator==.
+inline uint qHash(const RegExpCacheKey& key, uint seed = 0) Q_DECL_NOTHROW
+{ return qHash(key.pattern, seed); }
+
+// QHash whose destructor detaches all cached RegExps (see qv4regexp.cpp).
+class RegExpCache : public QHash<RegExpCacheKey, RegExp*>
+{
+public:
+ ~RegExpCache();
+};
+
+// A GC-managed, Yarr-compiled regular expression shared between RegExpObject
+// instances via an engine-wide cache. Obtain instances through create().
+class RegExp : public Managed
+{
+public:
+ static RegExp* create(ExecutionEngine* engine, const QString& pattern, bool ignoreCase = false, bool multiline = false);
+ ~RegExp();
+
+ QString pattern() const { return m_pattern; }
+
+ // False when the pattern failed to compile (see the constructor).
+ bool isValid() const { return m_byteCode.get(); }
+
+ uint match(const QString& string, int start, uint *matchOffsets) const;
+
+ bool ignoreCase() const { return m_ignoreCase; }
+ bool multiLine() const { return m_multiLine; }
+ // Subpatterns plus one slot for the whole match.
+ int captureCount() const { return m_subPatternCount + 1; }
+
+protected:
+ static const ManagedVTable static_vtbl;
+ static void destroy(Managed *that);
+ static void markObjects(Managed *that);
+ // Property access through the Managed interface is stubbed out;
+ // see qv4regexp.cpp.
+ static Value get(Managed *m, ExecutionContext *ctx, String *name, bool *hasProperty);
+ static Value getIndexed(Managed *m, ExecutionContext *ctx, uint index, bool *hasProperty);
+ static void put(Managed *m, ExecutionContext *ctx, String *name, const Value &value);
+ static void putIndexed(Managed *m, ExecutionContext *ctx, uint index, const Value &value);
+ static PropertyAttributes query(Managed *m, ExecutionContext *ctx, String *name);
+ static PropertyAttributes queryIndexed(Managed *m, ExecutionContext *ctx, uint index);
+ static bool deleteProperty(Managed *m, ExecutionContext *ctx, String *name);
+ static bool deleteIndexedProperty(Managed *m, ExecutionContext *ctx, uint index);
+
+
+private:
+ friend class RegExpCache;
+ Q_DISABLE_COPY(RegExp);
+ RegExp(ExecutionEngine* engine, const QString& pattern, bool ignoreCase, bool multiline);
+
+ const QString m_pattern;
+ OwnPtr<JSC::Yarr::BytecodePattern> m_byteCode;
+ RegExpCache *m_cache; // owning cache, or 0 once detached
+ int m_subPatternCount;
+ const bool m_ignoreCase;
+ const bool m_multiLine;
+};
+
+// Builds the cache key a live RegExp was registered under (used on removal).
+inline RegExpCacheKey::RegExpCacheKey(const RegExp *re)
+ : pattern(re->pattern())
+ , ignoreCase(re->ignoreCase())
+ , multiLine(re->multiLine())
+{}
+
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4REGEXP_H
diff --git a/src/qml/qml/v4vm/qv4regexpobject.cpp b/src/qml/qml/v4vm/qv4regexpobject.cpp
new file mode 100644
index 0000000000..3119382e69
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4regexpobject.cpp
@@ -0,0 +1,256 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4regexpobject.h"
+#include "qv4jsir_p.h"
+#include "qv4isel_p.h"
+#include "qv4objectproto.h"
+#include "qv4stringobject.h"
+#include "qv4mm.h"
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include "private/qlocale_tools_p.h"
+
+#include <QtCore/qmath.h>
+#include <QtCore/QDebug>
+#include <cassert>
+#include <typeinfo>
+#include <iostream>
+#include "qv4alloca_p.h"
+
+using namespace QQmlJS::VM;
+
+DEFINE_MANAGED_VTABLE(RegExpObject);
+
+// Script-visible RegExp instance wrapping a shared VM::RegExp.
+// 'lastIndex' must be the FIRST member inserted: lastIndexProperty() reads
+// memberData[0] directly.
+RegExpObject::RegExpObject(ExecutionEngine *engine, RegExp* value, bool global)
+ : Object(engine)
+ , value(value)
+ , global(global)
+{
+ vtbl = &static_vtbl;
+ type = Type_RegExpObject;
+
+ Property *lastIndexProperty = insertMember(engine->newIdentifier(QStringLiteral("lastIndex")),
+ Attr_NotEnumerable|Attr_NotConfigurable);
+ lastIndexProperty->value = Value::fromInt32(0);
+ if (!this->value)
+ return;
+ // Spec-defined read-only informational properties.
+ defineReadonlyProperty(engine->newIdentifier(QStringLiteral("source")), Value::fromString(engine->newString(this->value->pattern())));
+ defineReadonlyProperty(engine->newIdentifier(QStringLiteral("global")), Value::fromBoolean(global));
+ defineReadonlyProperty(engine->newIdentifier(QStringLiteral("ignoreCase")), Value::fromBoolean(this->value->ignoreCase()));
+ defineReadonlyProperty(engine->newIdentifier(QStringLiteral("multiline")), Value::fromBoolean(this->value->multiLine()));
+}
+
+// Managed vtable hook: dispatches to the real destructor.
+void RegExpObject::destroy(Managed *that)
+{
+ static_cast<RegExpObject *>(that)->~RegExpObject();
+}
+
+// GC hook: keep the shared compiled RegExp alive, then mark Object members.
+void RegExpObject::markObjects(Managed *that)
+{
+ RegExpObject *re = static_cast<RegExpObject*>(that);
+ if (re->value)
+ re->value->mark();
+ Object::markObjects(that);
+}
+
+// Fast access to the mutable 'lastIndex' slot; the assert verifies it is
+// still at member index 0 (guaranteed by the constructor's insertion order).
+Property *RegExpObject::lastIndexProperty(ExecutionContext *ctx)
+{
+ assert(0 == internalClass->find(ctx->engine->newIdentifier(QStringLiteral("lastIndex"))));
+ return &memberData[0];
+}
+
+DEFINE_MANAGED_VTABLE(RegExpCtor);
+
+// The RegExp constructor function object (callable and constructible).
+RegExpCtor::RegExpCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+// 'new RegExp(pattern, flags)' — Section 15.10.4.1.
+// A RegExpObject argument with flags present is a TypeError; without flags
+// it is cloned. Otherwise the pattern is stringified and the flags string
+// is parsed ('g', 'i', 'm'; duplicates or unknown letters -> SyntaxError).
+Value RegExpCtor::construct(Managed *, ExecutionContext *ctx, Value *argv, int argc)
+{
+ Value r = argc > 0 ? argv[0] : Value::undefinedValue();
+ Value f = argc > 1 ? argv[1] : Value::undefinedValue();
+ if (RegExpObject *re = r.asRegExpObject()) {
+ if (!f.isUndefined())
+ ctx->throwTypeError();
+
+ RegExpObject *o = ctx->engine->newRegExpObject(re->value, re->global);
+ return Value::fromObject(o);
+ }
+
+ if (r.isUndefined())
+ r = Value::fromString(ctx, QString());
+ else if (!r.isString())
+ r = __qmljs_to_string(r, ctx);
+
+ bool global = false;
+ bool ignoreCase = false;
+ bool multiLine = false;
+ if (!f.isUndefined()) {
+ f = __qmljs_to_string(f, ctx);
+ QString str = f.stringValue()->toQString();
+ for (int i = 0; i < str.length(); ++i) {
+ if (str.at(i) == QChar('g') && !global) {
+ global = true;
+ } else if (str.at(i) == QChar('i') && !ignoreCase) {
+ ignoreCase = true;
+ } else if (str.at(i) == QChar('m') && !multiLine) {
+ multiLine = true;
+ } else {
+ ctx->throwSyntaxError(0);
+ }
+ }
+ }
+
+ RegExp* re = RegExp::create(ctx->engine, r.stringValue()->toQString(), ignoreCase, multiLine);
+ if (!re->isValid())
+ ctx->throwSyntaxError(0);
+
+ RegExpObject *o = ctx->engine->newRegExpObject(re, global);
+ return Value::fromObject(o);
+}
+
+// 'RegExp(...)' called as a function — Section 15.10.3.1: if the argument is
+// already a RegExpObject and flags are undefined, return it unchanged;
+// otherwise behave exactly like 'new RegExp(...)'.
+Value RegExpCtor::call(Managed *that, ExecutionContext *ctx, const Value &thisObject, Value *argv, int argc)
+{
+ if (argc > 0 && argv[0].asRegExpObject()) {
+ if (argc == 1 || argv[1].isUndefined())
+ return argv[0];
+ }
+
+ return construct(that, ctx, argv, argc);
+}
+
+// Installs RegExp.prototype's methods and cross-links it with the ctor
+// (ctor.prototype / prototype.constructor, plus ctor.length == 2).
+void RegExpPrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(2));
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("exec"), method_exec, 1);
+ defineDefaultProperty(ctx, QStringLiteral("test"), method_test, 1);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString, 0);
+ defineDefaultProperty(ctx, QStringLiteral("compile"), method_compile, 2);
+}
+
+// RegExp.prototype.exec — Section 15.10.6.2. Returns a match-result array
+// (captures + 'index' + 'input') or null; for global regexps the search
+// resumes at and updates 'lastIndex'.
+Value RegExpPrototype::method_exec(SimpleCallContext *ctx)
+{
+ RegExpObject *r = ctx->thisObject.asRegExpObject();
+ if (!r)
+ ctx->throwTypeError();
+
+ Value arg = ctx->argument(0);
+ arg = __qmljs_to_string(arg, ctx);
+ QString s = arg.stringValue()->toQString();
+
+ int offset = r->global ? r->lastIndexProperty(ctx)->value.toInt32() : 0;
+ if (offset < 0 || offset > s.length()) {
+ r->lastIndexProperty(ctx)->value = Value::fromInt32(0);
+ return Value::nullValue();
+ }
+
+ uint* matchOffsets = (uint*)alloca(r->value->captureCount() * 2 * sizeof(uint));
+ // NOTE(review): match() returns uint (Yarr offsetNoMatch == (unsigned)-1);
+ // storing into int makes the -1 comparison below work — confirm intended.
+ int result = r->value->match(s, offset, matchOffsets);
+ if (result == -1) {
+ r->lastIndexProperty(ctx)->value = Value::fromInt32(0);
+ return Value::nullValue();
+ }
+
+ // fill in result data
+ ArrayObject *array = ctx->engine->newArrayObject(ctx)->asArrayObject();
+ for (int i = 0; i < r->value->captureCount(); ++i) {
+ int start = matchOffsets[i * 2];
+ int end = matchOffsets[i * 2 + 1];
+ // Unmatched capture groups yield undefined entries.
+ Value entry = Value::undefinedValue();
+ if (start != -1 && end != -1)
+ entry = Value::fromString(ctx, s.mid(start, end - start));
+ array->push_back(entry);
+ }
+
+ array->put(ctx, QLatin1String("index"), Value::fromInt32(result));
+ array->put(ctx, QLatin1String("input"), arg);
+
+ if (r->global)
+ r->lastIndexProperty(ctx)->value = Value::fromInt32(matchOffsets[1]);
+
+ return Value::fromObject(array);
+}
+
+// RegExp.prototype.test — Section 15.10.6.3: true iff exec() is not null.
+Value RegExpPrototype::method_test(SimpleCallContext *ctx)
+{
+ Value r = method_exec(ctx);
+ return Value::fromBoolean(!r.isNull());
+}
+
+// RegExp.prototype.toString — formats as /pattern/flags.
+// The 'g' flag is knowingly omitted here (see the ### marker below).
+Value RegExpPrototype::method_toString(SimpleCallContext *ctx)
+{
+ RegExpObject *r = ctx->thisObject.asRegExpObject();
+ if (!r)
+ ctx->throwTypeError();
+
+ QString result = QChar('/') + r->value->pattern();
+ result += QChar('/');
+ // ### 'g' option missing
+ if (r->value->ignoreCase())
+ result += QChar('i');
+ if (r->value->multiLine())
+ result += QChar('m');
+ return Value::fromString(ctx, result);
+}
+
+// RegExp.prototype.compile (non-standard legacy method): rebuilds this
+// regexp in place by constructing a temporary via the RegExp constructor
+// and adopting its compiled pattern and global flag.
+Value RegExpPrototype::method_compile(SimpleCallContext *ctx)
+{
+ RegExpObject *r = ctx->thisObject.asRegExpObject();
+ if (!r)
+ ctx->throwTypeError();
+
+ RegExpObject *re = ctx->engine->regExpCtor.asFunctionObject()->construct(ctx, ctx->arguments, ctx->argumentCount).asRegExpObject();
+
+ r->value = re->value;
+ r->global = re->global;
+ return Value::undefinedValue();
+}
+
diff --git a/src/qml/qml/v4vm/qv4regexpobject.h b/src/qml/qml/v4vm/qv4regexpobject.h
new file mode 100644
index 0000000000..df4190ba77
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4regexpobject.h
@@ -0,0 +1,108 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4REGEXPOBJECT_H
+#define QV4REGEXPOBJECT_H
+
+#include "qv4runtime.h"
+#include "qv4engine.h"
+#include "qv4context.h"
+#include "qv4functionobject.h"
+#include "qv4string.h"
+#include "qv4codegen_p.h"
+#include "qv4isel_p.h"
+#include "qv4managed.h"
+#include "qv4property.h"
+#include "qv4objectiterator.h"
+#include "qv4regexp.h"
+
+#include <QtCore/QString>
+#include <QtCore/QHash>
+#include <QtCore/QScopedPointer>
+#include <cstdio>
+#include <cassert>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+// Script-visible RegExp instance: wraps a shared, cached VM::RegExp plus the
+// per-object 'global' flag and mutable 'lastIndex' state.
+struct RegExpObject: Object {
+ RegExp* value; // shared compiled pattern (GC-marked in markObjects)
+ Property *lastIndexProperty(ExecutionContext *ctx);
+ bool global;
+ RegExpObject(ExecutionEngine *engine, RegExp* value, bool global);
+ ~RegExpObject() {}
+
+protected:
+ static const ManagedVTable static_vtbl;
+ static void destroy(Managed *that);
+ static void markObjects(Managed *that);
+};
+
+
+// The RegExp constructor: supports both 'new RegExp(...)' (construct) and
+// plain 'RegExp(...)' calls (call).
+struct RegExpCtor: FunctionObject
+{
+ RegExpCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *that, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+// RegExp.prototype: itself a RegExpObject (with an empty pattern), carrying
+// the standard prototype methods.
+struct RegExpPrototype: RegExpObject
+{
+ RegExpPrototype(ExecutionEngine* engine): RegExpObject(engine, RegExp::create(engine, QString()), false) {}
+ void init(ExecutionContext *ctx, const Value &ctor);
+
+ static Value method_exec(SimpleCallContext *ctx);
+ static Value method_test(SimpleCallContext *ctx);
+ static Value method_toString(SimpleCallContext *ctx);
+ static Value method_compile(SimpleCallContext *ctx);
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QMLJS_OBJECTS_H
diff --git a/src/qml/qml/v4vm/qv4runtime.cpp b/src/qml/qml/v4vm/qv4runtime.cpp
new file mode 100644
index 0000000000..9cd92677f6
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4runtime.cpp
@@ -0,0 +1,1319 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4global.h"
+#include "debugging.h"
+#include "qv4runtime.h"
+#include "qv4object.h"
+#include "qv4jsir_p.h"
+#include "qv4objectproto.h"
+#include "qv4globalobject.h"
+#include "qv4stringobject.h"
+#include "qv4lookup.h"
+#include "private/qlocale_tools_p.h"
+
+#include <QtCore/qmath.h>
+#include <QtCore/qnumeric.h>
+#include <QtCore/QDebug>
+#include <cstdio>
+#include <cassert>
+#include <typeinfo>
+#include <stdlib.h>
+
+#include "../3rdparty/double-conversion/double-conversion.h"
+
+#if USE(LIBUNWIND_DEBUG)
+#include <libunwind.h>
+#include <execinfo.h>
+#endif
+
+namespace QQmlJS {
+namespace VM {
+
+QString numberToString(double num, int radix = 10)
+{
+ if (isnan(num)) {
+ return QStringLiteral("NaN");
+ } else if (qIsInf(num)) {
+ return QLatin1String(num < 0 ? "-Infinity" : "Infinity");
+ }
+
+ if (radix == 10) {
+ char str[100];
+ double_conversion::StringBuilder builder(str, sizeof(str));
+ double_conversion::DoubleToStringConverter::EcmaScriptConverter().ToShortest(num, &builder);
+ return QString::fromLatin1(builder.Finalize());
+ }
+
+ QString str;
+ bool negative = false;
+
+ if (num < 0) {
+ negative = true;
+ num = -num;
+ }
+
+ double frac = num - ::floor(num);
+ num = Value::toInteger(num);
+
+ do {
+ char c = (char)::fmod(num, radix);
+ c = (c < 10) ? (c + '0') : (c - 10 + 'a');
+ str.prepend(QLatin1Char(c));
+ num = ::floor(num / radix);
+ } while (num != 0);
+
+ if (frac != 0) {
+ str.append(QLatin1Char('.'));
+ do {
+ frac = frac * radix;
+ char c = (char)::floor(frac);
+ c = (c < 10) ? (c + '0') : (c - 10 + 'a');
+ str.append(QLatin1Char(c));
+ frac = frac - ::floor(frac);
+ } while (frac != 0);
+ }
+
+ if (negative)
+ str.prepend(QLatin1Char('-'));
+
+ return str;
+}
+
+Exception::Exception(ExecutionContext *throwingContext, const Value &exceptionValue)
+{
+ this->throwingContext = throwingContext->engine->current;
+ this->exception = PersistentValue(throwingContext->engine->memoryManager, exceptionValue);
+ accepted = false;
+}
+
+Exception::~Exception()
+{
+ assert(accepted);
+}
+
+void Exception::accept(ExecutionContext *catchingContext)
+{
+ assert(!accepted);
+ accepted = true;
+ partiallyUnwindContext(catchingContext);
+}
+
+void Exception::partiallyUnwindContext(ExecutionContext *catchingContext)
+{
+ if (!throwingContext)
+ return;
+ ExecutionContext *context = throwingContext;
+ while (context != catchingContext)
+ context = context->engine->popContext();
+ throwingContext = context;
+}
+
+extern "C" {
+
+void __qmljs_init_closure(ExecutionContext *ctx, Value *result, VM::Function *clos)
+{
+ assert(clos);
+ *result = Value::fromObject(ctx->engine->newScriptFunction(ctx, clos));
+}
+
+Function *__qmljs_register_function(ExecutionContext *ctx, String *name,
+ bool hasDirectEval,
+ bool usesArgumentsObject, bool isStrict,
+ bool hasNestedFunctions,
+ String **formals, unsigned formalCount,
+ String **locals, unsigned localCount)
+{
+ Function *f = ctx->engine->newFunction(name ? name->toQString() : QString());
+
+ f->hasDirectEval = hasDirectEval;
+ f->usesArgumentsObject = usesArgumentsObject;
+ f->isStrict = isStrict;
+ f->hasNestedFunctions = hasNestedFunctions;
+
+ for (unsigned i = 0; i < formalCount; ++i)
+ if (formals[i])
+ f->formals.append(formals[i]);
+ for (unsigned i = 0; i < localCount; ++i)
+ if (locals[i])
+ f->locals.append(locals[i]);
+
+ return f;
+}
+
+void __qmljs_delete_subscript(ExecutionContext *ctx, Value *result, const Value &base, const Value &index)
+{
+ if (Object *o = base.asObject()) {
+ uint n = index.asArrayIndex();
+ if (n < UINT_MAX) {
+ Value res = Value::fromBoolean(o->deleteIndexedProperty(ctx, n));
+ if (result)
+ *result = res;
+ return;
+ }
+ }
+
+ String *name = index.toString(ctx);
+ __qmljs_delete_member(ctx, result, base, name);
+}
+
+void __qmljs_delete_member(ExecutionContext *ctx, Value *result, const Value &base, String *name)
+{
+ Object *obj = base.toObject(ctx);
+ Value res = Value::fromBoolean(obj->deleteProperty(ctx, name));
+ if (result)
+ *result = res;
+}
+
+void __qmljs_delete_name(ExecutionContext *ctx, Value *result, String *name)
+{
+ Value res = Value::fromBoolean(ctx->deleteProperty(name));
+ if (result)
+ *result = res;
+}
+
+void __qmljs_add_helper(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ Value pleft = __qmljs_to_primitive(left, PREFERREDTYPE_HINT);
+ Value pright = __qmljs_to_primitive(right, PREFERREDTYPE_HINT);
+ if (pleft.isString() || pright.isString()) {
+ if (!pleft.isString())
+ pleft = __qmljs_to_string(pleft, ctx);
+ if (!pright.isString())
+ pright = __qmljs_to_string(pright, ctx);
+ String *string = __qmljs_string_concat(ctx, pleft.stringValue(), pright.stringValue());
+ *result = Value::fromString(string);
+ return;
+ }
+ double x = __qmljs_to_number(pleft);
+ double y = __qmljs_to_number(pright);
+ *result = Value::fromDouble(x + y);
+}
+
+void __qmljs_instanceof(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ Object *o = right.asObject();
+ if (!o)
+ ctx->throwTypeError();
+
+ bool r = o->hasInstance(ctx, left);
+ *result = Value::fromBoolean(r);
+}
+
+void __qmljs_in(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ if (!right.isObject())
+ ctx->throwTypeError();
+ String *s = left.toString(ctx);
+ bool r = right.objectValue()->__hasProperty__(s);
+ *result = Value::fromBoolean(r);
+}
+
+void __qmljs_inplace_bit_and_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_bit_and);
+}
+
+void __qmljs_inplace_bit_or_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_bit_or);
+}
+
+void __qmljs_inplace_bit_xor_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_bit_xor);
+}
+
+void __qmljs_inplace_add_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_add);
+}
+
+void __qmljs_inplace_sub_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_sub);
+}
+
+void __qmljs_inplace_mul_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_mul);
+}
+
+void __qmljs_inplace_div_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_div);
+}
+
+void __qmljs_inplace_mod_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_mod);
+}
+
+void __qmljs_inplace_shl_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_shl);
+}
+
+void __qmljs_inplace_shr_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_shr);
+}
+
+void __qmljs_inplace_ushr_name(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->inplaceBitOp(name, value, __qmljs_ushr);
+}
+
+void __qmljs_inplace_bit_and_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_bit_and, index, rhs);
+}
+
+void __qmljs_inplace_bit_or_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_bit_or, index, rhs);
+}
+
+void __qmljs_inplace_bit_xor_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_bit_xor, index, rhs);
+}
+
+void __qmljs_inplace_add_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_add, index, rhs);
+}
+
+void __qmljs_inplace_sub_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_sub, index, rhs);
+}
+
+void __qmljs_inplace_mul_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_mul, index, rhs);
+}
+
+void __qmljs_inplace_div_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_div, index, rhs);
+}
+
+void __qmljs_inplace_mod_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_mod, index, rhs);
+}
+
+void __qmljs_inplace_shl_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_shl, index, rhs);
+}
+
+void __qmljs_inplace_shr_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_shr, index, rhs);
+}
+
+void __qmljs_inplace_ushr_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs)
+{
+ Object *obj = base.toObject(ctx);
+ obj->inplaceBinOp(ctx, __qmljs_ushr, index, rhs);
+}
+
+void __qmljs_inplace_bit_and_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_bit_and, name, rhs);
+}
+
+void __qmljs_inplace_bit_or_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_bit_or, name, rhs);
+}
+
+void __qmljs_inplace_bit_xor_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_bit_xor, name, rhs);
+}
+
+void __qmljs_inplace_add_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_add, name, rhs);
+}
+
+void __qmljs_inplace_sub_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_sub, name, rhs);
+}
+
+void __qmljs_inplace_mul_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_mul, name, rhs);
+}
+
+void __qmljs_inplace_div_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_div, name, rhs);
+}
+
+void __qmljs_inplace_mod_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_mod, name, rhs);
+}
+
+void __qmljs_inplace_shl_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_shl, name, rhs);
+}
+
+void __qmljs_inplace_shr_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_shr, name, rhs);
+}
+
+void __qmljs_inplace_ushr_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs)
+{
+ Object *o = base.toObject(ctx);
+ o->inplaceBinOp(ctx, __qmljs_ushr, name, rhs);
+}
+
+double __qmljs_string_to_number(const String *string)
+{
+ QString s = string->toQString();
+ s = s.trimmed();
+ if (s.startsWith(QLatin1String("0x")) || s.startsWith(QLatin1String("0X")))
+ return s.toLong(0, 16);
+ bool ok;
+ QByteArray ba = s.toLatin1();
+ const char *begin = ba.constData();
+ const char *end = 0;
+ double d = qstrtod(begin, &end, &ok);
+ if (end - begin != ba.size()) {
+ if (ba == "Infinity" || ba == "+Infinity")
+ d = Q_INFINITY;
+ else if (ba == "-Infinity")
+ d = -Q_INFINITY;
+ else
+ d = std::numeric_limits<double>::quiet_NaN();
+ }
+ return d;
+}
+
+Value __qmljs_string_from_number(ExecutionContext *ctx, double number)
+{
+ String *string = ctx->engine->newString(numberToString(number, 10));
+ return Value::fromString(string);
+}
+
+String *__qmljs_string_concat(ExecutionContext *ctx, String *first, String *second)
+{
+ const QString &a = first->toQString();
+ const QString &b = second->toQString();
+ QString newStr(a.length() + b.length(), Qt::Uninitialized);
+ QChar *data = newStr.data();
+ memcpy(data, a.constData(), a.length()*sizeof(QChar));
+ data += a.length();
+ memcpy(data, b.constData(), b.length()*sizeof(QChar));
+
+ return ctx->engine->newString(newStr);
+}
+
+Value __qmljs_object_default_value(Object *object, int typeHint)
+{
+ if (typeHint == PREFERREDTYPE_HINT) {
+ if (object->asDateObject())
+ typeHint = STRING_HINT;
+ else
+ typeHint = NUMBER_HINT;
+ }
+
+ ExecutionEngine *engine = object->internalClass->engine;
+ String *meth1 = engine->newString("toString");
+ String *meth2 = engine->newString("valueOf");
+
+ if (typeHint == NUMBER_HINT)
+ qSwap(meth1, meth2);
+
+ ExecutionContext *ctx = engine->current;
+
+ Value conv = object->get(ctx, meth1);
+ if (FunctionObject *o = conv.asFunctionObject()) {
+ Value r = o->call(ctx, Value::fromObject(object), 0, 0);
+ if (r.isPrimitive())
+ return r;
+ }
+
+ conv = object->get(ctx, meth2);
+ if (FunctionObject *o = conv.asFunctionObject()) {
+ Value r = o->call(ctx, Value::fromObject(object), 0, 0);
+ if (r.isPrimitive())
+ return r;
+ }
+
+ ctx->throwTypeError();
+ return Value::undefinedValue();
+}
+
+Bool __qmljs_to_boolean(const Value &value)
+{
+ return value.toBoolean();
+}
+
+
+Object *__qmljs_convert_to_object(ExecutionContext *ctx, const Value &value)
+{
+ assert(!value.isObject());
+ switch (value.type()) {
+ case Value::Undefined_Type:
+ case Value::Null_Type:
+ ctx->throwTypeError();
+ case Value::Boolean_Type:
+ return ctx->engine->newBooleanObject(value);
+ case Value::String_Type:
+ return ctx->engine->newStringObject(ctx, value);
+ break;
+ case Value::Object_Type:
+ Q_UNREACHABLE();
+ case Value::Integer_Type:
+ default: // double
+ return ctx->engine->newNumberObject(value);
+ }
+}
+
+String *__qmljs_convert_to_string(ExecutionContext *ctx, const Value &value)
+{
+ switch (value.type()) {
+ case Value::Undefined_Type:
+ return ctx->engine->id_undefined;
+ case Value::Null_Type:
+ return ctx->engine->id_null;
+ case Value::Boolean_Type:
+ if (value.booleanValue())
+ return ctx->engine->id_true;
+ else
+ return ctx->engine->id_false;
+ case Value::String_Type:
+ return value.stringValue();
+ case Value::Object_Type: {
+ Value prim = __qmljs_to_primitive(value, STRING_HINT);
+ if (prim.isPrimitive())
+ return __qmljs_convert_to_string(ctx, prim);
+ else
+ ctx->throwTypeError();
+ }
+ case Value::Integer_Type:
+ return __qmljs_string_from_number(ctx, value.int_32).stringValue();
+ default: // double
+ return __qmljs_string_from_number(ctx, value.doubleValue()).stringValue();
+ } // switch
+}
+
+void __qmljs_set_property(ExecutionContext *ctx, const Value &object, String *name, const Value &value)
+{
+ Object *o = object.toObject(ctx);
+ o->put(ctx, name, value);
+}
+
+void __qmljs_get_element(ExecutionContext *ctx, Value *result, const Value &object, const Value &index)
+{
+ uint idx = index.asArrayIndex();
+
+ Object *o = object.asObject();
+ if (!o) {
+ if (idx < UINT_MAX) {
+ if (String *str = object.asString()) {
+ if (idx >= (uint)str->toQString().length()) {
+ if (result)
+ *result = Value::undefinedValue();
+ return;
+ }
+ const QString s = str->toQString().mid(idx, 1);
+ if (result)
+ *result = Value::fromString(ctx, s);
+ return;
+ }
+ }
+
+ o = __qmljs_convert_to_object(ctx, object);
+ }
+
+ if (idx < UINT_MAX) {
+ uint pidx = o->propertyIndexFromArrayIndex(idx);
+ if (pidx < UINT_MAX) {
+ if (!o->arrayAttributes || o->arrayAttributes[pidx].isData()) {
+ if (result)
+ *result = o->arrayData[pidx].value;
+ return;
+ }
+ }
+
+ Value res = o->getIndexed(ctx, idx);
+ if (result)
+ *result = res;
+ return;
+ }
+
+ String *name = index.toString(ctx);
+ Value res = o->get(ctx, name);
+ if (result)
+ *result = res;
+}
+
+void __qmljs_set_element(ExecutionContext *ctx, const Value &object, const Value &index, const Value &value)
+{
+ Object *o = object.toObject(ctx);
+
+ uint idx = index.asArrayIndex();
+ if (idx < UINT_MAX) {
+ uint pidx = o->propertyIndexFromArrayIndex(idx);
+ if (pidx < UINT_MAX) {
+ if (o->arrayAttributes && !o->arrayAttributes[pidx].isEmpty() && !o->arrayAttributes[pidx].isWritable()) {
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+ return;
+ }
+
+ Property *p = o->arrayData + pidx;
+ if (!o->arrayAttributes || o->arrayAttributes[pidx].isData()) {
+ p->value = value;
+ return;
+ }
+
+ if (o->arrayAttributes[pidx].isAccessor()) {
+ FunctionObject *setter = p->setter();
+ if (!setter) {
+ if (ctx->strictMode)
+ ctx->throwTypeError();
+ return;
+ }
+
+ Value args[1];
+ args[0] = value;
+ setter->call(ctx, Value::fromObject(o), args, 1);
+ return;
+ }
+ }
+ o->putIndexed(ctx, idx, value);
+ return;
+ }
+
+ String *name = index.toString(ctx);
+ o->put(ctx, name, value);
+}
+
+void __qmljs_foreach_iterator_object(ExecutionContext *ctx, Value *result, const Value &in)
+{
+ Object *o = 0;
+ if (!in.isNull() && !in.isUndefined())
+ o = in.toObject(ctx);
+ Object *it = ctx->engine->newForEachIteratorObject(ctx, o);
+ *result = Value::fromObject(it);
+}
+
+void __qmljs_foreach_next_property_name(Value *result, const Value &foreach_iterator)
+{
+ assert(foreach_iterator.isObject());
+
+ ForEachIteratorObject *it = static_cast<ForEachIteratorObject *>(foreach_iterator.objectValue());
+ assert(it->asForeachIteratorObject());
+
+ *result = it->nextPropertyName();
+}
+
+
+void __qmljs_set_activation_property(ExecutionContext *ctx, String *name, const Value &value)
+{
+ ctx->setProperty(name, value);
+}
+
+void __qmljs_get_property(ExecutionContext *ctx, Value *result, const Value &object, String *name)
+{
+ Value res;
+ Managed *m = object.asManaged();
+ if (m) {
+ res = m->get(ctx, name);
+ } else {
+ m = __qmljs_convert_to_object(ctx, object);
+ res = m->get(ctx, name);
+ }
+ if (result)
+ *result = res;
+}
+
+void __qmljs_get_activation_property(ExecutionContext *ctx, Value *result, String *name)
+{
+ *result = ctx->getProperty(name);
+}
+
+void __qmljs_get_global_lookup(ExecutionContext *ctx, Value *result, int lookupIndex)
+{
+ Lookup *l = ctx->lookups + lookupIndex;
+ l->lookupGlobal(l, ctx, result);
+}
+
+void __qmljs_get_property_lookup(ExecutionContext *ctx, Value *result, const Value &object, int lookupIndex)
+{
+ Lookup *l = ctx->lookups + lookupIndex;
+ l->lookupProperty(l, ctx, result, object);
+}
+
+void __qmljs_set_property_lookup(ExecutionContext *ctx, const Value &object, int lookupIndex, const Value &value)
+{
+ Object *o = object.toObject(ctx);
+ Lookup *l = ctx->lookups + lookupIndex;
+
+ PropertyAttributes attrs;
+ Property *p = l->setterLookup(o, &attrs);
+ if (p && (l->index != ArrayObject::LengthPropertyIndex || !o->isArrayObject())) {
+ o->putValue(ctx, p, attrs, value);
+ return;
+ }
+
+ o->put(ctx, l->name, value);
+}
+
+
+uint __qmljs_equal(const Value &x, const Value &y)
+{
+ if (x.type() == y.type()) {
+ switch (x.type()) {
+ case Value::Undefined_Type:
+ return true;
+ case Value::Null_Type:
+ return true;
+ case Value::Boolean_Type:
+ return x.booleanValue() == y.booleanValue();
+ break;
+ case Value::Integer_Type:
+ return x.integerValue() == y.integerValue();
+ case Value::String_Type:
+ return x.stringValue()->isEqualTo(y.stringValue());
+ case Value::Object_Type:
+ if (x.objectValue()->externalComparison || y.objectValue()->externalComparison)
+ return x.objectValue()->externalComparison && y.objectValue()->externalComparison
+ && x.objectValue()->internalClass->engine->externalResourceComparison(x, y);
+ return x.objectValue() == y.objectValue();
+ default: // double
+ return x.doubleValue() == y.doubleValue();
+ }
+ // unreachable
+ } else {
+ if (x.isNumber() && y.isNumber())
+ return x.asDouble() == y.asDouble();
+ if (x.isNull() && y.isUndefined()) {
+ return true;
+ } else if (x.isUndefined() && y.isNull()) {
+ return true;
+ } else if (x.isNumber() && y.isString()) {
+ Value ny = Value::fromDouble(__qmljs_to_number(y));
+ return __qmljs_equal(x, ny);
+ } else if (x.isString() && y.isNumber()) {
+ Value nx = Value::fromDouble(__qmljs_to_number(x));
+ return __qmljs_equal(nx, y);
+ } else if (x.isBoolean()) {
+ Value nx = Value::fromDouble((double) x.booleanValue());
+ return __qmljs_equal(nx, y);
+ } else if (y.isBoolean()) {
+ Value ny = Value::fromDouble((double) y.booleanValue());
+ return __qmljs_equal(x, ny);
+ } else if ((x.isNumber() || x.isString()) && y.isObject()) {
+ Value py = __qmljs_to_primitive(y, PREFERREDTYPE_HINT);
+ return __qmljs_equal(x, py);
+ } else if (x.isObject() && (y.isNumber() || y.isString())) {
+ Value px = __qmljs_to_primitive(x, PREFERREDTYPE_HINT);
+ return __qmljs_equal(px, y);
+ }
+ }
+
+ return false;
+}
+
+Bool __qmljs_strict_equal(const Value &x, const Value &y)
+{
+ TRACE2(x, y);
+
+ if (x.isDouble() || y.isDouble())
+ return x.asDouble() == y.asDouble();
+ if (x.rawValue() == y.rawValue())
+ return true;
+ if (x.type() != y.type())
+ return false;
+ if (x.isString())
+ return x.stringValue()->isEqualTo(y.stringValue());
+ if (x.isObject() && x.objectValue()->externalComparison && y.objectValue()->externalComparison)
+ return x.objectValue()->internalClass->engine->externalResourceComparison(x, y);
+ return false;
+}
+
+
+void __qmljs_call_global_lookup(ExecutionContext *context, Value *result, uint index, Value *args, int argc)
+{
+ Lookup *l = context->lookups + index;
+ Value v;
+ l->lookupGlobal(l, context, &v);
+ FunctionObject *o = v.asFunctionObject();
+ if (!o)
+ context->throwTypeError();
+
+ Value thisObject = Value::undefinedValue();
+
+ if (o == context->engine->evalFunction && l->name->isEqualTo(context->engine->id_eval)) {
+ Value res = static_cast<EvalFunction *>(o)->evalCall(context, thisObject, args, argc, true);
+ if (result)
+ *result = res;
+ return;
+ }
+
+ Value res = o->call(context, thisObject, args, argc);
+ if (result)
+ *result = res;
+}
+
+
+void __qmljs_call_activation_property(ExecutionContext *context, Value *result, String *name, Value *args, int argc)
+{
+ Object *base;
+ Value func = context->getPropertyAndBase(name, &base);
+ FunctionObject *o = func.asFunctionObject();
+ if (!o)
+ context->throwTypeError();
+
+ Value thisObject = base ? Value::fromObject(base) : Value::undefinedValue();
+
+ if (o == context->engine->evalFunction && name->isEqualTo(context->engine->id_eval)) {
+ Value res = static_cast<EvalFunction *>(o)->evalCall(context, thisObject, args, argc, true);
+ if (result)
+ *result = res;
+ return;
+ }
+
+ Value res = o->call(context, thisObject, args, argc);
+ if (result)
+ *result = res;
+}
+
+void __qmljs_call_property(ExecutionContext *context, Value *result, const Value &thatObject, String *name, Value *args, int argc)
+{
+ Value thisObject = thatObject;
+ Managed *baseObject = thisObject.asManaged();
+ if (!baseObject) {
+ baseObject = __qmljs_convert_to_object(context, thisObject);
+ thisObject = Value::fromObject(static_cast<Object *>(baseObject));
+ }
+
+ Value func = baseObject->get(context, name);
+ FunctionObject *o = func.asFunctionObject();
+ if (!o)
+ context->throwTypeError();
+
+ Value res = o->call(context, thisObject, args, argc);
+ if (result)
+ *result = res;
+}
+
+void __qmljs_call_property_lookup(ExecutionContext *context, Value *result, const Value &thisObject, uint index, Value *args, int argc)
+{
+ Lookup *l = context->lookups + index;
+
+ Object *baseObject;
+ if (thisObject.isObject())
+ baseObject = thisObject.objectValue();
+ else if (thisObject.isString())
+ baseObject = context->engine->stringPrototype;
+ else
+ baseObject = __qmljs_convert_to_object(context, thisObject);
+
+ PropertyAttributes attrs;
+ Property *p = l->lookup(baseObject, &attrs);
+ if (!p)
+ context->throwTypeError();
+ Value func = attrs.isData() ? p->value : baseObject->getValue(context, p, attrs);
+ FunctionObject *o = func.asFunctionObject();
+ if (!o)
+ context->throwTypeError();
+
+ Value res = o->call(context, thisObject, args, argc);
+ if (result)
+ *result = res;
+}
+
+void __qmljs_call_element(ExecutionContext *context, Value *result, const Value &that, const Value &index, Value *args, int argc)
+{
+ Object *baseObject = that.toObject(context);
+ Value thisObject = Value::fromObject(baseObject);
+
+ Value func = baseObject->get(context, index.toString(context));
+ Object *o = func.asObject();
+ if (!o)
+ context->throwTypeError();
+
+ Value res = o->call(context, thisObject, args, argc);
+ if (result)
+ *result = res;
+}
+
+void __qmljs_call_value(ExecutionContext *context, Value *result, const Value *thisObject, const Value &func, Value *args, int argc)
+{
+ Object *o = func.asObject();
+ if (!o)
+ context->throwTypeError();
+ Value res = o->call(context, thisObject ? *thisObject : Value::undefinedValue(), args, argc);
+ if (result)
+ *result = res;
+}
+
+
+void __qmljs_construct_global_lookup(ExecutionContext *context, Value *result, uint index, Value *args, int argc)
+{
+ Lookup *l = context->lookups + index;
+ Value func;
+ l->lookupGlobal(l, context, &func);
+
+ if (Object *f = func.asObject()) {
+ Value res = f->construct(context, args, argc);
+ if (result)
+ *result = res;
+ return;
+ }
+
+ context->throwTypeError();
+}
+
+
+void __qmljs_construct_activation_property(ExecutionContext *context, Value *result, String *name, Value *args, int argc)
+{
+ Value func = context->getProperty(name);
+ __qmljs_construct_value(context, result, func, args, argc);
+}
+
+void __qmljs_construct_value(ExecutionContext *context, Value *result, const Value &func, Value *args, int argc)
+{
+ if (Object *f = func.asObject()) {
+ Value res = f->construct(context, args, argc);
+ if (result)
+ *result = res;
+ return;
+ }
+
+ context->throwTypeError();
+}
+
+void __qmljs_construct_property(ExecutionContext *context, Value *result, const Value &base, String *name, Value *args, int argc)
+{
+ Object *thisObject = base.toObject(context);
+
+ Value func = thisObject->get(context, name);
+ if (Object *f = func.asObject()) {
+ Value res = f->construct(context, args, argc);
+ if (result)
+ *result = res;
+ return;
+ }
+
+ context->throwTypeError();
+}
+
+void __qmljs_throw(ExecutionContext *context, const Value &value)
+{
+ if (context->engine->debugger)
+ context->engine->debugger->aboutToThrow(value);
+
+#if USE(LIBUNWIND_DEBUG)
+ printf("about to throw exception. walking stack first with libunwind:\n");
+ unw_cursor_t cursor; unw_context_t uc;
+ unw_word_t ip, sp;
+
+ unw_getcontext(&uc);
+ unw_init_local(&cursor, &uc);
+ while (unw_step(&cursor) > 0) {
+ unw_get_reg(&cursor, UNW_REG_IP, &ip);
+ unw_get_reg(&cursor, UNW_REG_SP, &sp);
+ printf("ip = %lx, sp = %lx ", (long) ip, (long) sp);
+ void * const addr = (void*)ip;
+ char **symbol = backtrace_symbols(&addr, 1);
+ printf("%s", symbol[0]);
+ free(symbol);
+ printf("\n");
+ }
+ printf("stack walked. throwing exception now...\n");
+#endif
+
+ throw Exception(context, value);
+}
+
+void __qmljs_builtin_typeof(ExecutionContext *ctx, Value *result, const Value &value)
+{
+ if (!result)
+ return;
+ String *res = 0;
+ switch (value.type()) {
+ case Value::Undefined_Type:
+ res = ctx->engine->id_undefined;
+ break;
+ case Value::Null_Type:
+ res = ctx->engine->id_object;
+ break;
+ case Value::Boolean_Type:
+ res = ctx->engine->id_boolean;
+ break;
+ case Value::String_Type:
+ res = ctx->engine->id_string;
+ break;
+ case Value::Object_Type:
+ if (value.objectValue()->asFunctionObject())
+ res = ctx->engine->id_function;
+ else
+ res = ctx->engine->id_object; // ### implementation-defined
+ break;
+ default:
+ res = ctx->engine->id_number;
+ break;
+ }
+ *result = Value::fromString(res);
+}
+
+void __qmljs_builtin_typeof_name(ExecutionContext *context, Value *result, String *name)
+{
+ Value res;
+ __qmljs_builtin_typeof(context, &res, context->getPropertyNoThrow(name));
+ if (result)
+ *result = res;
+}
+
+void __qmljs_builtin_typeof_member(ExecutionContext *context, Value *result, const Value &base, String *name)
+{
+ Object *obj = base.toObject(context);
+ Value res;
+ __qmljs_builtin_typeof(context, &res, obj->get(context, name));
+ if (result)
+ *result = res;
+}
+
+void __qmljs_builtin_typeof_element(ExecutionContext *context, Value *result, const Value &base, const Value &index)
+{
+ String *name = index.toString(context);
+ Object *obj = base.toObject(context);
+ Value res;
+ __qmljs_builtin_typeof(context, &res, obj->get(context, name));
+ if (result)
+ *result = res;
+}
+
+void __qmljs_builtin_post_increment(Value *result, Value *val)
+{
+ if (val->isInteger() && val->integerValue() < INT_MAX) {
+ if (result)
+ *result = *val;
+ val->int_32 += 1;
+ return;
+ }
+
+ double d = __qmljs_to_number(*val);
+ *val = Value::fromDouble(d + 1);
+ if (result)
+ *result = Value::fromDouble(d);
+}
+
+void __qmljs_builtin_post_increment_name(ExecutionContext *context, Value *result, String *name)
+{
+ Value v = context->getProperty(name);
+
+ if (v.isInteger() && v.integerValue() < INT_MAX) {
+ if (result)
+ *result = v;
+ v.int_32 += 1;
+ } else {
+ double d = __qmljs_to_number(v);
+ if (result)
+ *result = Value::fromDouble(d);
+ v = Value::fromDouble(d + 1);
+ }
+
+ context->setProperty(name, v);
+}
+
+void __qmljs_builtin_post_increment_member(ExecutionContext *context, Value *result, const Value &base, String *name)
+{
+ Object *o = base.toObject(context);
+
+ Value v = o->get(context, name);
+
+ if (v.isInteger() && v.integerValue() < INT_MAX) {
+ if (result)
+ *result = v;
+ v.int_32 += 1;
+ } else {
+ double d = __qmljs_to_number(v);
+ if (result)
+ *result = Value::fromDouble(d);
+ v = Value::fromDouble(d + 1);
+ }
+
+ o->put(context, name, v);
+}
+
+void __qmljs_builtin_post_increment_element(ExecutionContext *context, Value *result, const Value &base, const Value *index)
+{
+ Object *o = base.toObject(context);
+
+ uint idx = index->asArrayIndex();
+
+ if (idx == UINT_MAX) {
+ String *s = index->toString(context);
+ return __qmljs_builtin_post_increment_member(context, result, base, s);
+ }
+
+ Value v = o->getIndexed(context, idx);
+
+ if (v.isInteger() && v.integerValue() < INT_MAX) {
+ if (result)
+ *result = v;
+ v.int_32 += 1;
+ } else {
+ double d = __qmljs_to_number(v);
+ if (result)
+ *result = Value::fromDouble(d);
+ v = Value::fromDouble(d + 1);
+ }
+
+ o->putIndexed(context, idx, v);
+}
+
+void __qmljs_builtin_post_decrement(Value *result, Value *val)
+{
+ if (val->isInteger() && val->integerValue() > INT_MIN) {
+ if (result)
+ *result = *val;
+ val->int_32 -= 1;
+ return;
+ }
+
+ double d = __qmljs_to_number(*val);
+ *val = Value::fromDouble(d - 1);
+ if (result)
+ *result = Value::fromDouble(d);
+}
+
+void __qmljs_builtin_post_decrement_name(ExecutionContext *context, Value *result, String *name)
+{
+ Value v = context->getProperty(name);
+
+ if (v.isInteger() && v.integerValue() > INT_MIN) {
+ if (result)
+ *result = v;
+ v.int_32 -= 1;
+ } else {
+ double d = __qmljs_to_number(v);
+ if (result)
+ *result = Value::fromDouble(d);
+ v = Value::fromDouble(d - 1);
+ }
+
+ context->setProperty(name, v);
+}
+
+void __qmljs_builtin_post_decrement_member(ExecutionContext *context, Value *result, const Value &base, String *name)
+{
+ Object *o = base.toObject(context);
+
+ Value v = o->get(context, name);
+
+ if (v.isInteger() && v.integerValue() > INT_MIN) {
+ if (result)
+ *result = v;
+ v.int_32 -= 1;
+ } else {
+ double d = __qmljs_to_number(v);
+ if (result)
+ *result = Value::fromDouble(d);
+ v = Value::fromDouble(d - 1);
+ }
+
+ o->put(context, name, v);
+}
+
+void __qmljs_builtin_post_decrement_element(ExecutionContext *context, Value *result, const Value &base, const Value &index)
+{
+ Object *o = base.toObject(context);
+
+ uint idx = index.asArrayIndex();
+
+ if (idx == UINT_MAX) {
+ String *s = index.toString(context);
+ return __qmljs_builtin_post_decrement_member(context, result, base, s);
+ }
+
+ Value v = o->getIndexed(context, idx);
+
+ if (v.isInteger() && v.integerValue() > INT_MIN) {
+ if (result)
+ *result = v;
+ v.int_32 -= 1;
+ } else {
+ double d = __qmljs_to_number(v);
+ if (result)
+ *result = Value::fromDouble(d);
+ v = Value::fromDouble(d - 1);
+ }
+
+ o->putIndexed(context, idx, v);
+}
+
+void __qmljs_builtin_throw(ExecutionContext *context, const Value &val)
+{
+ __qmljs_throw(context, val);
+}
+
+ExecutionContext *__qmljs_builtin_push_with_scope(const Value &o, ExecutionContext *ctx)
+{
+ Object *obj = o.toObject(ctx);
+ return ctx->engine->newWithContext(obj);
+}
+
+ExecutionContext *__qmljs_builtin_push_catch_scope(String *exceptionVarName, const Value &exceptionValue, ExecutionContext *ctx)
+{
+ return ctx->engine->newCatchContext(exceptionVarName, exceptionValue);
+}
+
+ExecutionContext *__qmljs_builtin_pop_scope(ExecutionContext *ctx)
+{
+ return ctx->engine->popContext();
+}
+
+void __qmljs_builtin_declare_var(ExecutionContext *ctx, bool deletable, String *name)
+{
+ ctx->createMutableBinding(name, deletable);
+}
+
+void __qmljs_builtin_define_property(ExecutionContext *ctx, const Value &object, String *name, Value *val)
+{
+ Object *o = object.asObject();
+ assert(o);
+
+ uint idx = name->asArrayIndex();
+ Property *pd = (idx != UINT_MAX) ? o->arrayInsert(idx) : o->insertMember(name, Attr_Data);
+ pd->value = val ? *val : Value::undefinedValue();
+}
+
+void __qmljs_builtin_define_array(ExecutionContext *ctx, Value *array, Value *values, uint length)
+{
+ ArrayObject *a = ctx->engine->newArrayObject(ctx);
+
+ // ### FIXME: We need to allocate the array data to avoid crashes other places
+ // This should rather be done when required
+ a->arrayReserve(length);
+ if (length) {
+ a->arrayDataLen = length;
+ Property *pd = a->arrayData;
+ for (uint i = 0; i < length; ++i) {
+ if (values[i].isDeleted()) {
+ a->ensureArrayAttributes();
+ pd->value = Value::undefinedValue();
+ a->arrayAttributes[i].clear();
+ } else {
+ pd->value = values[i];
+ }
+ ++pd;
+ }
+ a->setArrayLengthUnchecked(length);
+ }
+ *array = Value::fromObject(a);
+}
+
+void __qmljs_builtin_define_getter_setter(ExecutionContext *ctx, const Value &object, String *name, const Value *getter, const Value *setter)
+{
+ Object *o = object.asObject();
+ assert(o);
+
+ uint idx = name->asArrayIndex();
+ Property *pd = (idx != UINT_MAX) ? o->arrayInsert(idx, Attr_Accessor) : o->insertMember(name, Attr_Accessor);
+ pd->setGetter(getter ? getter->asFunctionObject() : 0);
+ pd->setSetter(setter ? setter->asFunctionObject() : 0);
+}
+
+void __qmljs_increment(Value *result, const Value &value)
+{
+ TRACE1(value);
+
+ if (value.isInteger())
+ *result = Value::fromInt32(value.integerValue() + 1);
+ else {
+ double d = __qmljs_to_number(value);
+ *result = Value::fromDouble(d + 1);
+ }
+}
+
+void __qmljs_decrement(Value *result, const Value &value)
+{
+ TRACE1(value);
+
+ if (value.isInteger())
+ *result = Value::fromInt32(value.integerValue() - 1);
+ else {
+ double d = __qmljs_to_number(value);
+ *result = Value::fromDouble(d - 1);
+ }
+}
+
+} // extern "C"
+
+
+} // namespace VM
+} // namespace QQmlJS
diff --git a/src/qml/qml/v4vm/qv4runtime.h b/src/qml/qml/v4vm/qv4runtime.h
new file mode 100644
index 0000000000..6e64c44e69
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4runtime.h
@@ -0,0 +1,745 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QMLJS_RUNTIME_H
+#define QMLJS_RUNTIME_H
+
+#include "qv4global.h"
+#include "qv4value.h"
+#include "qv4math.h"
+
+
+#include <QtCore/QString>
+#include <QtCore/qnumeric.h>
+#include <QtCore/QDebug>
+
+#include <cmath>
+#include <cassert>
+
+#include <wtf/MathExtras.h>
+
+#ifdef DO_TRACE_INSTR
+# define TRACE1(x) fprintf(stderr, " %s\n", __FUNCTION__);
+# define TRACE2(x, y) fprintf(stderr, " %s\n", __FUNCTION__);
+#else
+# define TRACE1(x)
+# define TRACE2(x, y)
+#endif // TRACE1
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+enum TypeHint {
+ PREFERREDTYPE_HINT,
+ NUMBER_HINT,
+ STRING_HINT
+};
+
+struct Function;
+struct Object;
+struct String;
+struct ExecutionContext;
+struct FunctionObject;
+struct BooleanObject;
+struct NumberObject;
+struct StringObject;
+struct DateObject;
+struct RegExpObject;
+struct ArrayObject;
+struct ErrorObject;
+struct ExecutionEngine;
+
+struct Q_V4_EXPORT Exception {
+ explicit Exception(ExecutionContext *throwingContext, const Value &exceptionValue);
+ ~Exception();
+
+ void accept(ExecutionContext *catchingContext);
+
+ void partiallyUnwindContext(ExecutionContext *catchingContext);
+
+ Value value() const { return exception; }
+
+private:
+ ExecutionContext *throwingContext;
+ bool accepted;
+ PersistentValue exception;
+};
+
+extern "C" {
+
+// context
+void __qmljs_call_activation_property(ExecutionContext *, Value *result, String *name, Value *args, int argc);
+void __qmljs_call_property(ExecutionContext *context, Value *result, const Value &that, String *name, Value *args, int argc);
+void __qmljs_call_property_lookup(ExecutionContext *context, Value *result, const Value &thisObject, uint index, Value *args, int argc);
+void __qmljs_call_element(ExecutionContext *context, Value *result, const Value &that, const Value &index, Value *args, int argc);
+void __qmljs_call_value(ExecutionContext *context, Value *result, const Value *thisObject, const Value &func, Value *args, int argc);
+
+void __qmljs_construct_activation_property(ExecutionContext *, Value *result, String *name, Value *args, int argc);
+void __qmljs_construct_property(ExecutionContext *context, Value *result, const Value &base, String *name, Value *args, int argc);
+void __qmljs_construct_value(ExecutionContext *context, Value *result, const Value &func, Value *args, int argc);
+
+void __qmljs_builtin_typeof(ExecutionContext *ctx, Value *result, const Value &val);
+void __qmljs_builtin_typeof_name(ExecutionContext *context, Value* result, String *name);
+void __qmljs_builtin_typeof_member(ExecutionContext* context, Value* result, const Value &base, String *name);
+void __qmljs_builtin_typeof_element(ExecutionContext* context, Value *result, const Value &base, const Value &index);
+
+void __qmljs_builtin_post_increment(Value *result, Value *val);
+void __qmljs_builtin_post_increment_name(ExecutionContext *context, Value *result, String *name);
+void __qmljs_builtin_post_increment_member(ExecutionContext *context, Value *result, const Value &base, String *name);
+void __qmljs_builtin_post_increment_element(ExecutionContext *context, Value *result, const Value &base, const Value *index);
+
+void __qmljs_builtin_post_decrement(Value *result, Value *val);
+void __qmljs_builtin_post_decrement_name(ExecutionContext *context, Value *result, String *name);
+void __qmljs_builtin_post_decrement_member(ExecutionContext *context, Value *result, const Value &base, String *name);
+void __qmljs_builtin_post_decrement_element(ExecutionContext *context, Value *result, const Value &base, const Value &index);
+
+void Q_NORETURN __qmljs_builtin_throw(ExecutionContext *context, const Value &val);
+void Q_NORETURN __qmljs_builtin_rethrow(ExecutionContext *context);
+ExecutionContext *__qmljs_builtin_push_with_scope(const Value &o, ExecutionContext *ctx);
+ExecutionContext *__qmljs_builtin_push_catch_scope(String *exceptionVarName, const QQmlJS::VM::Value &exceptionValue, ExecutionContext *ctx);
+ExecutionContext *__qmljs_builtin_pop_scope(ExecutionContext *ctx);
+void __qmljs_builtin_declare_var(ExecutionContext *ctx, bool deletable, String *name);
+void __qmljs_builtin_define_property(ExecutionContext *ctx, const Value &object, String *name, Value *val);
+void __qmljs_builtin_define_array(ExecutionContext *ctx, Value *array, QQmlJS::VM::Value *values, uint length);
+void __qmljs_builtin_define_getter_setter(ExecutionContext *ctx, const Value &object, String *name, const Value *getter, const Value *setter);
+
+// constructors
+void __qmljs_init_closure(ExecutionContext *ctx, Value *result, VM::Function *clos);
+VM::Function *__qmljs_register_function(ExecutionContext *ctx, String *name,
+ bool hasDirectEval,
+ bool usesArgumentsObject, bool isStrict,
+ bool hasNestedFunctions,
+ String **formals, unsigned formalCount,
+ String **locals, unsigned localCount);
+
+
+// strings
+double __qmljs_string_to_number(const String *string);
+Value __qmljs_string_from_number(ExecutionContext *ctx, double number);
+String *__qmljs_string_concat(ExecutionContext *ctx, String *first, String *second);
+
+// objects
+Value __qmljs_object_default_value(Object *object, int typeHint);
+void __qmljs_set_activation_property(ExecutionContext *ctx, String *name, const Value& value);
+void __qmljs_set_property(ExecutionContext *ctx, const Value &object, String *name, const Value &value);
+void __qmljs_get_property(ExecutionContext *ctx, Value *result, const Value &object, String *name);
+void __qmljs_get_activation_property(ExecutionContext *ctx, Value *result, String *name);
+
+void __qmljs_get_global_lookup(ExecutionContext *ctx, Value *result, int lookupIndex);
+void __qmljs_call_global_lookup(ExecutionContext *context, Value *result, uint index, Value *args, int argc);
+void __qmljs_construct_global_lookup(ExecutionContext *context, Value *result, uint index, Value *args, int argc);
+void __qmljs_get_property_lookup(ExecutionContext *ctx, Value *result, const Value &object, int lookupIndex);
+void __qmljs_set_property_lookup(ExecutionContext *ctx, const Value &object, int lookupIndex, const Value &value);
+
+
+void __qmljs_get_element(ExecutionContext *ctx, Value *retval, const Value &object, const Value &index);
+void __qmljs_set_element(ExecutionContext *ctx, const Value &object, const Value &index, const Value &value);
+
+// For each
+void __qmljs_foreach_iterator_object(ExecutionContext *ctx, Value *result, const Value &in);
+void __qmljs_foreach_next_property_name(Value *result, const Value &foreach_iterator);
+
+// type conversion and testing
+Value __qmljs_to_primitive(const Value &value, int typeHint);
+Bool __qmljs_to_boolean(const Value &value);
+double __qmljs_to_number(const Value &value);
+Value __qmljs_to_string(const Value &value, ExecutionContext *ctx);
+Q_V4_EXPORT String *__qmljs_convert_to_string(ExecutionContext *ctx, const Value &value);
+Value __qmljs_to_object(ExecutionContext *ctx, const Value &value);
+Object *__qmljs_convert_to_object(ExecutionContext *ctx, const Value &value);
+
+Bool __qmljs_equal(const Value &x, const Value &y);
+Bool __qmljs_strict_equal(const Value &x, const Value &y);
+
+// unary operators
+typedef void (*UnaryOpName)(Value *, const Value &);
+void __qmljs_uplus(Value *result, const Value &value);
+void __qmljs_uminus(Value *result, const Value &value);
+void __qmljs_compl(Value *result, const Value &value);
+void __qmljs_not(Value *result, const Value &value);
+void __qmljs_increment(Value *result, const Value &value);
+void __qmljs_decrement(Value *result, const Value &value);
+
+void __qmljs_delete_subscript(ExecutionContext *ctx, Value *result, const Value &base, const Value &index);
+void __qmljs_delete_member(ExecutionContext *ctx, Value *result, const Value &base, String *name);
+void __qmljs_delete_name(ExecutionContext *ctx, Value *result, String *name);
+
+void Q_NORETURN __qmljs_throw(ExecutionContext*, const Value &value);
+
+// binary operators
+typedef void (*BinOp)(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+
+void __qmljs_instanceof(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_in(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_bit_or(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_bit_xor(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_bit_and(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_add(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_sub(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_mul(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_div(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_mod(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_shl(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_shr(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_ushr(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_gt(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_lt(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_ge(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_le(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_eq(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_ne(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+void __qmljs_se(ExecutionContext *, Value *result, const Value &left, const Value &right);
+void __qmljs_sne(ExecutionContext *, Value *result, const Value &left, const Value &right);
+
+void __qmljs_add_helper(ExecutionContext *ctx, Value *result, const Value &left, const Value &right);
+
+
+typedef void (*InplaceBinOpName)(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_bit_and_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_bit_or_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_bit_xor_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_add_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_sub_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_mul_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_div_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_mod_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_shl_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_shr_name(ExecutionContext *ctx, String *name, const Value &value);
+void __qmljs_inplace_ushr_name(ExecutionContext *ctx, String *name, const Value &value);
+
+typedef void (*InplaceBinOpElement)(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_bit_and_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_bit_or_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_bit_xor_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_add_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_sub_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_mul_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_div_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_mod_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_shl_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_shr_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+void __qmljs_inplace_ushr_element(ExecutionContext *ctx, const Value &base, const Value &index, const Value &rhs);
+
+typedef void (*InplaceBinOpMember)(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_bit_and_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_bit_or_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_bit_xor_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_add_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_sub_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_mul_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_div_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_mod_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_shl_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_shr_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+void __qmljs_inplace_ushr_member(ExecutionContext *ctx, const Value &base, String *name, const Value &rhs);
+
+typedef Bool (*CmpOp)(ExecutionContext *ctx, const Value &left, const Value &right);
+Bool __qmljs_cmp_gt(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_lt(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_ge(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_le(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_eq(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_ne(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_se(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_sne(ExecutionContext *, const Value &left, const Value &right);
+Bool __qmljs_cmp_instanceof(ExecutionContext *ctx, const Value &left, const Value &right);
+Bool __qmljs_cmp_in(ExecutionContext *ctx, const Value &left, const Value &right);
+
+// type conversion and testing
+inline Value __qmljs_to_primitive(const Value &value, int typeHint)
+{
+ Object *o = value.asObject();
+ if (!o)
+ return value;
+ return __qmljs_object_default_value(o, typeHint);
+}
+
+inline double __qmljs_to_number(const Value &value)
+{
+ switch (value.type()) {
+ case Value::Undefined_Type:
+ return std::numeric_limits<double>::quiet_NaN();
+ case Value::Null_Type:
+ return 0;
+ case Value::Boolean_Type:
+ return (value.booleanValue() ? 1. : 0.);
+ case Value::Integer_Type:
+ return value.int_32;
+ case Value::String_Type:
+ return __qmljs_string_to_number(value.stringValue());
+ case Value::Object_Type: {
+ Value prim = __qmljs_to_primitive(value, NUMBER_HINT);
+ return __qmljs_to_number(prim);
+ }
+ default: // double
+ return value.doubleValue();
+ }
+}
+
+inline Value __qmljs_to_string(const Value &value, ExecutionContext *ctx)
+{
+ if (value.isString())
+ return value;
+ return Value::fromString(__qmljs_convert_to_string(ctx, value));
+}
+
+inline Value __qmljs_to_object(ExecutionContext *ctx, const Value &value)
+{
+ if (value.isObject())
+ return value;
+ return Value::fromObject(__qmljs_convert_to_object(ctx, value));
+}
+
+
+inline void __qmljs_uplus(Value *result, const Value &value)
+{
+ TRACE1(value);
+
+ *result = value;
+ if (result->tryIntegerConversion())
+ return;
+
+ double n = __qmljs_to_number(value);
+ *result = Value::fromDouble(n);
+}
+
+inline void __qmljs_uminus(Value *result, const Value &value)
+{
+ TRACE1(value);
+
+ // +0 != -0, so we need to convert to double when negating 0
+ if (value.isInteger() && value.integerValue())
+ *result = Value::fromInt32(-value.integerValue());
+ else {
+ double n = __qmljs_to_number(value);
+ *result = Value::fromDouble(-n);
+ }
+}
+
+inline void __qmljs_compl(Value *result, const Value &value)
+{
+ TRACE1(value);
+
+ int n;
+ if (value.isConvertibleToInt())
+ n = value.int_32;
+ else
+ n = Value::toInt32(__qmljs_to_number(value));
+
+ *result = Value::fromInt32(~n);
+}
+
+inline void __qmljs_not(Value *result, const Value &value)
+{
+ TRACE1(value);
+
+ bool b = value.toBoolean();
+ *result = Value::fromBoolean(!b);
+}
+
+// binary operators
+inline void __qmljs_bit_or(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ if (Value::integerCompatible(left, right)) {
+ *result = Value::fromInt32(left.integerValue() | right.integerValue());
+ return;
+ }
+
+ int lval = Value::toInt32(__qmljs_to_number(left));
+ int rval = Value::toInt32(__qmljs_to_number(right));
+ *result = Value::fromInt32(lval | rval);
+}
+
+inline void __qmljs_bit_xor(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ if (Value::integerCompatible(left, right)) {
+ *result = Value::fromInt32(left.integerValue() ^ right.integerValue());
+ return;
+ }
+
+ int lval = Value::toInt32(__qmljs_to_number(left));
+ int rval = Value::toInt32(__qmljs_to_number(right));
+ *result = Value::fromInt32(lval ^ rval);
+}
+
+inline void __qmljs_bit_and(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ if (Value::integerCompatible(left, right)) {
+ *result = Value::fromInt32(left.integerValue() & right.integerValue());
+ return;
+ }
+
+ int lval = Value::toInt32(__qmljs_to_number(left));
+ int rval = Value::toInt32(__qmljs_to_number(right));
+ *result = Value::fromInt32(lval & rval);
+}
+
+inline void __qmljs_add(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+#ifdef QMLJS_INLINE_MATH
+ if (Value::integerCompatible(left, right)) {
+ *result = add_int32(left.integerValue(), right.integerValue());
+ return;
+ }
+#endif
+
+ if (Value::bothDouble(left, right)) {
+ *result = Value::fromDouble(left.doubleValue() + right.doubleValue());
+ return;
+ }
+
+ __qmljs_add_helper(ctx, result, left, right);
+}
+
+inline void __qmljs_sub(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+#ifdef QMLJS_INLINE_MATH
+ if (Value::integerCompatible(left, right)) {
+ *result = sub_int32(left.integerValue(), right.integerValue());
+ return;
+ }
+#endif
+
+ double lval = __qmljs_to_number(left);
+ double rval = __qmljs_to_number(right);
+ *result = Value::fromDouble(lval - rval);
+}
+
+inline void __qmljs_mul(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+#ifdef QMLJS_INLINE_MATH
+ if (Value::integerCompatible(left, right)) {
+ *result = mul_int32(left.integerValue(), right.integerValue());
+ return;
+ }
+#endif
+
+ double lval = __qmljs_to_number(left);
+ double rval = __qmljs_to_number(right);
+ *result = Value::fromDouble(lval * rval);
+}
+
+inline void __qmljs_div(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ double lval = __qmljs_to_number(left);
+ double rval = __qmljs_to_number(right);
+ *result = Value::fromDouble(lval / rval);
+}
+
+inline void __qmljs_mod(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ if (Value::integerCompatible(left, right) && right.integerValue() != 0) {
+ int intRes = left.integerValue() % right.integerValue();
+ if (intRes != 0 || left.integerValue() >= 0) {
+ *result = Value::fromInt32(intRes);
+ return;
+ }
+ }
+
+ double lval = __qmljs_to_number(left);
+ double rval = __qmljs_to_number(right);
+ *result = Value::fromDouble(fmod(lval, rval));
+}
+
+inline void __qmljs_shl(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ if (Value::integerCompatible(left, right)) {
+ *result = Value::fromInt32(left.integerValue() << ((uint(right.integerValue()) & 0x1f)));
+ return;
+ }
+
+ int lval = Value::toInt32(__qmljs_to_number(left));
+ unsigned rval = Value::toUInt32(__qmljs_to_number(right)) & 0x1f;
+ *result = Value::fromInt32(lval << rval);
+}
+
+inline void __qmljs_shr(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ if (Value::integerCompatible(left, right)) {
+ *result = Value::fromInt32(left.integerValue() >> ((uint(right.integerValue()) & 0x1f)));
+ return;
+ }
+
+ int lval = Value::toInt32(__qmljs_to_number(left));
+ unsigned rval = Value::toUInt32(__qmljs_to_number(right)) & 0x1f;
+ *result = Value::fromInt32(lval >> rval);
+}
+
+inline void __qmljs_ushr(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ uint res;
+ if (Value::integerCompatible(left, right)) {
+ res = uint(left.integerValue()) >> (uint(right.integerValue()) & 0x1f);
+ } else {
+ unsigned lval = Value::toUInt32(__qmljs_to_number(left));
+ unsigned rval = Value::toUInt32(__qmljs_to_number(right)) & 0x1f;
+ res = lval >> rval;
+ }
+
+ if (res > INT_MAX)
+ *result = Value::fromDouble(res);
+ else
+ *result = Value::fromInt32(res);
+}
+
+inline void __qmljs_gt(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ *result = Value::fromBoolean(__qmljs_cmp_gt(ctx, left, right));
+}
+
+inline void __qmljs_lt(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ *result = Value::fromBoolean(__qmljs_cmp_lt(ctx, left, right));
+}
+
+inline void __qmljs_ge(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ *result = Value::fromBoolean(__qmljs_cmp_ge(ctx, left, right));
+}
+
+inline void __qmljs_le(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ *result = Value::fromBoolean(__qmljs_cmp_le(ctx, left, right));
+}
+
+inline void __qmljs_eq(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ *result = Value::fromBoolean(__qmljs_cmp_eq(ctx, left, right));
+}
+
+inline void __qmljs_ne(ExecutionContext *ctx, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ *result = Value::fromBoolean(!__qmljs_cmp_eq(ctx, left, right));
+}
+
+inline void __qmljs_se(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ bool r = __qmljs_strict_equal(left, right);
+ *result = Value::fromBoolean(r);
+}
+
+inline void __qmljs_sne(ExecutionContext *, Value *result, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ bool r = ! __qmljs_strict_equal(left, right);
+ *result = Value::fromBoolean(r);
+}
+
+inline Bool __qmljs_cmp_gt(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+ if (Value::integerCompatible(left, right))
+ return left.integerValue() > right.integerValue();
+
+ Value l = __qmljs_to_primitive(left, NUMBER_HINT);
+ Value r = __qmljs_to_primitive(right, NUMBER_HINT);
+
+ if (Value::bothDouble(l, r)) {
+ return l.doubleValue() > r.doubleValue();
+ } else if (l.isString() && r.isString()) {
+ return r.stringValue()->compare(l.stringValue());
+ } else {
+ double dl = __qmljs_to_number(l);
+ double dr = __qmljs_to_number(r);
+ return dl > dr;
+ }
+}
+
+inline Bool __qmljs_cmp_lt(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+ if (Value::integerCompatible(left, right))
+ return left.integerValue() < right.integerValue();
+
+ Value l = __qmljs_to_primitive(left, NUMBER_HINT);
+ Value r = __qmljs_to_primitive(right, NUMBER_HINT);
+
+ if (Value::bothDouble(l, r)) {
+ return l.doubleValue() < r.doubleValue();
+ } else if (l.isString() && r.isString()) {
+ return l.stringValue()->compare(r.stringValue());
+ } else {
+ double dl = __qmljs_to_number(l);
+ double dr = __qmljs_to_number(r);
+ return dl < dr;
+ }
+}
+
+inline Bool __qmljs_cmp_ge(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+ if (Value::integerCompatible(left, right))
+ return left.integerValue() >= right.integerValue();
+
+ Value l = __qmljs_to_primitive(left, NUMBER_HINT);
+ Value r = __qmljs_to_primitive(right, NUMBER_HINT);
+
+ if (Value::bothDouble(l, r)) {
+ return l.doubleValue() >= r.doubleValue();
+ } else if (l.isString() && r.isString()) {
+ return !l.stringValue()->compare(r.stringValue());
+ } else {
+ double dl = __qmljs_to_number(l);
+ double dr = __qmljs_to_number(r);
+ return dl >= dr;
+ }
+}
+
+inline Bool __qmljs_cmp_le(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+ if (Value::integerCompatible(left, right))
+ return left.integerValue() <= right.integerValue();
+
+ Value l = __qmljs_to_primitive(left, NUMBER_HINT);
+ Value r = __qmljs_to_primitive(right, NUMBER_HINT);
+
+ if (Value::bothDouble(l, r)) {
+ return l.doubleValue() <= r.doubleValue();
+ } else if (l.isString() && r.isString()) {
+ return !r.stringValue()->compare(l.stringValue());
+ } else {
+ double dl = __qmljs_to_number(l);
+ double dr = __qmljs_to_number(r);
+ return dl <= dr;
+ }
+}
+
+inline Bool __qmljs_cmp_eq(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ // need to test for doubles first as NaN != NaN
+ if (Value::bothDouble(left, right))
+ return left.doubleValue() == right.doubleValue();
+ if (left.val == right.val)
+ return true;
+ if (left.isString() && right.isString())
+ return left.stringValue()->isEqualTo(right.stringValue());
+
+ return __qmljs_equal(left, right);
+}
+
+inline Bool __qmljs_cmp_ne(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ return !__qmljs_cmp_eq(0, left, right);
+}
+
+inline Bool __qmljs_cmp_se(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ return __qmljs_strict_equal(left, right);
+}
+
+inline Bool __qmljs_cmp_sne(ExecutionContext *, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ return ! __qmljs_strict_equal(left, right);
+}
+
+inline Bool __qmljs_cmp_instanceof(ExecutionContext *ctx, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ Value v;
+ __qmljs_instanceof(ctx, &v, left, right);
+ return v.booleanValue();
+}
+
+inline uint __qmljs_cmp_in(ExecutionContext *ctx, const Value &left, const Value &right)
+{
+ TRACE2(left, right);
+
+ Value v;
+ __qmljs_in(ctx, &v, left, right);
+ return v.booleanValue();
+}
+
+} // extern "C"
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QMLJS_RUNTIME_H
diff --git a/src/qml/qml/v4vm/qv4sparsearray.cpp b/src/qml/qml/v4vm/qv4sparsearray.cpp
new file mode 100644
index 0000000000..2c9075ba5b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4sparsearray.cpp
@@ -0,0 +1,464 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4sparsearray.h"
+#include "qv4runtime.h"
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <stdlib.h>
+
+#ifdef QT_QMAP_DEBUG
+# include <qstring.h>
+# include <qvector.h>
+#endif
+
+namespace QQmlJS {
+namespace VM {
+
+bool ArrayElementLessThan::operator()(const Property &p1, const Property &p2) const
+{
+ Value v1 = p1.value;
+ Value v2 = p2.value;
+
+ if (v1.isUndefined())
+ return false;
+ if (v2.isUndefined())
+ return true;
+ if (!m_comparefn.isUndefined()) {
+ Value args[] = { v1, v2 };
+ Value result = Value::undefinedValue();
+ __qmljs_call_value(m_context, &result, /*thisObject*/0, m_comparefn, args, 2);
+ return result.toNumber() <= 0;
+ }
+ return v1.toString(m_context)->toQString() < v2.toString(m_context)->toQString();
+}
+
+
+const SparseArrayNode *SparseArrayNode::nextNode() const
+{
+ const SparseArrayNode *n = this;
+ if (n->right) {
+ n = n->right;
+ while (n->left)
+ n = n->left;
+ } else {
+ const SparseArrayNode *y = n->parent();
+ while (y && n == y->right) {
+ n = y;
+ y = n->parent();
+ }
+ n = y;
+ }
+ return n;
+}
+
+const SparseArrayNode *SparseArrayNode::previousNode() const
+{
+ const SparseArrayNode *n = this;
+ if (n->left) {
+ n = n->left;
+ while (n->right)
+ n = n->right;
+ } else {
+ const SparseArrayNode *y = n->parent();
+ while (y && n == y->left) {
+ n = y;
+ y = n->parent();
+ }
+ n = y;
+ }
+ return n;
+}
+
+SparseArrayNode *SparseArrayNode::copy(SparseArray *d) const
+{
+ SparseArrayNode *n = d->createNode(size_left, 0, false);
+ n->value = value;
+ n->setColor(color());
+ if (left) {
+ n->left = left->copy(d);
+ n->left->setParent(n);
+ } else {
+ n->left = 0;
+ }
+ if (right) {
+ n->right = right->copy(d);
+ n->right->setParent(n);
+ } else {
+ n->right = 0;
+ }
+ return n;
+}
+
+/*
+ x y
+ \ / \
+ y --> x b
+ / \ \
+ a b a
+*/
+void SparseArray::rotateLeft(SparseArrayNode *x)
+{
+ SparseArrayNode *&root = header.left;
+ SparseArrayNode *y = x->right;
+ x->right = y->left;
+ if (y->left != 0)
+ y->left->setParent(x);
+ y->setParent(x->parent());
+ if (x == root)
+ root = y;
+ else if (x == x->parent()->left)
+ x->parent()->left = y;
+ else
+ x->parent()->right = y;
+ y->left = x;
+ x->setParent(y);
+ y->size_left += x->size_left;
+}
+
+
+/*
+ x y
+ / / \
+ y --> a x
+ / \ /
+ a b b
+*/
+void SparseArray::rotateRight(SparseArrayNode *x)
+{
+ SparseArrayNode *&root = header.left;
+ SparseArrayNode *y = x->left;
+ x->left = y->right;
+ if (y->right != 0)
+ y->right->setParent(x);
+ y->setParent(x->parent());
+ if (x == root)
+ root = y;
+ else if (x == x->parent()->right)
+ x->parent()->right = y;
+ else
+ x->parent()->left = y;
+ y->right = x;
+ x->setParent(y);
+ x->size_left -= y->size_left;
+}
+
+
+void SparseArray::rebalance(SparseArrayNode *x)
+{
+ SparseArrayNode *&root = header.left;
+ x->setColor(SparseArrayNode::Red);
+ while (x != root && x->parent()->color() == SparseArrayNode::Red) {
+ if (x->parent() == x->parent()->parent()->left) {
+ SparseArrayNode *y = x->parent()->parent()->right;
+ if (y && y->color() == SparseArrayNode::Red) {
+ x->parent()->setColor(SparseArrayNode::Black);
+ y->setColor(SparseArrayNode::Black);
+ x->parent()->parent()->setColor(SparseArrayNode::Red);
+ x = x->parent()->parent();
+ } else {
+ if (x == x->parent()->right) {
+ x = x->parent();
+ rotateLeft(x);
+ }
+ x->parent()->setColor(SparseArrayNode::Black);
+ x->parent()->parent()->setColor(SparseArrayNode::Red);
+ rotateRight (x->parent()->parent());
+ }
+ } else {
+ SparseArrayNode *y = x->parent()->parent()->left;
+ if (y && y->color() == SparseArrayNode::Red) {
+ x->parent()->setColor(SparseArrayNode::Black);
+ y->setColor(SparseArrayNode::Black);
+ x->parent()->parent()->setColor(SparseArrayNode::Red);
+ x = x->parent()->parent();
+ } else {
+ if (x == x->parent()->left) {
+ x = x->parent();
+ rotateRight(x);
+ }
+ x->parent()->setColor(SparseArrayNode::Black);
+ x->parent()->parent()->setColor(SparseArrayNode::Red);
+ rotateLeft(x->parent()->parent());
+ }
+ }
+ }
+ root->setColor(SparseArrayNode::Black);
+}
+
+void SparseArray::deleteNode(SparseArrayNode *z)
+{
+ SparseArrayNode *&root = header.left;
+ SparseArrayNode *y = z;
+ SparseArrayNode *x;
+ SparseArrayNode *x_parent;
+ if (y->left == 0) {
+ x = y->right;
+ if (y == mostLeftNode) {
+ if (x)
+ mostLeftNode = x; // It cannot have (left) children due the red black invariant.
+ else
+ mostLeftNode = y->parent();
+ }
+ } else {
+ if (y->right == 0) {
+ x = y->left;
+ } else {
+ y = y->right;
+ while (y->left != 0)
+ y = y->left;
+ x = y->right;
+ }
+ }
+ if (y != z) {
+ z->left->setParent(y);
+ y->left = z->left;
+ if (y != z->right) {
+ x_parent = y->parent();
+ if (x)
+ x->setParent(y->parent());
+ y->parent()->left = x;
+ y->right = z->right;
+ z->right->setParent(y);
+ } else {
+ x_parent = y;
+ }
+ if (root == z)
+ root = y;
+ else if (z->parent()->left == z)
+ z->parent()->left = y;
+ else
+ z->parent()->right = y;
+ y->setParent(z->parent());
+ // Swap the colors
+ SparseArrayNode::Color c = y->color();
+ y->setColor(z->color());
+ z->setColor(c);
+ y = z;
+ } else {
+ x_parent = y->parent();
+ if (x)
+ x->setParent(y->parent());
+ if (root == z)
+ root = x;
+ else if (z->parent()->left == z)
+ z->parent()->left = x;
+ else
+ z->parent()->right = x;
+ }
+ if (y->color() != SparseArrayNode::Red) {
+ while (x != root && (x == 0 || x->color() == SparseArrayNode::Black)) {
+ if (x == x_parent->left) {
+ SparseArrayNode *w = x_parent->right;
+ if (w->color() == SparseArrayNode::Red) {
+ w->setColor(SparseArrayNode::Black);
+ x_parent->setColor(SparseArrayNode::Red);
+ rotateLeft(x_parent);
+ w = x_parent->right;
+ }
+ if ((w->left == 0 || w->left->color() == SparseArrayNode::Black) &&
+ (w->right == 0 || w->right->color() == SparseArrayNode::Black)) {
+ w->setColor(SparseArrayNode::Red);
+ x = x_parent;
+ x_parent = x_parent->parent();
+ } else {
+ if (w->right == 0 || w->right->color() == SparseArrayNode::Black) {
+ if (w->left)
+ w->left->setColor(SparseArrayNode::Black);
+ w->setColor(SparseArrayNode::Red);
+ rotateRight(w);
+ w = x_parent->right;
+ }
+ w->setColor(x_parent->color());
+ x_parent->setColor(SparseArrayNode::Black);
+ if (w->right)
+ w->right->setColor(SparseArrayNode::Black);
+ rotateLeft(x_parent);
+ break;
+ }
+ } else {
+ SparseArrayNode *w = x_parent->left;
+ if (w->color() == SparseArrayNode::Red) {
+ w->setColor(SparseArrayNode::Black);
+ x_parent->setColor(SparseArrayNode::Red);
+ rotateRight(x_parent);
+ w = x_parent->left;
+ }
+ if ((w->right == 0 || w->right->color() == SparseArrayNode::Black) &&
+ (w->left == 0 || w->left->color() == SparseArrayNode::Black)) {
+ w->setColor(SparseArrayNode::Red);
+ x = x_parent;
+ x_parent = x_parent->parent();
+ } else {
+ if (w->left == 0 || w->left->color() == SparseArrayNode::Black) {
+ if (w->right)
+ w->right->setColor(SparseArrayNode::Black);
+ w->setColor(SparseArrayNode::Red);
+ rotateLeft(w);
+ w = x_parent->left;
+ }
+ w->setColor(x_parent->color());
+ x_parent->setColor(SparseArrayNode::Black);
+ if (w->left)
+ w->left->setColor(SparseArrayNode::Black);
+ rotateRight(x_parent);
+ break;
+ }
+ }
+ }
+ if (x)
+ x->setColor(SparseArrayNode::Black);
+ }
+ free(y);
+ --numEntries;
+}
+
+void SparseArray::recalcMostLeftNode()
+{
+ mostLeftNode = &header;
+ while (mostLeftNode->left)
+ mostLeftNode = mostLeftNode->left;
+}
+
+static inline int qMapAlignmentThreshold()
+{
+ // malloc on 32-bit platforms should return pointers that are 8-byte
+ // aligned or more while on 64-bit platforms they should be 16-byte aligned
+ // or more
+ return 2 * sizeof(void*);
+}
+
+static inline void *qMapAllocate(int alloc, int alignment)
+{
+ return alignment > qMapAlignmentThreshold()
+ ? qMallocAligned(alloc, alignment)
+ : ::malloc(alloc);
+}
+
+static inline void qMapDeallocate(SparseArrayNode *node, int alignment)
+{
+ if (alignment > qMapAlignmentThreshold())
+ qFreeAligned(node);
+ else
+ ::free(node);
+}
+
+SparseArrayNode *SparseArray::createNode(uint sl, SparseArrayNode *parent, bool left)
+{
+ SparseArrayNode *node = static_cast<SparseArrayNode *>(qMapAllocate(sizeof(SparseArrayNode), Q_ALIGNOF(SparseArrayNode)));
+ Q_CHECK_PTR(node);
+
+ node->p = (quintptr)parent;
+ node->left = 0;
+ node->right = 0;
+ node->size_left = sl;
+ node->value = UINT_MAX;
+ ++numEntries;
+
+ if (parent) {
+ if (left) {
+ parent->left = node;
+ if (parent == mostLeftNode)
+ mostLeftNode = node;
+ } else {
+ parent->right = node;
+ }
+ node->setParent(parent);
+ rebalance(node);
+ }
+ return node;
+}
+
+void SparseArray::freeTree(SparseArrayNode *root, int alignment)
+{
+ if (root->left)
+ freeTree(root->left, alignment);
+ if (root->right)
+ freeTree(root->right, alignment);
+ qMapDeallocate(root, alignment);
+}
+
+SparseArray::SparseArray()
+ : numEntries(0)
+{
+ header.p = 0;
+ header.left = 0;
+ header.right = 0;
+ mostLeftNode = &header;
+}
+
+SparseArray::SparseArray(const SparseArray &other)
+{
+ header.p = 0;
+ header.right = 0;
+ if (other.header.left) {
+ header.left = other.header.left->copy(this);
+ header.left->setParent(&header);
+ recalcMostLeftNode();
+ }
+}
+
+SparseArrayNode *SparseArray::insert(uint akey)
+{
+ SparseArrayNode *n = root();
+ SparseArrayNode *y = end();
+ bool left = true;
+ uint s = akey;
+ while (n) {
+ y = n;
+ if (s == n->size_left) {
+ return n;
+ } else if (s < n->size_left) {
+ left = true;
+ n = n->left;
+ } else {
+ left = false;
+ s -= n->size_left;
+ n = n->right;
+ }
+ }
+
+ return createNode(s, y, left);
+}
+
+
+}
+}
diff --git a/src/qml/qml/v4vm/qv4sparsearray.h b/src/qml/qml/v4vm/qv4sparsearray.h
new file mode 100644
index 0000000000..a6f7d40c38
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4sparsearray.h
@@ -0,0 +1,369 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4ARRAY_H
+#define QV4ARRAY_H
+
+#include "qv4global.h"
+#include <QtCore/qmap.h>
+#include <qv4value.h>
+#include <qv4property.h>
+#include <assert.h>
+
+#ifdef Q_MAP_DEBUG
+#include <QtCore/qdebug.h>
+#endif
+
+#include <new>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct SparseArray;
+
+class ArrayElementLessThan
+{
+public:
+ inline ArrayElementLessThan(ExecutionContext *context, Object *thisObject, const Value &comparefn)
+ : m_context(context), thisObject(thisObject), m_comparefn(comparefn) {}
+
+ bool operator()(const Property &v1, const Property &v2) const;
+
+private:
+ ExecutionContext *m_context;
+ Object *thisObject;
+ Value m_comparefn;
+};
+
+
+struct SparseArrayNode
+{
+ quintptr p;
+ SparseArrayNode *left;
+ SparseArrayNode *right;
+ uint size_left;
+ uint value;
+
+ enum Color { Red = 0, Black = 1 };
+ enum { Mask = 3 }; // reserve the second bit as well
+
+ const SparseArrayNode *nextNode() const;
+ SparseArrayNode *nextNode() { return const_cast<SparseArrayNode *>(const_cast<const SparseArrayNode *>(this)->nextNode()); }
+ const SparseArrayNode *previousNode() const;
+ SparseArrayNode *previousNode() { return const_cast<SparseArrayNode *>(const_cast<const SparseArrayNode *>(this)->previousNode()); }
+
+ Color color() const { return Color(p & 1); }
+ void setColor(Color c) { if (c == Black) p |= Black; else p &= ~Black; }
+ SparseArrayNode *parent() const { return reinterpret_cast<SparseArrayNode *>(p & ~Mask); }
+ void setParent(SparseArrayNode *pp) { p = (p & Mask) | quintptr(pp); }
+
+ uint key() const {
+ uint k = size_left;
+ const SparseArrayNode *n = this;
+ while (SparseArrayNode *p = n->parent()) {
+ if (p && p->right == n)
+ k += p->size_left;
+ n = p;
+ }
+ return k;
+ }
+
+ SparseArrayNode *copy(SparseArray *d) const;
+
+ SparseArrayNode *lowerBound(uint key);
+ SparseArrayNode *upperBound(uint key);
+};
+
+
+inline SparseArrayNode *SparseArrayNode::lowerBound(uint akey)
+{
+ SparseArrayNode *n = this;
+ SparseArrayNode *last = 0;
+ while (n) {
+ if (akey <= n->size_left) {
+ last = n;
+ n = n->left;
+ } else {
+ akey -= n->size_left;
+ n = n->right;
+ }
+ }
+ return last;
+}
+
+
+inline SparseArrayNode *SparseArrayNode::upperBound(uint akey)
+{
+ SparseArrayNode *n = this;
+ SparseArrayNode *last = 0;
+ while (n) {
+ if (akey < n->size_left) {
+ last = n;
+ n = n->left;
+ } else {
+ akey -= n->size_left;
+ n = n->right;
+ }
+ }
+ return last;
+}
+
+
+
+struct Q_V4_EXPORT SparseArray
+{
+ SparseArray();
+ ~SparseArray() {
+ if (root())
+ freeTree(header.left, Q_ALIGNOF(SparseArrayNode));
+ }
+
+ SparseArray(const SparseArray &other);
+private:
+ SparseArray &operator=(const SparseArray &other);
+
+ int numEntries;
+ SparseArrayNode header;
+ SparseArrayNode *mostLeftNode;
+
+ void rotateLeft(SparseArrayNode *x);
+ void rotateRight(SparseArrayNode *x);
+ void rebalance(SparseArrayNode *x);
+ void recalcMostLeftNode();
+
+ SparseArrayNode *root() const { return header.left; }
+
+ void deleteNode(SparseArrayNode *z);
+
+
+public:
+ SparseArrayNode *createNode(uint sl, SparseArrayNode *parent, bool left);
+ void freeTree(SparseArrayNode *root, int alignment);
+
+ SparseArrayNode *findNode(uint akey) const;
+
+ uint pop_front();
+ void push_front(uint at);
+ uint pop_back(uint len);
+ void push_back(uint at, uint len);
+
+ QList<int> keys() const;
+
+ const SparseArrayNode *end() const { return &header; }
+ SparseArrayNode *end() { return &header; }
+ const SparseArrayNode *begin() const { if (root()) return mostLeftNode; return end(); }
+ SparseArrayNode *begin() { if (root()) return mostLeftNode; return end(); }
+
+ SparseArrayNode *erase(SparseArrayNode *n);
+
+ SparseArrayNode *lowerBound(uint key);
+ const SparseArrayNode *lowerBound(uint key) const;
+ SparseArrayNode *upperBound(uint key);
+ const SparseArrayNode *upperBound(uint key) const;
+ SparseArrayNode *insert(uint akey);
+
+ // STL compatibility
+ typedef uint key_type;
+ typedef int mapped_type;
+ typedef qptrdiff difference_type;
+ typedef int size_type;
+
+#ifdef Q_MAP_DEBUG
+ void dump() const;
+#endif
+};
+
+inline SparseArrayNode *SparseArray::findNode(uint akey) const
+{
+ SparseArrayNode *n = root();
+
+ while (n) {
+ if (akey == n->size_left) {
+ return n;
+ } else if (akey < n->size_left) {
+ n = n->left;
+ } else {
+ akey -= n->size_left;
+ n = n->right;
+ }
+ }
+
+ return 0;
+}
+
+inline uint SparseArray::pop_front()
+{
+ uint idx = UINT_MAX ;
+
+ SparseArrayNode *n = findNode(0);
+ if (n) {
+ idx = n->value;
+ deleteNode(n);
+ // adjust all size_left indices on the path to leftmost item by 1
+ SparseArrayNode *n = root();
+ while (n) {
+ n->size_left -= 1;
+ n = n->left;
+ }
+ }
+ return idx;
+}
+
+inline void SparseArray::push_front(uint value)
+{
+ // adjust all size_left indices on the path to leftmost item by 1
+ SparseArrayNode *n = root();
+ while (n) {
+ n->size_left += 1;
+ n = n->left;
+ }
+ n = insert(0);
+ n->value = value;
+}
+
+inline uint SparseArray::pop_back(uint len)
+{
+ uint idx = UINT_MAX;
+ if (!len)
+ return idx;
+
+ SparseArrayNode *n = findNode(len - 1);
+ if (n) {
+ idx = n->value;
+ deleteNode(n);
+ }
+ return idx;
+}
+
+inline void SparseArray::push_back(uint index, uint len)
+{
+ SparseArrayNode *n = insert(len);
+ n->value = index;
+}
+
+#ifdef Q_MAP_DEBUG
+
+void SparseArray::dump() const
+{
+ const_iterator it = begin();
+ qDebug() << "map dump:";
+ while (it != end()) {
+ const SparseArrayNode *n = it.i;
+ int depth = 0;
+ while (n && n != root()) {
+ ++depth;
+ n = n->parent();
+ }
+ QByteArray space(4*depth, ' ');
+ qDebug() << space << (it.i->color() == SparseArrayNode::Red ? "Red " : "Black") << it.i << it.i->left << it.i->right
+ << it.key() << it.value();
+ ++it;
+ }
+ qDebug() << "---------";
+}
+#endif
+
+
+inline SparseArrayNode *SparseArray::erase(SparseArrayNode *n)
+{
+ if (n == end())
+ return n;
+
+ SparseArrayNode *next = n->nextNode();
+ deleteNode(n);
+ return next;
+}
+
+inline QList<int> SparseArray::keys() const
+{
+ QList<int> res;
+ res.reserve(numEntries);
+ SparseArrayNode *n = mostLeftNode;
+ while (n != end()) {
+ res.append(n->key());
+ n = n->nextNode();
+ }
+ return res;
+}
+
+inline const SparseArrayNode *SparseArray::lowerBound(uint akey) const
+{
+ const SparseArrayNode *lb = root()->lowerBound(akey);
+ if (!lb)
+ lb = end();
+ return lb;
+}
+
+
+inline SparseArrayNode *SparseArray::lowerBound(uint akey)
+{
+ SparseArrayNode *lb = root()->lowerBound(akey);
+ if (!lb)
+ lb = end();
+ return lb;
+}
+
+
+inline const SparseArrayNode *SparseArray::upperBound(uint akey) const
+{
+ const SparseArrayNode *ub = root()->upperBound(akey);
+ if (!ub)
+ ub = end();
+ return ub;
+}
+
+
+inline SparseArrayNode *SparseArray::upperBound(uint akey)
+{
+ SparseArrayNode *ub = root()->upperBound(akey);
+ if (!ub)
+ ub = end();
+ return ub;
+}
+
+}
+}
+
+QT_END_NAMESPACE
+
+#endif // QMAP_H
diff --git a/src/qml/qml/v4vm/qv4string.cpp b/src/qml/qml/v4vm/qv4string.cpp
new file mode 100644
index 0000000000..cb17547f07
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4string.cpp
@@ -0,0 +1,242 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4string.h"
+#include "qv4identifier.h"
+#include "qv4runtime.h"
+#include "qv4objectproto.h"
+#include "qv4stringobject.h"
+#include <QtCore/QHash>
+
+namespace QQmlJS {
+namespace VM {
+
+static uint toArrayIndex(const QChar *ch, const QChar *end, bool *ok)
+{
+ *ok = false;
+ uint i = ch->unicode() - '0';
+ if (i > 9)
+ return UINT_MAX;
+ ++ch;
+ // reject "01", "001", ...
+ if (i == 0 && ch != end)
+ return UINT_MAX;
+
+ while (ch < end) {
+ uint x = ch->unicode() - '0';
+ if (x > 9)
+ return UINT_MAX;
+ uint n = i*10 + x;
+ if (n < i)
+ // overflow
+ return UINT_MAX;
+ i = n;
+ ++ch;
+ }
+ *ok = true;
+ return i;
+}
+
+const ManagedVTable String::static_vtbl =
+{
+ call,
+ construct,
+ 0 /*markObjects*/,
+ destroy,
+ hasInstance,
+ get,
+ getIndexed,
+ put,
+ putIndexed,
+ query,
+ queryIndexed,
+ deleteProperty,
+ deleteIndexedProperty,
+ "String",
+};
+
+void String::destroy(Managed *that)
+{
+ static_cast<String*>(that)->~String();
+}
+
+Value String::get(Managed *m, ExecutionContext *ctx, String *name, bool *hasProperty)
+{
+ String *that = static_cast<String *>(m);
+ if (name == ctx->engine->id_length) {
+ if (hasProperty)
+ *hasProperty = true;
+ return Value::fromInt32(that->_text.length());
+ }
+ PropertyAttributes attrs;
+ Property *pd = ctx->engine->stringPrototype->__getPropertyDescriptor__(name, &attrs);
+ if (!pd || attrs.isGeneric()) {
+ if (hasProperty)
+ *hasProperty = false;
+ return Value::undefinedValue();
+ }
+ if (hasProperty)
+ *hasProperty = true;
+ return ctx->engine->stringPrototype->getValue(Value::fromString(that), ctx, pd, attrs);
+}
+
+Value String::getIndexed(Managed *m, ExecutionContext *ctx, uint index, bool *hasProperty)
+{
+ String *that = static_cast<String *>(m);
+ if (index < that->_text.length()) {
+ if (hasProperty)
+ *hasProperty = true;
+ return Value::fromString(ctx, that->toQString().mid(index, 1));
+ }
+ PropertyAttributes attrs;
+ Property *pd = ctx->engine->stringPrototype->__getPropertyDescriptor__(index, &attrs);
+ if (!pd || attrs.isGeneric()) {
+ if (hasProperty)
+ *hasProperty = false;
+ return Value::undefinedValue();
+ }
+ if (hasProperty)
+ *hasProperty = true;
+ return ctx->engine->stringPrototype->getValue(Value::fromString(that), ctx, pd, attrs);
+}
+
+void String::put(Managed *m, ExecutionContext *ctx, String *name, const Value &value)
+{
+ String *that = static_cast<String *>(m);
+ Object *o = ctx->engine->newStringObject(ctx, Value::fromString(that));
+ o->put(ctx, name, value);
+}
+
+void String::putIndexed(Managed *m, ExecutionContext *ctx, uint index, const Value &value)
+{
+ String *that = static_cast<String *>(m);
+ Object *o = ctx->engine->newStringObject(ctx, Value::fromString(that));
+ o->putIndexed(ctx, index, value);
+}
+
+PropertyAttributes String::query(Managed *m, ExecutionContext *ctx, String *name)
+{
+ return Attr_Invalid;
+}
+
+PropertyAttributes String::queryIndexed(Managed *m, ExecutionContext *ctx, uint index)
+{
+ String *that = static_cast<String *>(m);
+ return (index < that->_text.length()) ? Attr_NotConfigurable|Attr_NotWritable : Attr_Invalid;
+}
+
+bool String::deleteProperty(Managed *m, ExecutionContext *ctx, String *name)
+{
+ return false;
+}
+
+bool String::deleteIndexedProperty(Managed *m, ExecutionContext *ctx, uint index)
+{
+ return false;
+}
+
+uint String::toUInt(bool *ok) const
+{
+ *ok = true;
+
+ if (subtype == StringType_Unknown)
+ createHashValue();
+ if (subtype >= StringType_UInt)
+ return stringHash;
+
+ // ### this conversion shouldn't be required
+ double d = __qmljs_string_to_number(this);
+ uint l = (uint)d;
+ if (d == l)
+ return l;
+ *ok = false;
+ return UINT_MAX;
+}
+
+void String::makeIdentifierImpl(const ExecutionContext *ctx)
+{
+ ctx->engine->identifierCache->toIdentifier(this);
+}
+
+void String::createHashValue() const
+{
+ const QChar *ch = _text.constData();
+ const QChar *end = ch + _text.length();
+
+ // array indices get their number as hash value
+ bool ok;
+ stringHash = toArrayIndex(ch, end, &ok);
+ if (ok) {
+ subtype = (stringHash == UINT_MAX) ? StringType_UInt : StringType_ArrayIndex;
+ return;
+ }
+
+ uint h = 0xffffffff;
+ while (ch < end) {
+ h = 31 * h + ch->unicode();
+ ++ch;
+ }
+
+ stringHash = h;
+ subtype = StringType_Regular;
+}
+
+uint String::createHashValue(const QChar *ch, int length)
+{
+ const QChar *end = ch + length;
+
+ // array indices get their number as hash value
+ bool ok;
+ uint stringHash = toArrayIndex(ch, end, &ok);
+ if (ok)
+ return stringHash;
+
+ uint h = 0xffffffff;
+ while (ch < end) {
+ h = 31 * h + ch->unicode();
+ ++ch;
+ }
+
+ return h;
+}
+
+}
+}
diff --git a/src/qml/qml/v4vm/qv4string.h b/src/qml/qml/v4vm/qv4string.h
new file mode 100644
index 0000000000..f4eeb7d0a8
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4string.h
@@ -0,0 +1,136 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4STRING_H
+#define QV4STRING_H
+
+#include <QtCore/qstring.h>
+#include "qv4managed.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct ExecutionEngine;
+
+struct String : public Managed {
+ enum StringType {
+ StringType_Unknown,
+ StringType_Regular,
+ StringType_UInt,
+ StringType_ArrayIndex
+ };
+
+ String(const QString &text)
+ : _text(text), stringHash(UINT_MAX), identifier(UINT_MAX)
+ { vtbl = &static_vtbl; type = Type_String; subtype = StringType_Unknown; }
+ ~String() { _data = 0; }
+
+ inline bool isEqualTo(const String *other) const {
+ if (this == other)
+ return true;
+ if (hashValue() != other->hashValue())
+ return false;
+ if (identifier != UINT_MAX && identifier == other->identifier)
+ return true;
+ if (subtype >= StringType_UInt && subtype == other->subtype)
+ return true;
+
+ return toQString() == other->toQString();
+ }
+ inline bool compare(const String *other) {
+ return toQString() < other->toQString();
+ }
+
+ inline const QString &toQString() const {
+ return _text;
+ }
+
+ inline unsigned hashValue() const {
+ if (subtype == StringType_Unknown)
+ createHashValue();
+
+ return stringHash;
+ }
+ uint asArrayIndex() const {
+ if (subtype == StringType_Unknown)
+ createHashValue();
+ if (subtype == StringType_ArrayIndex)
+ return stringHash;
+ return UINT_MAX;
+ }
+ uint toUInt(bool *ok) const;
+
+ void makeIdentifier(const ExecutionContext *ctx) {
+ if (identifier != UINT_MAX)
+ return;
+ makeIdentifierImpl(ctx);
+ }
+
+ void makeIdentifierImpl(const ExecutionContext *ctx);
+
+ void createHashValue() const;
+ static uint createHashValue(const QChar *ch, int length);
+
+ QString _text;
+ mutable uint stringHash;
+ mutable uint identifier;
+
+protected:
+ static void destroy(Managed *);
+ static Value get(Managed *m, ExecutionContext *ctx, String *name, bool *hasProperty);
+ static Value getIndexed(Managed *m, ExecutionContext *ctx, uint index, bool *hasProperty);
+ static void put(Managed *m, ExecutionContext *ctx, String *name, const Value &value);
+ static void putIndexed(Managed *m, ExecutionContext *ctx, uint index, const Value &value);
+ static PropertyAttributes query(Managed *m, ExecutionContext *ctx, String *name);
+ static PropertyAttributes queryIndexed(Managed *m, ExecutionContext *ctx, uint index);
+ static bool deleteProperty(Managed *m, ExecutionContext *ctx, String *name);
+ static bool deleteIndexedProperty(Managed *m, ExecutionContext *ctx, uint index);
+
+ static const ManagedVTable static_vtbl;
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/qv4stringobject.cpp b/src/qml/qml/v4vm/qv4stringobject.cpp
new file mode 100644
index 0000000000..42b9b422ec
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4stringobject.cpp
@@ -0,0 +1,726 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+
+#include "qv4stringobject.h"
+#include "qv4regexpobject.h"
+#include "qv4objectproto.h"
+#include "qv4mm.h"
+#include <QtCore/qnumeric.h>
+#include <QtCore/qmath.h>
+#include <QtCore/QDateTime>
+#include <QtCore/QStringList>
+#include <QtCore/QDebug>
+#include <cmath>
+#include <qmath.h>
+#include <qnumeric.h>
+#include <cassert>
+
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+#include <qv4jsir_p.h>
+#include <qv4codegen_p.h>
+#include <qv4isel_masm_p.h>
+
+#ifndef Q_OS_WIN
+# include <time.h>
+# ifndef Q_OS_VXWORKS
+# include <sys/time.h>
+# else
+# include "qplatformdefs.h"
+# endif
+#else
+# include <windows.h>
+#endif
+
+using namespace QQmlJS::VM;
+
+DEFINE_MANAGED_VTABLE(StringObject);
+
+StringObject::StringObject(ExecutionContext *ctx, const Value &value)
+ : Object(ctx->engine), value(value)
+{
+ vtbl = &static_vtbl;
+ type = Type_StringObject;
+
+ tmpProperty.value = Value::undefinedValue();
+
+ assert(value.isString());
+ defineReadonlyProperty(ctx->engine->id_length, Value::fromUInt32(value.stringValue()->toQString().length()));
+}
+
+Property *StringObject::getIndex(uint index) const
+{
+ QString str = value.stringValue()->toQString();
+ if (index >= (uint)str.length())
+ return 0;
+ String *result = internalClass->engine->newString(str.mid(index, 1));
+ tmpProperty.value = Value::fromString(result);
+ return &tmpProperty;
+}
+
+void StringObject::markObjects(Managed *that)
+{
+ StringObject *o = static_cast<StringObject *>(that);
+ o->value.stringValue()->mark();
+ Object::markObjects(that);
+}
+
+DEFINE_MANAGED_VTABLE(StringCtor);
+
+StringCtor::StringCtor(ExecutionContext *scope)
+ : FunctionObject(scope)
+{
+ vtbl = &static_vtbl;
+}
+
+Value StringCtor::construct(Managed *, ExecutionContext *ctx, Value *argv, int argc)
+{
+ Value value;
+ if (argc)
+ value = Value::fromString(argv[0].toString(ctx));
+ else
+ value = Value::fromString(ctx, QString());
+ return Value::fromObject(ctx->engine->newStringObject(ctx, value));
+}
+
+Value StringCtor::call(Managed *, ExecutionContext *parentCtx, const Value &thisObject, Value *argv, int argc)
+{
+ Value value;
+ if (argc)
+ value = Value::fromString(argv[0].toString(parentCtx));
+ else
+ value = Value::fromString(parentCtx, QString());
+ return value;
+}
+
+void StringPrototype::init(ExecutionContext *ctx, const Value &ctor)
+{
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_prototype, Value::fromObject(this));
+ ctor.objectValue()->defineReadonlyProperty(ctx->engine->id_length, Value::fromInt32(1));
+ ctor.objectValue()->defineDefaultProperty(ctx, QStringLiteral("fromCharCode"), method_fromCharCode, 1);
+
+ defineDefaultProperty(ctx, QStringLiteral("constructor"), ctor);
+ defineDefaultProperty(ctx, QStringLiteral("toString"), method_toString);
+ defineDefaultProperty(ctx, QStringLiteral("valueOf"), method_toString); // valueOf and toString are identical
+ defineDefaultProperty(ctx, QStringLiteral("charAt"), method_charAt, 1);
+ defineDefaultProperty(ctx, QStringLiteral("charCodeAt"), method_charCodeAt, 1);
+ defineDefaultProperty(ctx, QStringLiteral("concat"), method_concat, 1);
+ defineDefaultProperty(ctx, QStringLiteral("indexOf"), method_indexOf, 1);
+ defineDefaultProperty(ctx, QStringLiteral("lastIndexOf"), method_lastIndexOf, 1);
+ defineDefaultProperty(ctx, QStringLiteral("localeCompare"), method_localeCompare, 1);
+ defineDefaultProperty(ctx, QStringLiteral("match"), method_match, 1);
+ defineDefaultProperty(ctx, QStringLiteral("replace"), method_replace, 2);
+ defineDefaultProperty(ctx, QStringLiteral("search"), method_search, 1);
+ defineDefaultProperty(ctx, QStringLiteral("slice"), method_slice, 2);
+ defineDefaultProperty(ctx, QStringLiteral("split"), method_split, 2);
+ defineDefaultProperty(ctx, QStringLiteral("substr"), method_substr, 2);
+ defineDefaultProperty(ctx, QStringLiteral("substring"), method_substring, 2);
+ defineDefaultProperty(ctx, QStringLiteral("toLowerCase"), method_toLowerCase);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleLowerCase"), method_toLocaleLowerCase);
+ defineDefaultProperty(ctx, QStringLiteral("toUpperCase"), method_toUpperCase);
+ defineDefaultProperty(ctx, QStringLiteral("toLocaleUpperCase"), method_toLocaleUpperCase);
+ defineDefaultProperty(ctx, QStringLiteral("trim"), method_trim);
+}
+
+static QString getThisString(ExecutionContext *ctx)
+{
+ String* str = 0;
+ Value thisObject = ctx->thisObject;
+ if (StringObject *thisString = thisObject.asStringObject())
+ str = thisString->value.stringValue();
+ else if (thisObject.isUndefined() || thisObject.isNull())
+ ctx->throwTypeError();
+ else
+ str = ctx->thisObject.toString(ctx);
+ return str->toQString();
+}
+
+static QString getThisString(ExecutionContext *context, Value thisObject)
+{
+ if (thisObject.isString())
+ return thisObject.stringValue()->toQString();
+
+ String* str = 0;
+ if (StringObject *thisString = thisObject.asStringObject())
+ str = thisString->value.stringValue();
+ else if (thisObject.isUndefined() || thisObject.isNull())
+ context->throwTypeError();
+ else
+ str = thisObject.toString(context);
+ return str->toQString();
+}
+
+Value StringPrototype::method_toString(SimpleCallContext *context)
+{
+ if (context->thisObject.isString())
+ return context->thisObject;
+
+ StringObject *o = context->thisObject.asStringObject();
+ if (!o)
+ context->throwTypeError();
+ return o->value;
+}
+
+Value StringPrototype::method_charAt(SimpleCallContext *context)
+{
+ const QString str = getThisString(context, context->thisObject);
+
+ int pos = 0;
+ if (context->argumentCount > 0)
+ pos = (int) context->arguments[0].toInteger();
+
+ QString result;
+ if (pos >= 0 && pos < str.length())
+ result += str.at(pos);
+
+ return Value::fromString(context, result);
+}
+
+Value StringPrototype::method_charCodeAt(SimpleCallContext *context)
+{
+ const QString str = getThisString(context, context->thisObject);
+
+ int pos = 0;
+ if (context->argumentCount > 0)
+ pos = (int) context->arguments[0].toInteger();
+
+
+ if (pos >= 0 && pos < str.length())
+ return Value::fromInt32(str.at(pos).unicode());
+
+ return Value::fromDouble(qSNaN());
+}
+
+Value StringPrototype::method_concat(SimpleCallContext *context)
+{
+ QString value = getThisString(context, context->thisObject);
+
+ for (int i = 0; i < context->argumentCount; ++i) {
+ Value v = __qmljs_to_string(context->arguments[i], context);
+ assert(v.isString());
+ value += v.stringValue()->toQString();
+ }
+
+ return Value::fromString(context, value);
+}
+
+Value StringPrototype::method_indexOf(SimpleCallContext *context)
+{
+ QString value = getThisString(context, context->thisObject);
+
+ QString searchString;
+ if (context->argumentCount)
+ searchString = context->arguments[0].toString(context)->toQString();
+
+ int pos = 0;
+ if (context->argumentCount > 1)
+ pos = (int) context->arguments[1].toInteger();
+
+ int index = -1;
+ if (! value.isEmpty())
+ index = value.indexOf(searchString, qMin(qMax(pos, 0), value.length()));
+
+ return Value::fromDouble(index);
+}
+
+Value StringPrototype::method_lastIndexOf(SimpleCallContext *context)
+{
+ const QString value = getThisString(context, context->thisObject);
+
+ QString searchString;
+ if (context->argumentCount) {
+ Value v = __qmljs_to_string(context->arguments[0], context);
+ searchString = v.stringValue()->toQString();
+ }
+
+ Value posArg = context->argumentCount > 1 ? context->arguments[1] : Value::undefinedValue();
+ double position = __qmljs_to_number(posArg);
+ if (isnan(position))
+ position = +qInf();
+ else
+ position = trunc(position);
+
+ int pos = trunc(qMin(qMax(position, 0.0), double(value.length())));
+ if (!searchString.isEmpty() && pos == value.length())
+ --pos;
+ if (searchString.isNull() && pos == 0)
+ return Value::fromDouble(-1);
+ int index = value.lastIndexOf(searchString, pos);
+ return Value::fromDouble(index);
+}
+
+Value StringPrototype::method_localeCompare(SimpleCallContext *context)
+{
+ const QString value = getThisString(context, context->thisObject);
+ const QString that = (context->argumentCount ? context->arguments[0] : Value::undefinedValue()).toString(context)->toQString();
+ return Value::fromDouble(QString::localeAwareCompare(value, that));
+}
+
+Value StringPrototype::method_match(SimpleCallContext *context)
+{
+ if (context->thisObject.isUndefined() || context->thisObject.isNull())
+ context->throwTypeError();
+
+ String *s = context->thisObject.toString(context);
+
+ Value regexp = context->argumentCount ? context->arguments[0] : Value::undefinedValue();
+ RegExpObject *rx = regexp.asRegExpObject();
+ if (!rx)
+ rx = context->engine->regExpCtor.asFunctionObject()->construct(context, &regexp, 1).asRegExpObject();
+
+ if (!rx)
+ // ### CHECK
+ context->throwTypeError();
+
+ bool global = rx->global;
+
+ // ### use the standard builtin function, not the one that might be redefined in the proto
+ FunctionObject *exec = context->engine->regExpPrototype->get(context, context->engine->newString(QStringLiteral("exec")), 0).asFunctionObject();
+
+ Value arg = Value::fromString(s);
+ if (!global)
+ return exec->call(context, Value::fromObject(rx), &arg, 1);
+
+ String *lastIndex = context->engine->newString(QStringLiteral("lastIndex"));
+ rx->put(context, lastIndex, Value::fromInt32(0));
+ ArrayObject *a = context->engine->newArrayObject(context);
+
+ double previousLastIndex = 0;
+ uint n = 0;
+ while (1) {
+ Value result = exec->call(context, Value::fromObject(rx), &arg, 1);
+ if (result.isNull())
+ break;
+ assert(result.isObject());
+ double thisIndex = rx->get(context, lastIndex, 0).toInteger();
+ if (previousLastIndex == thisIndex) {
+ previousLastIndex = thisIndex + 1;
+ rx->put(context, lastIndex, Value::fromDouble(previousLastIndex));
+ } else {
+ previousLastIndex = thisIndex;
+ }
+ Value matchStr = result.objectValue()->getIndexed(context, 0, (bool *)0);
+ a->arraySet(n, matchStr);
+ ++n;
+ }
+ if (!n)
+ return Value::nullValue();
+
+ return Value::fromObject(a);
+
+}
+
+static QString makeReplacementString(const QString &input, const QString& replaceValue, uint* matchOffsets, int captureCount)
+{
+ QString result;
+ result.reserve(replaceValue.length());
+ for (int i = 0; i < replaceValue.length(); ++i) {
+ if (replaceValue.at(i) == QLatin1Char('$') && i < replaceValue.length() - 1) {
+ char ch = replaceValue.at(++i).toLatin1();
+ uint substStart = JSC::Yarr::offsetNoMatch;
+ uint substEnd = JSC::Yarr::offsetNoMatch;
+ if (ch == '$') {
+ result += ch;
+ continue;
+ } else if (ch == '&') {
+ substStart = matchOffsets[0];
+ substEnd = matchOffsets[1];
+ } else if (ch == '`') {
+ substStart = 0;
+ substEnd = matchOffsets[0];
+ } else if (ch == '\'') {
+ substStart = matchOffsets[1];
+ substEnd = input.length();
+ } else if (ch >= '1' && ch <= '9') {
+ char capture = ch - '0';
+ if (capture > 0 && capture < captureCount) {
+ substStart = matchOffsets[capture * 2];
+ substEnd = matchOffsets[capture * 2 + 1];
+ }
+ } else if (ch == '0' && i < replaceValue.length() - 1) {
+ int capture = (ch - '0') * 10;
+ ch = replaceValue.at(++i).toLatin1();
+ capture += ch - '0';
+ if (capture > 0 && capture < captureCount) {
+ substStart = matchOffsets[capture * 2];
+ substEnd = matchOffsets[capture * 2 + 1];
+ }
+ }
+ if (substStart != JSC::Yarr::offsetNoMatch && substEnd != JSC::Yarr::offsetNoMatch)
+ result += input.midRef(substStart, substEnd - substStart);
+ } else {
+ result += replaceValue.at(i);
+ }
+ }
+ return result;
+}
+
+Value StringPrototype::method_replace(SimpleCallContext *ctx)
+{
+ QString string;
+ if (StringObject *thisString = ctx->thisObject.asStringObject())
+ string = thisString->value.stringValue()->toQString();
+ else
+ string = ctx->thisObject.toString(ctx)->toQString();
+
+ int numCaptures = 0;
+ QVarLengthArray<uint, 16> matchOffsets;
+ int numStringMatches = 0;
+
+ Value searchValue = ctx->argument(0);
+ RegExpObject *regExp = searchValue.asRegExpObject();
+ if (regExp) {
+ uint offset = 0;
+ while (true) {
+ int oldSize = matchOffsets.size();
+ matchOffsets.resize(matchOffsets.size() + regExp->value->captureCount() * 2);
+ if (regExp->value->match(string, offset, matchOffsets.data() + oldSize) == JSC::Yarr::offsetNoMatch) {
+ matchOffsets.resize(oldSize);
+ break;
+ }
+ if (!regExp->global)
+ break;
+ offset = qMax(offset + 1, matchOffsets[oldSize + 1]);
+ }
+ if (regExp->global)
+ regExp->lastIndexProperty(ctx)->value = Value::fromUInt32(0);
+ numStringMatches = matchOffsets.size() / (regExp->value->captureCount() * 2);
+ numCaptures = regExp->value->captureCount();
+ } else {
+ numCaptures = 1;
+ QString searchString = searchValue.toString(ctx)->toQString();
+ int idx = string.indexOf(searchString);
+ if (idx != -1) {
+ numStringMatches = 1;
+ matchOffsets.resize(2);
+ matchOffsets[0] = idx;
+ matchOffsets[1] = idx + searchString.length();
+ }
+ }
+
+ QString result = string;
+ Value replaceValue = ctx->argument(1);
+ if (FunctionObject* searchCallback = replaceValue.asFunctionObject()) {
+ int replacementDelta = 0;
+ int argc = numCaptures + 2;
+ Value *args = (Value*)alloca((numCaptures + 2) * sizeof(Value));
+ for (int i = 0; i < numStringMatches; ++i) {
+ for (int k = 0; k < numCaptures; ++k) {
+ int idx = (i * numCaptures + k) * 2;
+ uint start = matchOffsets[idx];
+ uint end = matchOffsets[idx + 1];
+ Value entry = Value::undefinedValue();
+ if (start != JSC::Yarr::offsetNoMatch && end != JSC::Yarr::offsetNoMatch)
+ entry = Value::fromString(ctx, string.mid(start, end - start));
+ args[k] = entry;
+ }
+ uint matchStart = matchOffsets[i * numCaptures * 2];
+ uint matchEnd = matchOffsets[i * numCaptures * 2 + 1];
+ args[numCaptures] = Value::fromUInt32(matchStart);
+ args[numCaptures + 1] = Value::fromString(ctx, string);
+ Value replacement = searchCallback->call(ctx, Value::undefinedValue(), args, argc);
+ QString replacementString = replacement.toString(ctx)->toQString();
+ result.replace(replacementDelta + matchStart, matchEnd - matchStart, replacementString);
+ replacementDelta += replacementString.length() - matchEnd + matchStart;
+ }
+ } else {
+ QString newString = replaceValue.toString(ctx)->toQString();
+ int replacementDelta = 0;
+
+ for (int i = 0; i < numStringMatches; ++i) {
+ int baseIndex = i * numCaptures * 2;
+ uint matchStart = matchOffsets[baseIndex];
+ uint matchEnd = matchOffsets[baseIndex + 1];
+ if (matchStart == JSC::Yarr::offsetNoMatch)
+ continue;
+
+ QString replacement = makeReplacementString(string, newString, matchOffsets.data() + baseIndex, numCaptures);
+ result.replace(replacementDelta + matchStart, matchEnd - matchStart, replacement);
+ replacementDelta += replacement.length() - matchEnd + matchStart;
+ }
+ }
+
+ return Value::fromString(ctx, result);
+}
+
+Value StringPrototype::method_search(SimpleCallContext *ctx)
+{
+ QString string;
+ if (StringObject *thisString = ctx->thisObject.asStringObject())
+ string = thisString->value.stringValue()->toQString();
+ else
+ string = ctx->thisObject.toString(ctx)->toQString();
+
+ Value regExpValue = ctx->argument(0);
+ RegExpObject *regExp = regExpValue.asRegExpObject();
+ if (!regExp) {
+ regExpValue = ctx->engine->regExpCtor.asFunctionObject()->construct(ctx, &regExpValue, 1);
+ regExp = regExpValue.asRegExpObject();
+ }
+ uint* matchOffsets = (uint*)alloca(regExp->value->captureCount() * 2 * sizeof(uint));
+ uint result = regExp->value->match(string, /*offset*/0, matchOffsets);
+ if (result == JSC::Yarr::offsetNoMatch)
+ return Value::fromInt32(-1);
+ return Value::fromUInt32(result);
+}
+
+Value StringPrototype::method_slice(SimpleCallContext *ctx)
+{
+ const QString text = getThisString(ctx);
+ const double length = text.length();
+
+ double start = ctx->argument(0).toInteger();
+ double end = ctx->argument(1).isUndefined()
+ ? length : ctx->argument(1).toInteger();
+
+ if (start < 0)
+ start = qMax(length + start, 0.);
+ else
+ start = qMin(start, length);
+
+ if (end < 0)
+ end = qMax(length + end, 0.);
+ else
+ end = qMin(end, length);
+
+ const int intStart = int(start);
+ const int intEnd = int(end);
+
+ int count = qMax(0, intEnd - intStart);
+ return Value::fromString(ctx, text.mid(intStart, count));
+}
+
+Value StringPrototype::method_split(SimpleCallContext *ctx)
+{
+ QString text;
+ if (StringObject *thisObject = ctx->thisObject.asStringObject())
+ text = thisObject->value.stringValue()->toQString();
+ else
+ text = ctx->thisObject.toString(ctx)->toQString();
+
+ Value separatorValue = ctx->argumentCount > 0 ? ctx->argument(0) : Value::undefinedValue();
+ Value limitValue = ctx->argumentCount > 1 ? ctx->argument(1) : Value::undefinedValue();
+
+ ArrayObject* array = ctx->engine->newArrayObject(ctx);
+ Value result = Value::fromObject(array);
+
+ if (separatorValue.isUndefined()) {
+ if (limitValue.isUndefined()) {
+ array->push_back(Value::fromString(ctx, text));
+ return result;
+ }
+ return Value::fromString(ctx, text.left(limitValue.toInteger()));
+ }
+
+ uint limit = limitValue.isUndefined() ? UINT_MAX : limitValue.toUInt32();
+
+ if (limit == 0)
+ return result;
+
+ if (RegExpObject* re = separatorValue.asRegExpObject()) {
+ if (re->value->pattern().isEmpty()) {
+ re = 0;
+ separatorValue = Value::fromString(ctx, QString());
+ }
+ }
+
+ if (RegExpObject* re = separatorValue.asRegExpObject()) {
+ uint offset = 0;
+ uint* matchOffsets = (uint*)alloca(re->value->captureCount() * 2 * sizeof(uint));
+ while (true) {
+ uint result = re->value->match(text, offset, matchOffsets);
+ if (result == JSC::Yarr::offsetNoMatch)
+ break;
+
+ array->push_back(Value::fromString(ctx, text.mid(offset, matchOffsets[0] - offset)));
+ offset = qMax(offset + 1, matchOffsets[1]);
+
+ if (array->arrayLength() >= limit)
+ break;
+
+ for (int i = 1; i < re->value->captureCount(); ++i) {
+ uint start = matchOffsets[i * 2];
+ uint end = matchOffsets[i * 2 + 1];
+ array->push_back(Value::fromString(ctx, text.mid(start, end - start)));
+ if (array->arrayLength() >= limit)
+ break;
+ }
+ }
+ if (array->arrayLength() < limit)
+ array->push_back(Value::fromString(ctx, text.mid(offset)));
+ } else {
+ QString separator = separatorValue.toString(ctx)->toQString();
+ if (separator.isEmpty()) {
+ for (uint i = 0; i < qMin(limit, uint(text.length())); ++i)
+ array->push_back(Value::fromString(ctx, text.mid(i, 1)));
+ return result;
+ }
+
+ int start = 0;
+ int end;
+ while ((end = text.indexOf(separator, start)) != -1) {
+ array->push_back(Value::fromString(ctx, text.mid(start, end - start)));
+ start = end + separator.size();
+ if (array->arrayLength() >= limit)
+ break;
+ }
+ if (array->arrayLength() < limit && start != -1)
+ array->push_back(Value::fromString(ctx, text.mid(start)));
+ }
+ return result;
+}
+
+Value StringPrototype::method_substr(SimpleCallContext *context)
+{
+ const QString value = getThisString(context, context->thisObject);
+
+ double start = 0;
+ if (context->argumentCount > 0)
+ start = context->arguments[0].toInteger();
+
+ double length = +qInf();
+ if (context->argumentCount > 1)
+ length = context->arguments[1].toInteger();
+
+ double count = value.length();
+ if (start < 0)
+ start = qMax(count + start, 0.0);
+
+ length = qMin(qMax(length, 0.0), count - start);
+
+ qint32 x = Value::toInt32(start);
+ qint32 y = Value::toInt32(length);
+ return Value::fromString(context, value.mid(x, y));
+}
+
+Value StringPrototype::method_substring(SimpleCallContext *context)
+{
+ QString value = getThisString(context, context->thisObject);
+ int length = value.length();
+
+ double start = 0;
+ double end = length;
+
+ if (context->argumentCount > 0)
+ start = context->arguments[0].toInteger();
+
+ Value endValue = context->argumentCount > 1 ? context->arguments[1] : Value::undefinedValue();
+ if (!endValue.isUndefined())
+ end = endValue.toInteger();
+
+ if (isnan(start) || start < 0)
+ start = 0;
+
+ if (isnan(end) || end < 0)
+ end = 0;
+
+ if (start > length)
+ start = length;
+
+ if (end > length)
+ end = length;
+
+ if (start > end) {
+ double was = start;
+ start = end;
+ end = was;
+ }
+
+ qint32 x = (int)start;
+ qint32 y = (int)(end - start);
+ return Value::fromString(context, value.mid(x, y));
+}
+
+Value StringPrototype::method_toLowerCase(SimpleCallContext *ctx)
+{
+ QString value = getThisString(ctx);
+ return Value::fromString(ctx, value.toLower());
+}
+
+Value StringPrototype::method_toLocaleLowerCase(SimpleCallContext *ctx)
+{
+ return method_toLowerCase(ctx);
+}
+
+Value StringPrototype::method_toUpperCase(SimpleCallContext *ctx)
+{
+ QString value = getThisString(ctx);
+ return Value::fromString(ctx, value.toUpper());
+}
+
+Value StringPrototype::method_toLocaleUpperCase(SimpleCallContext *ctx)
+{
+ return method_toUpperCase(ctx);
+}
+
+Value StringPrototype::method_fromCharCode(SimpleCallContext *context)
+{
+ QString str(context->argumentCount, Qt::Uninitialized);
+ QChar *ch = str.data();
+ for (int i = 0; i < context->argumentCount; ++i) {
+ *ch = QChar(context->arguments[i].toUInt16());
+ ++ch;
+ }
+ return Value::fromString(context, str);
+}
+
+Value StringPrototype::method_trim(SimpleCallContext *ctx)
+{
+ if (ctx->thisObject.isNull() || ctx->thisObject.isUndefined())
+ ctx->throwTypeError();
+
+ QString s = __qmljs_to_string(ctx->thisObject, ctx).stringValue()->toQString();
+ const QChar *chars = s.constData();
+ int start, end;
+ for (start = 0; start < s.length(); ++start) {
+ if (!chars[start].isSpace() && chars[start].unicode() != 0xfeff)
+ break;
+ }
+ for (end = s.length() - 1; end >= start; --end) {
+ if (!chars[end].isSpace() && chars[end].unicode() != 0xfeff)
+ break;
+ }
+
+ return Value::fromString(ctx, QString(chars + start, end - start + 1));
+}
diff --git a/src/qml/qml/v4vm/qv4stringobject.h b/src/qml/qml/v4vm/qv4stringobject.h
new file mode 100644
index 0000000000..ded26c501b
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4stringobject.h
@@ -0,0 +1,108 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4STRINGOBJECT_P_H
+#define QV4STRINGOBJECT_P_H
+
+#include "qv4object.h"
+#include "qv4functionobject.h"
+#include <QtCore/qnumeric.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct StringObject: Object {
+ Value value;
+ mutable Property tmpProperty;
+ StringObject(ExecutionContext *ctx, const Value &value);
+
+ Property *getIndex(uint index) const;
+
+protected:
+ static const ManagedVTable static_vtbl;
+ static void markObjects(Managed *that);
+};
+
+struct StringCtor: FunctionObject
+{
+ StringCtor(ExecutionContext *scope);
+
+ static Value construct(Managed *that, ExecutionContext *context, Value *args, int argc);
+ static Value call(Managed *that, ExecutionContext *, const Value &, Value *, int);
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+struct StringPrototype: StringObject
+{
+ StringPrototype(ExecutionContext *ctx): StringObject(ctx, Value::fromString(ctx, QString())) {}
+ void init(ExecutionContext *ctx, const Value &ctor);
+
+ static Value method_toString(SimpleCallContext *context);
+ static Value method_charAt(SimpleCallContext *context);
+ static Value method_charCodeAt(SimpleCallContext *context);
+ static Value method_concat(SimpleCallContext *context);
+ static Value method_indexOf(SimpleCallContext *context);
+ static Value method_lastIndexOf(SimpleCallContext *context);
+ static Value method_localeCompare(SimpleCallContext *context);
+ static Value method_match(SimpleCallContext *context);
+ static Value method_replace(SimpleCallContext *ctx);
+ static Value method_search(SimpleCallContext *ctx);
+ static Value method_slice(SimpleCallContext *ctx);
+ static Value method_split(SimpleCallContext *ctx);
+ static Value method_substr(SimpleCallContext *context);
+ static Value method_substring(SimpleCallContext *context);
+ static Value method_toLowerCase(SimpleCallContext *ctx);
+ static Value method_toLocaleLowerCase(SimpleCallContext *ctx);
+ static Value method_toUpperCase(SimpleCallContext *ctx);
+ static Value method_toLocaleUpperCase(SimpleCallContext *ctx);
+ static Value method_fromCharCode(SimpleCallContext *context);
+ static Value method_trim(SimpleCallContext *ctx);
+};
+
+} // end of namespace VM
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4ECMAOBJECTS_P_H
diff --git a/src/qml/qml/v4vm/qv4syntaxchecker.cpp b/src/qml/qml/v4vm/qv4syntaxchecker.cpp
new file mode 100644
index 0000000000..fcda486af2
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4syntaxchecker.cpp
@@ -0,0 +1,119 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4syntaxchecker_p.h"
+
+using namespace QQmlJS;
+
+SyntaxChecker::SyntaxChecker()
+ : Lexer(&m_engine)
+ , m_stateStack(128)
+{
+}
+
+void QQmlJS::SyntaxChecker::clearText()
+{
+ m_code.clear();
+ m_tokens.clear();
+}
+
+void SyntaxChecker::appendText(const QString &text)
+{
+ m_code += text;
+}
+
+QString SyntaxChecker::text() const
+{
+ return m_code;
+}
+
+bool SyntaxChecker::canEvaluate()
+{
+ int yyaction = 0;
+ int yytoken = -1;
+ int yytos = -1;
+
+ setCode(m_code, 1);
+
+ m_tokens.clear();
+ m_tokens.append(T_FEED_JS_PROGRAM);
+
+ do {
+ if (++yytos == m_stateStack.size())
+ m_stateStack.resize(m_stateStack.size() * 2);
+
+ m_stateStack[yytos] = yyaction;
+
+again:
+ if (yytoken == -1 && action_index[yyaction] != -TERMINAL_COUNT) {
+ if (m_tokens.isEmpty())
+ yytoken = lex();
+ else
+ yytoken = m_tokens.takeFirst();
+ }
+
+ yyaction = t_action(yyaction, yytoken);
+ if (yyaction > 0) {
+ if (yyaction == ACCEPT_STATE) {
+ --yytos;
+ return true;
+ }
+ yytoken = -1;
+ } else if (yyaction < 0) {
+ const int ruleno = -yyaction - 1;
+ yytos -= rhs[ruleno];
+ yyaction = nt_action(m_stateStack[yytos], lhs[ruleno] - TERMINAL_COUNT);
+ }
+ } while (yyaction);
+
+ const int errorState = m_stateStack[yytos];
+ if (t_action(errorState, T_AUTOMATIC_SEMICOLON) && canInsertAutomaticSemicolon(yytoken)) {
+ yyaction = errorState;
+ m_tokens.prepend(yytoken);
+ yytoken = T_SEMICOLON;
+ goto again;
+ }
+
+ if (yytoken != EOF_SYMBOL)
+ return true;
+
+ return false;
+}
diff --git a/src/qml/qml/v4vm/qv4syntaxchecker_p.h b/src/qml/qml/v4vm/qv4syntaxchecker_p.h
new file mode 100644
index 0000000000..38e123762e
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4syntaxchecker_p.h
@@ -0,0 +1,73 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4SYNTAXCHECKER_P_H
+#define QV4SYNTAXCHECKER_P_H
+
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsengine_p.h>
+
+#include <QtCore/QVector>
+#include <QtCore/QString>
+#include <QtCore/QList>
+
+namespace QQmlJS {
+
+class SyntaxChecker: Lexer
+{
+public:
+ SyntaxChecker();
+
+ QString text() const;
+ void clearText();
+ void appendText(const QString &text);
+
+ bool canEvaluate();
+
+private:
+ Engine m_engine;
+ QVector<int> m_stateStack;
+ QList<int> m_tokens;
+ QString m_code;
+};
+
+} // end of QQmlJS namespace
+
+#endif // QV4SYNTAXCHECKER_P_H
diff --git a/src/qml/qml/v4vm/qv4unwindhelper.cpp b/src/qml/qml/v4vm/qv4unwindhelper.cpp
new file mode 100644
index 0000000000..8a50b0bb53
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4unwindhelper.cpp
@@ -0,0 +1,37 @@
+#include <qv4unwindhelper.h>
+
+#include <wtf/Platform.h>
+
+#if CPU(X86_64) && (OS(LINUX) || OS(MAC_OS_X))
+# define USE_DW2_HELPER
+#elif CPU(X86) && OS(LINUX)
+# define USE_DW2_HELPER
+#elif CPU(ARM) && OS(LINUX)
+# define USE_ARM_HELPER
+#elif OS(WINDOWS)
+ // SJLJ will unwind on Windows
+# define USE_NULL_HELPER
+#elif OS(IOS)
+ // SJLJ will unwind on iOS
+# define USE_NULL_HELPER
+#else
+# warning "Unsupported/untested platform!"
+# define USE_NULL_HELPER
+#endif
+
+#ifdef USE_DW2_HELPER
+# include <qv4unwindhelper_p-dw2.h>
+#endif // USE_DW2_HELPER
+
+#ifdef USE_ARM_HELPER
+# include <qv4unwindhelper_p-arm.h>
+#endif // USE_ARM_HELPER
+
+#ifdef USE_NULL_HELPER
+using namespace QQmlJS::VM;
+void UnwindHelper::registerFunction(Function *function) {Q_UNUSED(function);}
+void UnwindHelper::registerFunctions(QVector<Function *> functions) {Q_UNUSED(functions);}
+void UnwindHelper::deregisterFunction(Function *function) {Q_UNUSED(function);}
+void UnwindHelper::deregisterFunctions(QVector<Function *> functions) {Q_UNUSED(functions);}
+#endif // USE_NULL_HELPER
+
diff --git a/src/qml/qml/v4vm/qv4unwindhelper.h b/src/qml/qml/v4vm/qv4unwindhelper.h
new file mode 100644
index 0000000000..9f6462d644
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4unwindhelper.h
@@ -0,0 +1,27 @@
+#ifndef QV4UNWINDHELPER_H
+#define QV4UNWINDHELPER_H
+
+#include <QtCore/QVector>
+
+namespace QQmlJS {
+namespace VM {
+
+struct Function;
+
+class UnwindHelper
+{
+public:
+ static void registerFunction(Function *function);
+ static void registerFunctions(QVector<Function *> functions);
+ static void deregisterFunction(Function *function);
+ static void deregisterFunctions(QVector<Function *> functions);
+#ifdef Q_PROCESSOR_ARM
+ static int unwindInfoSize();
+ static void writeARMUnwindInfo(void *codeAddr, int codeSize);
+#endif
+};
+
+} // VM namespace
+} // QQmlJS namespace
+
+#endif // QV4UNWINDHELPER_H
diff --git a/src/qml/qml/v4vm/qv4unwindhelper_p-arm.h b/src/qml/qml/v4vm/qv4unwindhelper_p-arm.h
new file mode 100644
index 0000000000..6938fa5189
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4unwindhelper_p-arm.h
@@ -0,0 +1,176 @@
+#ifndef QV4UNWINDHELPER_PDW2_H
+#define QV4UNWINDHELPER_PDW2_H
+
+#include <qv4unwindhelper.h>
+#include <qv4functionobject.h>
+#include <wtf/Platform.h>
+
+#include <QMap>
+#include <QMutex>
+
+#define __USE_GNU
+#include <dlfcn.h>
+
+#if USE(LIBUNWIND_DEBUG)
+#include <libunwind.h>
+#endif
+#include <execinfo.h>
+
+namespace QQmlJS {
+namespace VM {
+
+static void *removeThumbBit(void *addr)
+{
+ return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(addr) & ~1u);
+}
+
+static QMutex functionProtector;
+static QMap<quintptr, Function*> allFunctions;
+
+static Function *lookupFunction(void *pc)
+{
+ quintptr key = reinterpret_cast<quintptr>(pc);
+ QMap<quintptr, Function*>::ConstIterator it = allFunctions.lowerBound(key);
+ if (it != allFunctions.begin() && allFunctions.count() > 0)
+ --it;
+ if (it == allFunctions.end())
+ return 0;
+
+ quintptr codeStart = reinterpret_cast<quintptr>(removeThumbBit((*it)->codeRef.code().executableAddress()));
+ if (key < codeStart || key >= codeStart + (*it)->codeSize)
+ return 0;
+ return *it;
+}
+
+
+/* Program:
+vsp = r4 (REG_TO_SP r4)
+vsp -= 8 * 4 -- > vsp = vsp - (7 << 2) - 4
+pop r12, r10, r9, r8, r7, r6, r5, r4
+pop r4
+pop lr
+pop r0, r1, r2, r3
+*/
+
+#define REG_TO_SP 0b10010000
+#define VSP_MINUS 0b01000000
+#define POP_REG_MULTI 0b10000000
+#define POP_R4_MULTI 0b10100000
+#define POP_R4_R14_MULTI 0b10101000
+#define POP_R0_TO_R3 0b10110001
+#define FINISH 0b10110000
+
+#define MK_UW_WORD(first, second, third, fourth) \
+ (((first) << 24) | \
+ ((second) << 16) | \
+ ((third) << 8) | \
+ (fourth))
+
+static unsigned int extbl[] = {
+ MK_UW_WORD(0x80 | // High bit set to indicate that this isn't a PREL31
+ 2, // Choose personality routine #2
+ 2, // Number of 4 byte words used to encode remaining unwind instructions
+ REG_TO_SP | 4, // Encoded program from above.
+ VSP_MINUS | 7),
+ MK_UW_WORD(POP_REG_MULTI | 1, 0b01111111,
+ POP_R4_R14_MULTI,
+ POP_R0_TO_R3),
+ MK_UW_WORD(0b00001111,
+ FINISH,
+ FINISH,
+ FINISH)
+};
+
+static unsigned write_prel31(unsigned *addr, void *ptr)
+{
+ int delta = (char *)ptr - (char*)addr;
+ if (delta < 0)
+ delta |= (1 << 30);
+ else
+ delta &= ~(1 << 30);
+ *addr = ((unsigned)delta) & 0x7fffffffU;
+}
+
+void UnwindHelper::deregisterFunction(Function *function)
+{
+ QMutexLocker locker(&functionProtector);
+ allFunctions.remove(reinterpret_cast<quintptr>(function->code));
+}
+
+void UnwindHelper::deregisterFunctions(QVector<Function *> functions)
+{
+ QMutexLocker locker(&functionProtector);
+ foreach (Function *f, functions)
+ allFunctions.remove(reinterpret_cast<quintptr>(f->code));
+}
+
+void UnwindHelper::registerFunction(Function *function)
+{
+ QMutexLocker locker(&functionProtector);
+ allFunctions.insert(reinterpret_cast<quintptr>(function->code), function);
+}
+
+void UnwindHelper::registerFunctions(QVector<Function *> functions)
+{
+ QMutexLocker locker(&functionProtector);
+ foreach (Function *f, functions)
+ allFunctions.insert(reinterpret_cast<quintptr>(f->code), f);
+}
+
+int UnwindHelper::unwindInfoSize()
+{
+ return 2 * sizeof(unsigned int) // 2 extbl entries
+ + sizeof(extbl);
+}
+
+void UnwindHelper::writeARMUnwindInfo(void *codeAddr, int codeSize)
+{
+ unsigned int *exidx = (unsigned int *)((char *)codeAddr + codeSize);
+
+ unsigned char *exprog = (unsigned char *)((unsigned char *)codeAddr + codeSize + 8);
+
+ write_prel31(exidx, codeAddr);
+ exidx[1] = 4; // PREL31 offset to extbl, which follows right afterwards
+
+ memcpy(exprog, extbl, sizeof(extbl));
+
+#if USE(LIBUNWIND_DEBUG)
+ unw_dyn_info_t *info = (unw_dyn_info_t*)malloc(sizeof(unw_dyn_info_t));
+ info->start_ip = (unw_word_t)codeAddr;
+ info->end_ip = info->start_ip + codeSize;
+ info->gp = 0;
+ info->format = UNW_INFO_FORMAT_ARM_EXIDX;
+ info->u.rti.name_ptr = 0;
+ info->u.rti.segbase = 0;
+ info->u.rti.table_len = 8;
+ info->u.rti.table_data = (unw_word_t)exidx;
+ _U_dyn_register(info);
+#endif
+}
+
+}
+}
+
+extern "C" Q_DECL_EXPORT void *__gnu_Unwind_Find_exidx(void *pc, int *entryCount)
+{
+ typedef void *(*Old_Unwind_Find_exidx)(void*, int*);
+ static Old_Unwind_Find_exidx oldFunction = 0;
+ static ptrdiff_t *exidx = (ptrdiff_t*)malloc(2 * sizeof(uintptr_t));
+ if (!oldFunction)
+ oldFunction = (Old_Unwind_Find_exidx)dlsym(RTLD_NEXT, "__gnu_Unwind_Find_exidx");
+
+ {
+ QMutexLocker locker(&QQmlJS::VM::functionProtector);
+ QQmlJS::VM::Function *function = QQmlJS::VM::lookupFunction(pc);
+ if (function) {
+ *entryCount = 1;
+ void * codeStart = QQmlJS::VM::removeThumbBit(function->codeRef.code().executableAddress());
+ // At the end of the function we store our synthetic exception table entry.
+ return (char *)codeStart + function->codeSize;
+ }
+ }
+
+ return oldFunction(pc, entryCount);
+}
+
+#endif // QV4UNWINDHELPER_PDW2_H
diff --git a/src/qml/qml/v4vm/qv4unwindhelper_p-dw2.h b/src/qml/qml/v4vm/qv4unwindhelper_p-dw2.h
new file mode 100644
index 0000000000..10c99f539c
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4unwindhelper_p-dw2.h
@@ -0,0 +1,189 @@
+#ifndef QV4UNWINDHELPER_PDW2_H
+#define QV4UNWINDHELPER_PDW2_H
+
+#include <qv4unwindhelper.h>
+#include <qv4functionobject.h>
+#include <wtf/Platform.h>
+
+#include <QMap>
+#include <QMutex>
+
+#define __USE_GNU
+#include <dlfcn.h>
+
+namespace QQmlJS {
+namespace VM {
+
+namespace {
+#if CPU(X86_64)
+// Generated by fdegen
+static const unsigned char cie_fde_data[] = {
+ 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x0, 0x8, 0x78, 0x10, 0xc, 0x7, 0x8,
+ 0x90, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0,
+ 0x18, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x41, 0x13, 0x7e, 0x86,
+ 0x2, 0x43, 0xd, 0x6, 0x8c, 0x3, 0x8e, 0x4,
+ 0x0, 0x0, 0x0, 0x0
+};
+static const int fde_offset = 20;
+static const int initial_location_offset = 28;
+static const int address_range_offset = 36;
+#elif CPU(X86) && OS(LINUX)
+static const unsigned char cie_fde_data[] = {
+ 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x0, 0x4, 0x7c, 0x8, 0xc, 0x4, 0x4,
+ 0x88, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0,
+ 0x18, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x41, 0x13, 0x7e, 0x85,
+ 0x2, 0x43, 0xd, 0x5, 0x86, 0x3, 0x87, 0x4,
+ 0x0, 0x0, 0x0, 0x0,
+};
+static const int fde_offset = 20;
+static const int initial_location_offset = 28;
+static const int address_range_offset = 32;
+#endif
+} // anonymous namespace
+
+static QMutex functionProtector;
+static QMap<quintptr, Function*> allFunctions;
+
+static Function *lookupFunction(void *pc)
+{
+ quintptr key = reinterpret_cast<quintptr>(pc);
+ QMap<quintptr, Function*>::ConstIterator it = allFunctions.lowerBound(key);
+ if (it != allFunctions.begin() && allFunctions.count() > 0)
+ --it;
+ if (it == allFunctions.end())
+ return 0;
+
+ quintptr codeStart = reinterpret_cast<quintptr>((*it)->code);
+ if (key < codeStart || key >= codeStart + (*it)->codeSize)
+ return 0;
+ return *it;
+}
+
+namespace {
+void writeIntPtrValue(unsigned char *addr, intptr_t val)
+{
+ addr[0] = (val >> 0) & 0xff;
+ addr[1] = (val >> 8) & 0xff;
+ addr[2] = (val >> 16) & 0xff;
+ addr[3] = (val >> 24) & 0xff;
+#if QT_POINTER_SIZE == 8
+ addr[4] = (val >> 32) & 0xff;
+ addr[5] = (val >> 40) & 0xff;
+ addr[6] = (val >> 48) & 0xff;
+ addr[7] = (val >> 56) & 0xff;
+#endif
+}
+} // anonymous namespace
+
+static void ensureUnwindInfo(Function *f)
+{
+ if (!f->unwindInfo.isEmpty())
+ return;
+ QByteArray info;
+ info.resize(sizeof(cie_fde_data));
+
+ unsigned char *cie_and_fde = reinterpret_cast<unsigned char *>(info.data());
+ memcpy(cie_and_fde, cie_fde_data, sizeof(cie_fde_data));
+
+ intptr_t ptr = static_cast<char *>(f->codeRef.code().executableAddress()) - static_cast<char *>(0);
+ writeIntPtrValue(cie_and_fde + initial_location_offset, ptr);
+
+ writeIntPtrValue(cie_and_fde + address_range_offset, f->codeSize);
+
+ f->unwindInfo = info;
+}
+
+#if defined(Q_OS_DARWIN)
+extern "C" void __register_frame(void *fde);
+extern "C" void __deregister_frame(void *fde);
+#endif
+
+static void registerFunctionUnlocked(Function *f)
+{
+ allFunctions.insert(reinterpret_cast<quintptr>(f->code), f);
+#if defined(Q_OS_DARWIN)
+ ensureUnwindInfo(f);
+ __register_frame(f->unwindInfo.data() + fde_offset);
+#endif
+}
+
+static void deregisterFunctionUnlocked(Function *f)
+{
+ allFunctions.remove(reinterpret_cast<quintptr>(f->code));
+#if defined(Q_OS_DARWIN)
+ if (!f->unwindInfo.isEmpty())
+ __deregister_frame(f->unwindInfo.data() + fde_offset);
+#endif
+}
+
+void UnwindHelper::registerFunction(Function *function)
+{
+ QMutexLocker locker(&functionProtector);
+ registerFunctionUnlocked(function);
+}
+
+void UnwindHelper::registerFunctions(QVector<Function *> functions)
+{
+ QMutexLocker locker(&functionProtector);
+ foreach (Function *f, functions)
+ registerFunctionUnlocked(f);
+}
+
+void UnwindHelper::deregisterFunction(Function *function)
+{
+ QMutexLocker locker(&functionProtector);
+ deregisterFunctionUnlocked(function);
+}
+
+void UnwindHelper::deregisterFunctions(QVector<Function *> functions)
+{
+ QMutexLocker locker(&functionProtector);
+ foreach (Function *f, functions)
+ deregisterFunctionUnlocked(f);
+}
+
+} // VM namespace
+} // QQmlJS namespace
+
+#if defined(Q_OS_LINUX)
+extern "C" {
+
+struct bases
+{
+ void *tbase;
+ void *dbase;
+ void *func;
+};
+
+Q_V4_EXPORT void *_Unwind_Find_FDE(void *pc, struct bases *bases)
+{
+ typedef void *(*Old_Unwind_Find_FDE)(void *pc, struct bases *bases);
+ static Old_Unwind_Find_FDE oldFunction = 0;
+ if (!oldFunction)
+ oldFunction = (Old_Unwind_Find_FDE)dlsym(RTLD_NEXT, "_Unwind_Find_FDE");
+
+ {
+ QMutexLocker locker(&QQmlJS::VM::functionProtector);
+ QQmlJS::VM::Function *function = QQmlJS::VM::lookupFunction(pc);
+ if (function) {
+ bases->tbase = 0;
+ bases->dbase = 0;
+ bases->func = reinterpret_cast<void*>(function->code);
+ QQmlJS::VM::ensureUnwindInfo(function);
+ return function->unwindInfo.data() + QQmlJS::VM::fde_offset;
+ }
+ }
+
+ return oldFunction(pc, bases);
+}
+
+}
+#endif
+
+#endif // QV4UNWINDHELPER_PDW2_H
diff --git a/src/qml/qml/v4vm/qv4util.h b/src/qml/qml/v4vm/qv4util.h
new file mode 100644
index 0000000000..c43d85eca7
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4util.h
@@ -0,0 +1,74 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QV4UTIL_H
+#define QV4UTIL_H
+
+#include "qv4global.h"
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+
+template <typename T>
+struct TemporaryAssignment
+{
+ TemporaryAssignment(T &var, const T& temporaryValue)
+ : variable(var)
+ , savedValue(var)
+ {
+ variable = temporaryValue;
+ }
+ ~TemporaryAssignment()
+ {
+ variable = savedValue;
+ }
+ T &variable;
+ T savedValue;
+private:
+ TemporaryAssignment(const TemporaryAssignment<T>&);
+ TemporaryAssignment operator=(const TemporaryAssignment<T>&);
+};
+
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif // QV4UTIL_H
diff --git a/src/qml/qml/v4vm/qv4v8.cpp b/src/qml/qml/v4vm/qv4v8.cpp
new file mode 100644
index 0000000000..d81f23f30f
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4v8.cpp
@@ -0,0 +1,2141 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4v8.h"
+
+#include "qv4engine.h"
+#include "qv4runtime.h"
+#include "qv4mm.h"
+#include "qv4managed.h"
+#include "qv4functionobject.h"
+#include "qv4value.h"
+#include "qv4isel_masm_p.h"
+#include "qv4globalobject.h"
+#include "qv4regexpobject.h"
+#include "qv4dateobject.h"
+#include "qv4numberobject.h"
+#include "qv4booleanobject.h"
+#include "qv4stringobject.h"
+#include "qv4objectproto.h"
+#include <QThreadStorage>
+
+using namespace QQmlJS;
+using namespace QQmlJS::VM;
+
+namespace v8 {
+
+#define currentEngine() Isolate::GetCurrent()->GetCurrentContext()->GetEngine()
+
+#define Q_D(obj) QQmlJS::VM::Value *d = reinterpret_cast<QQmlJS::VM::Value*>(obj)
+
+#define ValuePtr(obj) reinterpret_cast<QQmlJS::VM::Value*>(obj)
+#define ConstValuePtr(obj) reinterpret_cast<const QQmlJS::VM::Value*>(obj)
+
+void *gcProtect(void *handle)
+{
+ Q_D(handle);
+ if (VM::Managed *m = d->asManaged()) {
+ currentEngine()->memoryManager->protect(m);
+ return currentEngine()->memoryManager;
+ }
+}
+
+void gcProtect(void *memoryManager, void *handle)
+{
+ Q_D(handle);
+ if (VM::Managed *m = d->asManaged())
+ if (memoryManager)
+ static_cast<VM::MemoryManager *>(memoryManager)->protect(m);
+}
+
+void gcUnprotect(void *memoryManager, void *handle)
+{
+ Q_D(handle);
+ if (VM::Managed *m = d->asManaged())
+ if (memoryManager)
+ static_cast<VM::MemoryManager *>(memoryManager)->unprotect(m);
+}
+
+struct V8AccessorGetter: FunctionObject {
+ AccessorGetter getter;
+ Persistent<Value> data;
+ Persistent<String> name;
+
+ V8AccessorGetter(ExecutionContext *scope, const Handle<String> &name, const AccessorGetter &getter, Handle<Value> data)
+ : FunctionObject(scope)
+ {
+ vtbl = &static_vtbl;
+ this->getter = getter;
+ this->data = Persistent<Value>::New(data);
+ this->name = Persistent<String>::New(name);
+ }
+
+ using Object::construct;
+
+ static VM::Value call(Managed *that, ExecutionContext *context, const VM::Value &thisObject, VM::Value *args, int argc)
+ {
+ V8AccessorGetter *getter = static_cast<V8AccessorGetter*>(that);
+ AccessorInfo info(thisObject, getter->data);
+ VM::Value result = VM::Value::undefinedValue();
+ try {
+ result = getter->getter(Local<String>::New(getter->name), info)->vmValue();
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(context);
+ }
+ return result;
+ }
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+DEFINE_MANAGED_VTABLE(V8AccessorGetter);
+
+struct V8AccessorSetter: FunctionObject {
+ AccessorSetter setter;
+ Persistent<Value> data;
+ Persistent<String> name;
+
+ V8AccessorSetter(ExecutionContext *scope, const Handle<String> &name, const AccessorSetter &setter, Handle<Value> data)
+ : FunctionObject(scope)
+ {
+ vtbl = &static_vtbl;
+ this->setter = setter;
+ this->data = Persistent<Value>::New(data);
+ this->name = Persistent<String>::New(name);
+ }
+
+ using Object::construct;
+
+ static VM::Value call(Managed *that, ExecutionContext *context, const VM::Value &thisObject, VM::Value *args, int argc)
+ {
+ if (!argc)
+ return VM::Value::undefinedValue();
+ V8AccessorSetter *setter = static_cast<V8AccessorSetter*>(that);
+ AccessorInfo info(thisObject, setter->data);
+ try {
+ setter->setter(Local<String>::New(setter->name), Local<Value>::New(Value::fromVmValue(args[0])), info);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(context);
+ }
+ return VM::Value::undefinedValue();
+ }
+
+protected:
+ static const ManagedVTable static_vtbl;
+};
+
+DEFINE_MANAGED_VTABLE(V8AccessorSetter);
+
+ScriptOrigin::ScriptOrigin(Handle<Value> resource_name, Handle<Integer> resource_line_offset, Handle<Integer> resource_column_offset)
+{
+ m_fileName = resource_name->ToString()->asQString();
+ m_lineNumber = resource_line_offset->ToInt32()->Value();
+ m_columnNumber = resource_column_offset->ToInt32()->Value();
+}
+
+Handle<Value> ScriptOrigin::ResourceName() const
+{
+ return Value::fromVmValue(VM::Value::fromString(currentEngine()->current, m_fileName));
+}
+
+Handle<Integer> ScriptOrigin::ResourceLineOffset() const
+{
+ return Integer::New(m_lineNumber);
+}
+
+Handle<Integer> ScriptOrigin::ResourceColumnOffset() const
+{
+ return Integer::New(m_columnNumber);
+}
+
+
+Local<Script> Script::New(Handle<String> source,
+ ScriptOrigin* origin,
+ ScriptData* pre_data,
+ Handle<String> script_data,
+ CompileFlags flags)
+{
+ Script *s = new Script;
+ s->m_script = source->ToString()->asQString();
+ if (origin)
+ s->m_origin = *origin;
+ s->m_flags = flags;
+ s->m_context = Handle<Context>();
+ return Local<Script>::New(Handle<Script>(s));
+}
+
+
+Local<Script> Script::New(Handle<String> source,
+ Handle<Value> file_name,
+ CompileFlags flags)
+{
+ ScriptOrigin origin(file_name);
+ return New(source, &origin, 0, Handle<String>(), flags);
+}
+
+Local<Script> Script::Compile(Handle<String> source, ScriptOrigin *origin, ScriptData *pre_data, Handle<String> script_data, Script::CompileFlags flags)
+{
+ Script *s = new Script;
+ s->m_script = source->ToString()->asQString();
+ if (origin)
+ s->m_origin = *origin;
+ s->m_flags = flags;
+ s->m_context = Context::GetCurrent();
+ return Local<Script>::New(Handle<Script>(s));
+}
+
+Local<Script> Script::Compile(Handle<String> source,
+ Handle<Value> file_name,
+ Handle<String> script_data,
+ CompileFlags flags)
+{
+ ScriptOrigin origin(file_name);
+ return Compile(source, &origin, 0, script_data, flags);
+}
+
+Local<Value> Script::Run()
+{
+ Handle<Context> context = m_context;
+ if (context.IsEmpty())
+ context = Context::GetCurrent();
+ ASSERT(context.get());
+ VM::ExecutionEngine *engine = context->GetEngine();
+ VM::ExecutionContext *ctx = engine->current;
+
+ VM::Value result = VM::Value::undefinedValue();
+ try {
+ QQmlJS::VM::Function *f = QQmlJS::VM::EvalFunction::parseSource(engine->rootContext, m_origin.m_fileName, m_script, QQmlJS::Codegen::EvalCode,
+ /*strictMode =*/ false, /*inheritContext =*/ false);
+ if (!f)
+ __qmljs_throw(engine->current, VM::Value::fromObject(engine->newSyntaxErrorObject(engine->current, 0)));
+
+ result = context->GetEngine()->run(f);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ }
+
+ return Local<Value>::New(Value::fromVmValue(result));
+}
+
+Local<Value> Script::Run(Handle<Object> qml)
+{
+ Handle<Context> context = m_context;
+ if (context.IsEmpty())
+ context = Context::GetCurrent();
+ ASSERT(context.get());
+ VM::ExecutionEngine *engine = context->GetEngine();
+ VM::ExecutionContext *ctx = engine->current;
+
+ VM::Value result = VM::Value::undefinedValue();
+
+ try {
+
+ VM::EvalFunction *eval = new (engine->memoryManager) VM::EvalFunction(engine->rootContext, qml->vmValue().asObject());
+
+ VM::Value arg = VM::Value::fromString(engine->current, m_script);
+
+ result = eval->evalCall(engine->current, VM::Value::undefinedValue(), &arg, 1, /*directCall*/ false);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ }
+ return Local<Value>::New(Value::fromVmValue(result));
+}
+
+Local<Value> Script::Id()
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+void Script::SetData(Handle<String> data)
+{
+ Q_UNIMPLEMENTED();
+}
+
+
+Local<String> Message::Get() const
+{
+ return Local<String>::New(Value::fromVmValue(VM::Value::fromString(currentEngine()->current, m_message)));
+}
+
+Handle<Value> Message::GetScriptResourceName() const
+{
+ return Value::fromVmValue(VM::Value::fromString(currentEngine()->current, m_resourceName));
+}
+
+int Message::GetLineNumber() const
+{
+ return m_lineNumber;
+}
+
+
+Local<StackFrame> StackTrace::GetFrame(uint32_t index) const
+{
+ if (index >= (uint)frames.size())
+ return Local<StackFrame>();
+ return frames.at(index);
+}
+
+int StackTrace::GetFrameCount() const
+{
+ return frames.size();
+}
+
+Local<Array> StackTrace::AsArray()
+{
+ Q_UNIMPLEMENTED();
+ return Local<Array>();
+}
+
+Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit, StackTrace::StackTraceOptions options)
+{
+ StackTrace *trace = new StackTrace;
+ VM::ExecutionEngine *engine = currentEngine();
+ VM::ExecutionContext *current = engine->current;
+ while (current && frame_limit) {
+ if (CallContext *c = current->asCallContext()) {
+ StackFrame *frame = new StackFrame(Value::fromVmValue(VM::Value::fromString(engine->id_null)),
+ Value::fromVmValue(VM::Value::fromString(c->function->name)),
+ 0, 0);
+ trace->frames.append(frame);
+ --frame_limit;
+ }
+ current = current->parent;
+ }
+
+ return Local<StackTrace>::New(Handle<StackTrace>(trace));
+}
+
+
+int StackFrame::GetLineNumber() const
+{
+ return m_lineNumber;
+}
+
+int StackFrame::GetColumn() const
+{
+ return m_columnNumber;
+}
+
+Local<String> StackFrame::GetScriptName() const
+{
+ return Local<String>::New(m_scriptName);
+}
+
+Local<String> StackFrame::GetScriptNameOrSourceURL() const
+{
+ return Local<String>::New(m_scriptName);
+}
+
+Local<String> StackFrame::GetFunctionName() const
+{
+ return Local<String>::New(m_functionName);
+}
+
+StackFrame::StackFrame(Handle<String> script, Handle<String> function, int line, int column)
+ : m_lineNumber(line)
+ , m_columnNumber(column)
+{
+ m_scriptName = Persistent<String>::New(script);
+ m_functionName = Persistent<String>::New(function);
+}
+
+
+bool Value::IsUndefined() const
+{
+ return ConstValuePtr(this)->isUndefined();
+}
+
+bool Value::IsNull() const {
+ return ConstValuePtr(this)->isNull();
+}
+
+bool Value::IsTrue() const
+{
+ return ConstValuePtr(this)->isBoolean() && ConstValuePtr(this)->booleanValue();
+}
+
+bool Value::IsFalse() const
+{
+ return !IsTrue();
+}
+
+bool Value::IsString() const
+{
+ return ConstValuePtr(this)->isString();
+}
+
+bool Value::IsFunction() const
+{
+ return ConstValuePtr(this)->asFunctionObject();
+}
+
+bool Value::IsArray() const
+{
+ return ConstValuePtr(this)->asArrayObject();
+}
+
+bool Value::IsObject() const
+{
+ return ConstValuePtr(this)->isObject();
+}
+
+bool Value::IsBoolean() const
+{
+ return ConstValuePtr(this)->isBoolean();
+}
+
+bool Value::IsNumber() const
+{
+ return ConstValuePtr(this)->isNumber();
+}
+
+bool Value::IsExternal() const
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+bool Value::IsInt32() const
+{
+ return ConstValuePtr(this)->isInteger();
+}
+
+bool Value::IsUint32() const
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+bool Value::IsDate() const
+{
+ return ConstValuePtr(this)->asDateObject();
+}
+
+bool Value::IsBooleanObject() const
+{
+ return ConstValuePtr(this)->asBooleanObject();
+}
+
+bool Value::IsNumberObject() const
+{
+ return ConstValuePtr(this)->asNumberObject();
+}
+
+bool Value::IsStringObject() const
+{
+ return ConstValuePtr(this)->asStringObject();
+}
+
+bool Value::IsRegExp() const
+{
+ return ConstValuePtr(this)->asRegExpObject();
+}
+
+bool Value::IsError() const
+{
+ return ConstValuePtr(this)->asErrorObject();
+}
+
+Local<Boolean> Value::ToBoolean() const
+{
+ return Local<Boolean>::New(Value::fromVmValue(VM::Value::fromBoolean(ConstValuePtr(this)->toBoolean())));
+}
+
+Local<Number> Value::ToNumber() const
+{
+ return Local<Number>::New(Value::fromVmValue(VM::Value::fromDouble(ConstValuePtr(this)->toNumber())));
+}
+
+Local<String> Value::ToString() const
+{
+ return Local<String>::New(Value::fromVmValue(VM::Value::fromString(ConstValuePtr(this)->toString(currentEngine()->current))));
+}
+
+Local<Object> Value::ToObject() const
+{
+ return Local<Object>::New(Value::fromVmValue(QQmlJS::VM::Value::fromObject(ConstValuePtr(this)->toObject(currentEngine()->current))));
+}
+
+Local<Integer> Value::ToInteger() const
+{
+ return Local<Integer>::New(Value::fromVmValue(QQmlJS::VM::Value::fromDouble(ConstValuePtr(this)->toInteger())));
+}
+
+Local<Uint32> Value::ToUint32() const
+{
+ return Local<Uint32>::New(Value::fromVmValue(QQmlJS::VM::Value::fromUInt32(ConstValuePtr(this)->toUInt32())));
+}
+
+Local<Int32> Value::ToInt32() const
+{
+ return Local<Int32>::New(Value::fromVmValue(QQmlJS::VM::Value::fromInt32(ConstValuePtr(this)->toInt32())));
+}
+
+Local<Uint32> Value::ToArrayIndex() const
+{
+ return Local<Uint32>::New(Value::fromVmValue(QQmlJS::VM::Value::fromUInt32(ConstValuePtr(this)->asArrayIndex())));
+}
+
+bool Value::BooleanValue() const
+{
+ return ConstValuePtr(this)->toBoolean();
+}
+
+double Value::NumberValue() const
+{
+ return ConstValuePtr(this)->asDouble();
+}
+
+int64_t Value::IntegerValue() const
+{
+ return (int64_t)ConstValuePtr(this)->toInteger();
+}
+
+uint32_t Value::Uint32Value() const
+{
+ return ConstValuePtr(this)->toUInt32();
+}
+
+int32_t Value::Int32Value() const
+{
+ return ConstValuePtr(this)->toInt32();
+}
+
+bool Value::Equals(Handle<Value> that) const
+{
+ return __qmljs_equal(*ConstValuePtr(this), *ConstValuePtr(&that));
+}
+
+bool Value::StrictEquals(Handle<Value> that) const
+{
+ return __qmljs_strict_equal(*ConstValuePtr(this), *ConstValuePtr(&that));
+}
+
+VM::Value Value::vmValue() const
+{
+ return *ConstValuePtr(this);
+}
+
+Handle<Value> Value::fromVmValue(const VM::Value &vmValue)
+{
+ Handle<Value> res;
+ res.val = vmValue.val;
+ return res;
+}
+
+
+bool Boolean::Value() const
+{
+ return BooleanValue();
+}
+
+Handle<Boolean> Boolean::New(bool value)
+{
+ return Value::fromVmValue(VM::Value::fromBoolean(value));
+}
+
+
+int String::Length() const
+{
+ return asVMString()->toQString().length();
+}
+
+uint32_t String::Hash() const
+{
+ return asVMString()->hashValue();
+}
+
+
+// Returns this string's hash, its length, and its interned symbol id in a
+// single struct, avoiding three separate round-trips to the VM string.
+String::CompleteHashData String::CompleteHash() const
+{
+ VM::String *vmString = asVMString();
+ CompleteHashData result;
+ result.hash = vmString->hashValue();
+ result.length = vmString->toQString().length();
+ result.symbol_id = vmString->identifier;
+ return result;
+}
+
+uint32_t String::ComputeHash(uint16_t *string, int length)
+{
+ return VM::String::createHashValue(reinterpret_cast<const QChar *>(string), length);
+}
+
+uint32_t String::ComputeHash(char *string, int length)
+{
+ // ### unefficient
+ QString s = QString::fromLatin1((char *)string, length);
+ return VM::String::createHashValue(s.constData(), s.length());
+}
+
+bool String::Equals(uint16_t *str, int length)
+{
+ return asQString() == QString(reinterpret_cast<QChar*>(str), length);
+}
+
+bool String::Equals(char *str, int length)
+{
+ return asQString() == QString::fromLatin1(str, length);
+}
+
+uint16_t String::GetCharacter(int index)
+{
+ return asQString().at(index).unicode();
+}
+
+// Copies up to `length` UTF-16 code units of this string into buffer + start,
+// returning the number of units written.  `length < 0` means "whole string"
+// (v8 convention).
+// NOTE(review): `start` offsets the *destination* buffer, not the source
+// string, and `options` is ignored (see ### below) — confirm against the v8
+// String::Write contract, which treats `start` as the source offset.
+int String::Write(uint16_t *buffer, int start, int length, int options) const
+{
+ if (length < 0)
+ length = asQString().length();
+ if (length == 0)
+ return 0;
+ if (asQString().length() < length)
+ length = asQString().length();
+ // ### do we use options?
+ memcpy(buffer + start, asQString().constData(), length*sizeof(QChar));
+ return length;
+}
+
+v8::Local<String> String::Empty()
+{
+ return Local<String>::New(v8::Value::fromVmValue(VM::Value::fromString(currentEngine()->current, QString())));
+}
+
+bool String::IsExternal() const
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+String::ExternalStringResource *String::GetExternalStringResource() const
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+String *String::Cast(v8::Value *obj)
+{
+ return static_cast<String *>(obj);
+}
+
+
+Local<String> String::New(const char *data, int length)
+{
+ QQmlJS::VM::Value v = QQmlJS::VM::Value::fromString(currentEngine()->current, QString::fromLatin1(data, length));
+ return Local<String>::New(v8::Value::fromVmValue(v));
+}
+
+Local<String> String::New(const uint16_t *data, int length)
+{
+ QQmlJS::VM::Value v = QQmlJS::VM::Value::fromString(currentEngine()->current, QString((const QChar *)data, length));
+ return Local<String>::New(v8::Value::fromVmValue(v));
+}
+
+Local<String> String::NewSymbol(const char *data, int length)
+{
+ QString str = QString::fromLatin1(data, length);
+ VM::String *vmString = currentEngine()->newIdentifier(str);
+ return New(vmString);
+}
+
+Local<String> String::New(VM::String *s)
+{
+ return Local<String>::New(v8::Value::fromVmValue(VM::Value::fromString(s)));
+}
+
+Local<String> String::NewExternal(String::ExternalStringResource *resource)
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+QString String::asQString() const
+{
+ return asVMString()->toQString();
+}
+
+VM::String *String::asVMString() const
+{
+ const VM::Value *v = ConstValuePtr(this);
+ ASSERT(v->isString());
+ return v->stringValue();
+}
+
+String::AsciiValue::AsciiValue(Handle<v8::Value> obj)
+{
+ str = obj->ToString()->asQString().toLatin1();
+}
+
+String::Value::Value(Handle<v8::Value> obj)
+{
+ str = obj->ToString()->asQString();
+}
+
+
+double Number::Value() const
+{
+ const VM::Value *v = ConstValuePtr(this);
+ assert(v->isNumber());
+ return v->asDouble();
+}
+
+Local<Number> Number::New(double value)
+{
+ return Local<Number>::New(Value::fromVmValue(VM::Value::fromDouble(value)));
+}
+
+Number *Number::Cast(v8::Value *obj)
+{
+ return static_cast<Number *>(obj);
+}
+
+Local<Integer> Integer::New(int32_t value)
+{
+ return Local<Integer>::New(Value::fromVmValue(VM::Value::fromInt32(value)));
+}
+
+Local<Integer> Integer::NewFromUnsigned(uint32_t value)
+{
+ return Local<Integer>::New(Value::fromVmValue(VM::Value::fromUInt32(value)));
+}
+
+Local<Integer> Integer::New(int32_t value, Isolate *)
+{
+ return New(value);
+}
+
+Local<Integer> Integer::NewFromUnsigned(uint32_t value, Isolate *)
+{
+ return NewFromUnsigned(value);
+}
+
+int64_t Integer::Value() const
+{
+ const VM::Value *v = ConstValuePtr(this);
+ assert(v->isNumber());
+ return (int64_t)v->asDouble();
+}
+
+Integer *Integer::Cast(v8::Value *obj)
+{
+ return static_cast<Integer *>(obj);
+}
+
+int32_t Int32::Value() const
+{
+ const VM::Value *v = ConstValuePtr(this);
+ assert(v->isInteger());
+ return v->int_32;
+}
+
+uint32_t Uint32::Value() const
+{
+ const VM::Value *v = ConstValuePtr(this);
+ assert(v->isNumber());
+ return v->toUInt32();
+}
+
+
+struct ExternalResourceWrapper : public QQmlJS::VM::Object::ExternalResource
+{
+ ExternalResourceWrapper(v8::Object::ExternalResource *wrapped)
+ {
+ this->wrapped = wrapped;
+ }
+
+ virtual ~ExternalResourceWrapper()
+ {
+ wrapped->Dispose();
+ }
+
+ v8::Object::ExternalResource *wrapped;
+};
+
+
+// Sets property `key` (converted to a string) to `value` on this object.
+// A VM exception thrown by the put is recorded on the current Isolate,
+// accepted, and reported as failure (returns false); otherwise returns true.
+// NOTE(review): `attribs` is currently ignored (see the ### below) — callers
+// passing attributes will not get them applied.
+bool Object::Set(Handle<Value> key, Handle<Value> value, PropertyAttribute attribs)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ QQmlJS::VM::ExecutionContext *ctx = currentEngine()->current;
+ bool result = true;
+ try {
+ o->put(ctx, ValuePtr(&key)->toString(ctx), *ValuePtr(&value));
+ // ### attribs
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ result = false;
+ }
+ return result;
+}
+
+bool Object::Set(uint32_t index, Handle<Value> value)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ QQmlJS::VM::ExecutionContext *ctx = currentEngine()->current;
+ bool result = true;
+ try {
+ o->putIndexed(ctx, index, *ValuePtr(&value));
+ // ### attribs
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ result = false;
+ }
+ return result;
+}
+
+Local<Value> Object::Get(Handle<Value> key)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ QQmlJS::VM::ExecutionContext *ctx = currentEngine()->current;
+ QQmlJS::VM::Value prop = VM::Value::undefinedValue();
+ try {
+ prop = o->get(ctx, ValuePtr(&key)->toString(ctx));
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ }
+ return Local<Value>::New(Value::fromVmValue(prop));
+}
+
+Local<Value> Object::Get(uint32_t key)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ QQmlJS::VM::ExecutionContext *ctx = currentEngine()->current;
+ QQmlJS::VM::Value prop = VM::Value::undefinedValue();
+ try {
+ prop = o->getIndexed(ctx, key);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ }
+ return Local<Value>::New(Value::fromVmValue(prop));
+}
+
+bool Object::Has(Handle<String> key)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ return o->__hasProperty__(ValuePtr(&key)->asString());
+}
+
+bool Object::Delete(Handle<String> key)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ bool result = false;
+ ExecutionContext *ctx = currentEngine()->current;
+ try {
+ result = o->deleteProperty(ctx, ValuePtr(&key)->asString());
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ }
+ return result;
+}
+
+bool Object::Has(uint32_t index)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ if (!o)
+ return false;
+ return o->__hasProperty__(index);
+}
+
+bool Object::Delete(uint32_t index)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ ExecutionContext *ctx = currentEngine()->current;
+ bool result = false;
+ try {
+ result = o->deleteIndexedProperty(ctx, index);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(ctx);
+ }
+ return result;
+}
+
+// Installs a native accessor property on this object, wrapping the v8-style
+// getter/setter callbacks in VM function objects.  Always returns true.
+// NOTE(review): the `settings` (AccessControl) argument is ignored, and the
+// ReadOnly bit of `attribute` is not applied (only DontDelete/DontEnum are
+// honored) — confirm whether that is intended.
+bool Object::SetAccessor(Handle<String> name, AccessorGetter getter, AccessorSetter setter, Handle<Value> data, AccessControl settings, PropertyAttribute attribute)
+{
+ VM::ExecutionEngine *engine = currentEngine();
+
+ VM::FunctionObject *wrappedGetter = 0;
+ if (getter) {
+ wrappedGetter = new (engine->memoryManager) V8AccessorGetter(engine->rootContext, name, getter, data);
+ }
+ VM::FunctionObject *wrappedSetter = 0;
+ if (setter) {
+ wrappedSetter = new (engine->memoryManager) V8AccessorSetter(engine->rootContext, name, setter, data);
+ }
+
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ PropertyAttributes attrs = Attr_Accessor;
+ attrs.setConfigurable(!(attribute & DontDelete));
+ attrs.setEnumerable(!(attribute & DontEnum));
+ VM::Property *pd = o->insertMember(name->asVMString(), attrs);
+ pd->setGetter(wrappedGetter);
+ pd->setSetter(wrappedSetter);
+ return true;
+}
+
+Local<Array> Object::GetPropertyNames()
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+
+ VM::ArrayObject *array = currentEngine()->newArrayObject(currentEngine()->current)->asArrayObject();
+ ObjectIterator it(currentEngine()->current, o, ObjectIterator::WithProtoChain|ObjectIterator::EnumberableOnly);
+ while (1) {
+ VM::Value v = it.nextPropertyNameAsString();
+ if (v.isNull())
+ break;
+ array->push_back(v);
+ }
+ return Local<Array>::New(Value::fromVmValue(VM::Value::fromObject(array)));
+}
+
+// Returns the names of this object's own enumerable properties as a JS array.
+// Unlike GetPropertyNames(), the prototype chain is deliberately not walked
+// (no ObjectIterator::WithProtoChain flag).
+// Fix: removed the unused local `VM::Value arg` that was computed and never
+// read.
+Local<Array> Object::GetOwnPropertyNames()
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ ArrayObject *array = currentEngine()->newArrayObject(currentEngine()->current)->asArrayObject();
+ ObjectIterator it(currentEngine()->current, o, ObjectIterator::EnumberableOnly);
+ while (1) {
+ VM::Value v = it.nextPropertyNameAsString();
+ if (v.isNull())
+ break;
+ array->push_back(v);
+ }
+ return Local<Array>::New(Value::fromVmValue(VM::Value::fromObject(array)));
+}
+
+// Returns this object's prototype wrapped as a JS value.
+// Fix: removed the unused default-constructed local `Local<Value> result`.
+Local<Value> Object::GetPrototype()
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ return Local<Value>::New(Value::fromVmValue(QQmlJS::VM::Value::fromObject(o->prototype)));
+}
+
+bool Object::SetPrototype(Handle<Value> prototype)
+{
+ QQmlJS::VM::Object *p = ConstValuePtr(&prototype)->asObject();
+ if (!p)
+ return false;
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ o->prototype = p;
+ return true;
+}
+
+Local<Value> Object::GetInternalField(int index)
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+void Object::SetInternalField(int index, Handle<Value> value)
+{
+ Q_UNIMPLEMENTED();
+}
+
+// Attaches an opaque external resource to this object (wrapped so its
+// Dispose() runs when the VM resource is destroyed).  Silently no-ops when
+// this value is not an object.
+// NOTE(review): a previously attached resource is overwritten without being
+// deleted — looks like a leak if called twice on the same object; confirm
+// ownership expectations with ExternalResourceWrapper.
+void Object::SetExternalResource(Object::ExternalResource *resource)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ if (!o)
+ return;
+ o->externalResource = new ExternalResourceWrapper(resource);
+}
+
+Object::ExternalResource *Object::GetExternalResource()
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ if (!o || !o->externalResource)
+ return 0;
+ return static_cast<ExternalResourceWrapper*>(o->externalResource)->wrapped;
+}
+
+bool Object::HasOwnProperty(Handle<String> key)
+{
+ QQmlJS::VM::Object *o = ConstValuePtr(this)->asObject();
+ assert(o);
+ QQmlJS::VM::ExecutionContext *ctx = currentEngine()->current;
+ return o->__getOwnProperty__(ValuePtr(&key)->toString(ctx));
+}
+
+int Object::GetIdentityHash()
+{
+ return (quintptr)ConstValuePtr(this)->asObject() >> 2;
+}
+
+bool Object::SetHiddenValue(Handle<String> key, Handle<Value> value)
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+Local<Value> Object::GetHiddenValue(Handle<String> key)
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+Local<Object> Object::Clone()
+{
+ Q_UNIMPLEMENTED();
+ Q_UNREACHABLE();
+}
+
+bool Object::IsCallable()
+{
+ return ConstValuePtr(this)->asFunctionObject();
+}
+
+Local<Value> Object::CallAsFunction(Handle<Object> recv, int argc, Handle<Value> argv[])
+{
+ VM::FunctionObject *f = ConstValuePtr(this)->asFunctionObject();
+ if (!f)
+ return Local<Value>();
+ VM::Value retval = f->call(currentEngine()->current, recv->vmValue(),
+ reinterpret_cast<QQmlJS::VM::Value*>(argv),
+ argc);
+ return Local<Value>::New(Value::fromVmValue(retval));
+}
+
+Local<Value> Object::CallAsConstructor(int argc, Handle<Value> argv[])
+{
+ VM::FunctionObject *f = ConstValuePtr(this)->asFunctionObject();
+ if (!f)
+ return Local<Value>();
+ VM::Value retval = f->construct(currentEngine()->current,
+ reinterpret_cast<QQmlJS::VM::Value*>(argv),
+ argc);
+ return Local<Value>::New(Value::fromVmValue(retval));
+}
+
+Local<Object> Object::New()
+{
+ VM::Object *o = currentEngine()->newObject();
+ return Local<Object>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+Object *Object::Cast(Value *obj)
+{
+ return static_cast<Object *>(obj);
+}
+
+
+uint32_t Array::Length() const
+{
+ VM::ArrayObject *a = ConstValuePtr(this)->asArrayObject();
+ if (!a)
+ return 0;
+ return a->arrayLength();
+}
+
+// Creates a new JS array.  A length in (0, 0x1000) pre-reserves storage as an
+// optimization; larger requests skip the reservation (the array grows on
+// demand).
+// Fix: a negative `length` previously satisfied `length < 0x1000` and was
+// passed straight to arrayReserve(); it is now ignored.
+Local<Array> Array::New(int length)
+{
+ VM::ArrayObject *a = currentEngine()->newArrayObject(currentEngine()->current);
+ if (length > 0 && length < 0x1000)
+ a->arrayReserve(length);
+
+ return Local<Array>::New(Value::fromVmValue(VM::Value::fromObject(a)));
+}
+
+Array *Array::Cast(Value *obj)
+{
+ return static_cast<Array *>(obj);
+}
+
+
+Local<Object> Function::NewInstance() const
+{
+ VM::FunctionObject *f = ConstValuePtr(this)->asFunctionObject();
+ assert(f);
+ VM::ExecutionContext *context = currentEngine()->current;
+ QQmlJS::VM::Value result = VM::Value::undefinedValue();
+ try {
+ result = f->construct(context, 0, 0);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(context);
+ }
+ return Local<Object>::New(Value::fromVmValue(result));
+}
+
+Local<Object> Function::NewInstance(int argc, Handle<Value> argv[]) const
+{
+ VM::FunctionObject *f = ConstValuePtr(this)->asFunctionObject();
+ assert(f);
+ VM::ExecutionContext *context = currentEngine()->current;
+ QQmlJS::VM::Value result = VM::Value::undefinedValue();
+ try {
+ result = f->construct(context, reinterpret_cast<QQmlJS::VM::Value*>(argv), argc);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(context);
+ }
+ return Local<Object>::New(Value::fromVmValue(result));
+}
+
+// Invokes this value as a function with `thisObj` as receiver.  Returns an
+// empty handle when the value is not callable.  A VM exception is recorded on
+// the current Isolate and accepted, and undefined is returned in that case.
+Local<Value> Function::Call(Handle<Object> thisObj, int argc, Handle<Value> argv[])
+{
+ QQmlJS::VM::FunctionObject *f = ConstValuePtr(this)->asFunctionObject();
+ if (!f)
+ return Local<Value>();
+ VM::ExecutionContext *context = currentEngine()->current;
+ QQmlJS::VM::Value result = VM::Value::undefinedValue();
+ try {
+ // Handle<Value> and VM::Value share a layout, so argv can be
+ // reinterpreted directly as the VM argument array.
+ result = f->call(context, *ConstValuePtr(&thisObj),
+ reinterpret_cast<QQmlJS::VM::Value*>(argv), argc);
+ } catch (VM::Exception &e) {
+ Isolate::GetCurrent()->setException(e.value());
+ e.accept(context);
+ }
+ return Local<Value>::New(Value::fromVmValue(result));
+}
+
+Handle<Value> Function::GetName() const
+{
+ QQmlJS::VM::FunctionObject *f = ConstValuePtr(this)->asFunctionObject();
+ if (!f)
+ return Handle<Value>();
+ return Value::fromVmValue(VM::Value::fromString(f->name));
+}
+
+ScriptOrigin Function::GetScriptOrigin() const
+{
+ Q_UNIMPLEMENTED();
+ return ScriptOrigin();
+}
+
+Function *Function::Cast(Value *obj)
+{
+ return static_cast<Function *>(obj);
+}
+
+
+Local<Value> Date::New(double time)
+{
+ VM::Object *o = currentEngine()->newDateObject(VM::Value::fromDouble(time));
+ return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+double Date::NumberValue() const
+{
+ DateObject *d = ConstValuePtr(this)->asDateObject();
+ assert(d);
+ return d->value.doubleValue();
+}
+
+Date *Date::Cast(Value *obj)
+{
+ return static_cast<Date *>(obj);
+}
+
+void Date::DateTimeConfigurationChangeNotification()
+{
+ Q_UNIMPLEMENTED();
+}
+
+
+Local<Value> NumberObject::New(double value)
+{
+ VM::Object *o = currentEngine()->newNumberObject(VM::Value::fromDouble(value));
+ return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+double NumberObject::NumberValue() const
+{
+ VM::NumberObject *n = ConstValuePtr(this)->asNumberObject();
+ assert(n);
+ return n->value.doubleValue();
+}
+
+NumberObject *NumberObject::Cast(Value *obj)
+{
+ return static_cast<NumberObject *>(obj);
+}
+
+Local<Value> BooleanObject::New(bool value)
+{
+ VM::Object *o = currentEngine()->newBooleanObject(VM::Value::fromBoolean(value));
+ return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+bool BooleanObject::BooleanValue() const
+{
+ VM::BooleanObject *b = ConstValuePtr(this)->asBooleanObject();
+ assert(b);
+ return b->value.booleanValue();
+}
+
+BooleanObject *BooleanObject::Cast(Value *obj)
+{
+ return static_cast<BooleanObject *>(obj);
+}
+
+Local<Value> StringObject::New(Handle<String> value)
+{
+ VM::Object *o = currentEngine()->newStringObject(currentEngine()->current, VM::Value::fromString(value->vmValue().asString()));
+ return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+Local<String> StringObject::StringValue() const
+{
+ VM::StringObject *s = ConstValuePtr(this)->asStringObject();
+ assert(s);
+ return Local<String>::New(Value::fromVmValue(s->value));
+}
+
+StringObject *StringObject::Cast(Value *obj)
+{
+ return static_cast<StringObject *>(obj);
+}
+
+// Compiles `pattern` into a RegExp object, translating the v8 flag bits
+// (kGlobal/kIgnoreCase/kMultiline) into the engine's V4IR::RegExp flags.
+Local<RegExp> RegExp::New(Handle<String> pattern, RegExp::Flags flags)
+{
+ int f = 0;
+ if (flags & kGlobal)
+ f |= V4IR::RegExp::RegExp_Global;
+ if (flags & kIgnoreCase)
+ f |= V4IR::RegExp::RegExp_IgnoreCase;
+ if (flags & kMultiline)
+ f |= V4IR::RegExp::RegExp_Multiline;
+ VM::Object *o = currentEngine()->newRegExpObject(pattern->asQString(), f);
+ return Local<RegExp>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+Local<String> RegExp::GetSource() const
+{
+ RegExpObject *re = ConstValuePtr(this)->asRegExpObject();
+ assert(re);
+ return Local<String>::New(Value::fromVmValue(VM::Value::fromString(currentEngine()->current, re->value->pattern())));
+}
+
+RegExp::Flags RegExp::GetFlags() const
+{
+ RegExpObject *re = ConstValuePtr(this)->asRegExpObject();
+ assert(re);
+
+ int f = 0;
+ if (re->global)
+ f |= kGlobal;
+ if (re->value->ignoreCase())
+ f |= kIgnoreCase;
+ if (re->value->multiLine())
+ f |= kMultiline;
+
+ return (RegExp::Flags)f;
+}
+
+RegExp *RegExp::Cast(Value *obj)
+{
+ return static_cast<RegExp *>(obj);
+}
+
+struct VoidStarWrapper : public VM::Object::ExternalResource
+{
+ void *data;
+};
+
+Local<Value> External::Wrap(void *data)
+{
+ return New(data);
+}
+
+void *External::Unwrap(Handle<v8::Value> obj)
+{
+ return obj.As<External>()->Value();
+}
+
+// Wraps a raw pointer in a fresh JS object by stashing it in a
+// VoidStarWrapper external resource; External::Value() performs the inverse
+// lookup.
+Local<External> External::New(void *value)
+{
+ VM::Object *o = currentEngine()->newObject();
+ VoidStarWrapper *wrapper = new VoidStarWrapper;
+ wrapper->data = value;
+ o->externalResource = wrapper;
+ return Local<v8::External>::New(v8::Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+External *External::Cast(v8::Value *obj)
+{
+ return static_cast<External *>(obj);
+}
+
+void *External::Value() const
+{
+ VM::Object *o = ConstValuePtr(this)->asObject();
+ if (!o || !o->externalResource)
+ return 0;
+ return static_cast<VoidStarWrapper*>(o->externalResource)->data;
+}
+
+
+void Template::Set(Handle<String> name, Handle<Value> value, PropertyAttribute attributes)
+{
+ Property p;
+ p.name = Persistent<String>::New(name);
+ p.value = Persistent<Value>::New(value);
+ p.attributes = attributes;
+ m_properties << p;
+}
+
+void Template::Set(const char *name, Handle<Value> value)
+{
+ Set(String::New(name), value);
+}
+
+
+// Snapshot of one native invocation: copies the argument values, receiver,
+// constructor flag and template data into persistent handles so a callback
+// may retain them beyond the call.
+Arguments::Arguments(const VM::Value *args, int argc, const VM::Value &thisObject, bool isConstructor, const Persistent<Value> &data)
+{
+ for (int i = 0; i < argc; ++i)
+ m_args << Persistent<Value>::New(Value::fromVmValue(args[i]));
+ m_thisObject = Persistent<Object>::New(Value::fromVmValue(thisObject));
+ m_isConstructor = isConstructor;
+ m_data = Persistent<Value>::New(data);
+}
+
+int Arguments::Length() const
+{
+ return m_args.size();
+}
+
+Local<Value> Arguments::operator [](int i) const
+{
+ return Local<Value>::New(m_args.at(i));
+}
+
+Local<Object> Arguments::This() const
+{
+ return Local<Object>::New(m_thisObject);
+}
+
+Local<Object> Arguments::Holder() const
+{
+ // ### FIXME.
+ return Local<Object>::New(m_thisObject);
+}
+
+bool Arguments::IsConstructCall() const
+{
+ return m_isConstructor;
+}
+
+Local<Value> Arguments::Data() const
+{
+ return Local<Value>::New(m_data);
+}
+
+Isolate *Arguments::GetIsolate() const
+{
+ return Isolate::GetCurrent();
+}
+
+
+AccessorInfo::AccessorInfo(const VM::Value &thisObject, const Persistent<Value> &data)
+{
+ m_this = Persistent<Object>::New(Value::fromVmValue(thisObject));
+ m_data = data;
+}
+
+Isolate *AccessorInfo::GetIsolate() const
+{
+ return Isolate::GetCurrent();
+}
+
+Local<Value> AccessorInfo::Data() const
+{
+ return Local<Value>::New(m_data);
+}
+
+Local<Object> AccessorInfo::This() const
+{
+ return Local<Object>::New(m_this);
+}
+
+Local<Object> AccessorInfo::Holder() const
+{
+ // ### FIXME
+ return Local<Object>::New(m_this);
+}
+
+// Template that layers v8 ObjectTemplate semantics — named/indexed/fallback
+// property interceptors, accessors, and data properties — on top of a VM
+// object class.  BaseClass is VM::Object, VM::FunctionObject or
+// VM::FunctionPrototype (see the explicit vtable instantiations below).
+template <typename BaseClass>
+class V4V8Object : public BaseClass
+{
+public:
+ V4V8Object(VM::ExecutionEngine *engine, ObjectTemplate *tmpl)
+ : BaseClass(engine->rootContext)
+ {
+ this->vtbl = &static_vtbl;
+ // A null/empty template is replaced by a fresh default one so the
+ // interceptor members can be probed unconditionally below.
+ m_template = Persistent<ObjectTemplate>(tmpl);
+ if (m_template.IsEmpty())
+ m_template = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+
+ // Install the template's accessor (getter/setter) properties.
+ foreach (const ObjectTemplate::Accessor &acc, m_template->m_accessors) {
+ PropertyAttributes attrs = Attr_Accessor;
+ attrs.setConfigurable(!(acc.attribute & DontDelete));
+ attrs.setEnumerable(!(acc.attribute & DontEnum));
+ VM::Property *pd = this->insertMember(acc.name->asVMString(), attrs);
+ *pd = VM::Property::fromAccessor(acc.getter->vmValue().asFunctionObject(),
+ acc.setter->vmValue().asFunctionObject());
+ }
+
+ initProperties(m_template.get());
+ }
+
+ // Copies the template's plain data properties onto this object.
+ void initProperties(Template *tmpl)
+ {
+ foreach (const Template::Property &p, tmpl->m_properties) {
+ PropertyAttributes attrs = Attr_Data;
+ attrs.setConfigurable(!(p.attributes & DontDelete));
+ attrs.setEnumerable(!(p.attributes & DontEnum));
+ attrs.setWritable(!(p.attributes & ReadOnly));
+ VM::Property *pd = this->insertMember(p.name->asVMString(), attrs);
+ *pd = VM::Property::fromValue(p.value->vmValue());
+ }
+ }
+
+ Persistent<ObjectTemplate> m_template;
+
+protected:
+ AccessorInfo namedAccessorInfo()
+ {
+ // ### thisObject?
+ return AccessorInfo(VM::Value::fromObject(this), m_template->m_namedPropertyData);
+ }
+ AccessorInfo fallbackAccessorInfo()
+ {
+ // ### thisObject?
+ return AccessorInfo(VM::Value::fromObject(this), m_template->m_fallbackPropertyData);
+ }
+ AccessorInfo indexedAccessorInfo()
+ {
+ // ### thisObject?
+ // NOTE(review): passes m_namedPropertyData rather than an
+ // indexed-specific data member — confirm this is intentional.
+ return AccessorInfo(VM::Value::fromObject(this), m_template->m_namedPropertyData);
+ }
+
+ static const ManagedVTable static_vtbl;
+
+ // Property get: named interceptor first; then the base lookup; then the
+ // fallback interceptor for names the base class does not have.
+ static VM::Value get(VM::Managed *m, ExecutionContext *ctx, VM::String *name, bool *hasProperty)
+ {
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_namedPropertyGetter) {
+ Handle<Value> result = that->m_template->m_namedPropertyGetter(String::New(name), that->namedAccessorInfo());
+ if (!result.IsEmpty()) {
+ if (hasProperty)
+ *hasProperty = true;
+ return result->vmValue();
+ }
+ }
+
+ bool hasProp = false;
+ VM::Value result = BaseClass::get(m, ctx, name, &hasProp);
+
+ if (!hasProp && that->m_template->m_fallbackPropertyGetter) {
+ Handle<Value> fallbackResult = that->m_template->m_fallbackPropertyGetter(String::New(name), that->fallbackAccessorInfo());
+ if (!fallbackResult.IsEmpty()) {
+ if (hasProperty)
+ *hasProperty = true;
+ return fallbackResult->vmValue();
+ }
+ }
+
+ if (hasProperty)
+ *hasProperty = hasProp;
+ return result;
+ }
+
+ // Indexed get: indexed interceptor first, then the base lookup.
+ static VM::Value getIndexed(VM::Managed *m, ExecutionContext *ctx, uint index, bool *hasProperty)
+ {
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_indexedPropertyGetter) {
+ Handle<Value> result = that->m_template->m_indexedPropertyGetter(index, that->indexedAccessorInfo());
+ if (!result.IsEmpty()) {
+ if (hasProperty)
+ *hasProperty = true;
+ return result->vmValue();
+ }
+ }
+ return BaseClass::getIndexed(m, ctx, index, hasProperty);
+ }
+
+ // Property put: named interceptor may swallow the write; otherwise an
+ // existing own property is updated, else the fallback setter (if any) runs,
+ // else the base class put applies.
+ static void put(VM::Managed *m, ExecutionContext *ctx, VM::String *name, const VM::Value &value)
+ {
+ Local<Value> v8Value = Local<Value>::New(Value::fromVmValue(value));
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_namedPropertySetter) {
+ Handle<Value> result = that->m_template->m_namedPropertySetter(String::New(name), v8Value, that->namedAccessorInfo());
+ if (!result.IsEmpty())
+ return;
+ }
+ PropertyAttributes attrs;
+ Property *pd = that->__getOwnProperty__(name, &attrs);
+ if (pd)
+ that->putValue(ctx, pd, attrs, value);
+ else if (that->m_template->m_fallbackPropertySetter)
+ that->m_template->m_fallbackPropertySetter(String::New(name), v8Value, that->fallbackAccessorInfo());
+ else
+ BaseClass::put(m, ctx, name, value);
+ }
+
+ // Indexed put: indexed interceptor may swallow the write.
+ static void putIndexed(VM::Managed *m, ExecutionContext *ctx, uint index, const VM::Value &value)
+ {
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_indexedPropertySetter) {
+ Handle<Value> result = that->m_template->m_indexedPropertySetter(index, Local<Value>::New(Value::fromVmValue(value)), that->indexedAccessorInfo());
+ if (!result.IsEmpty())
+ return;
+ }
+ BaseClass::putIndexed(m, ctx, index, value);
+ }
+
+ // Converts a v8 PropertyAttribute bitmask (returned by query callbacks)
+ // into VM PropertyAttributes flags.
+ static PropertyAttributes propertyAttributesToFlags(const Handle<Value> &attr)
+ {
+ PropertyAttributes flags;
+ int intAttr = attr->ToInt32()->Value();
+ flags.setWritable(!(intAttr & ReadOnly));
+ flags.setEnumerable(!(intAttr & DontEnum));
+ flags.setConfigurable(!(intAttr & DontDelete));
+ return flags;
+ }
+
+ // Attribute query: named query interceptor first, then the base class,
+ // then — for properties the base class doesn't know — the fallback query.
+ static PropertyAttributes query(VM::Managed *m, ExecutionContext *ctx, VM::String *name)
+ {
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_namedPropertyQuery) {
+ Handle<Value> result = that->m_template->m_namedPropertyQuery(String::New(name), that->namedAccessorInfo());
+ if (!result.IsEmpty())
+ return propertyAttributesToFlags(result);
+ }
+ PropertyAttributes flags = BaseClass::query(m, ctx, name);
+ // Fix: guard on the fallback *query* callback.  This previously tested
+ // m_fallbackPropertySetter, so a template with a fallback query but no
+ // fallback setter never ran its query, and one with a setter but no
+ // query invoked a null callback.
+ if (flags.type() == PropertyAttributes::Generic && that->m_template->m_fallbackPropertyQuery) {
+ Handle<Value> result = that->m_template->m_fallbackPropertyQuery(String::New(name), that->fallbackAccessorInfo());
+ if (!result.IsEmpty())
+ return propertyAttributesToFlags(result);
+ }
+
+ return flags;
+ }
+
+ // Indexed attribute query: indexed interceptor first, then the base class.
+ static PropertyAttributes queryIndexed(VM::Managed *m, ExecutionContext *ctx, uint index)
+ {
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_indexedPropertyQuery) {
+ Handle<Value> result = that->m_template->m_indexedPropertyQuery(index, that->indexedAccessorInfo());
+ if (!result.IsEmpty())
+ return propertyAttributesToFlags(result);
+ }
+
+ return BaseClass::queryIndexed(m, ctx, index);
+ }
+
+ // Delete: named deleter may decide the result outright; otherwise the base
+ // class deletes, and the fallback deleter (if present) may override the
+ // outcome.
+ static bool deleteProperty(VM::Managed *m, ExecutionContext *ctx, VM::String *name)
+ {
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_namedPropertyDeleter) {
+ Handle<Boolean> result = that->m_template->m_namedPropertyDeleter(String::New(name), that->namedAccessorInfo());
+ if (!result.IsEmpty())
+ return result->Value();
+ }
+
+ bool result = BaseClass::deleteProperty(m, ctx, name);
+
+ if (that->m_template->m_fallbackPropertyDeleter) {
+ Handle<Boolean> interceptResult = that->m_template->m_fallbackPropertyDeleter(String::New(name), that->fallbackAccessorInfo());
+ if (!interceptResult.IsEmpty())
+ result = interceptResult->Value();
+ }
+
+ return result;
+ }
+
+ // Indexed delete: indexed deleter first, then the base class.
+ static bool deleteIndexedProperty(VM::Managed *m, ExecutionContext *ctx, uint index)
+ {
+ V4V8Object *that = static_cast<V4V8Object*>(m);
+ if (that->m_template->m_indexedPropertyDeleter) {
+ Handle<Boolean> result = that->m_template->m_indexedPropertyDeleter(index, that->indexedAccessorInfo());
+ if (!result.IsEmpty())
+ return result->Value();
+ }
+ return BaseClass::deleteIndexedProperty(m, ctx, index);
+ }
+};
+
+template<>
+DEFINE_MANAGED_VTABLE(V4V8Object<VM::Object>);
+template<>
+DEFINE_MANAGED_VTABLE(V4V8Object<VM::FunctionObject>);
+template<>
+DEFINE_MANAGED_VTABLE(V4V8Object<VM::FunctionPrototype>);
+
+// FunctionObject subclass that dispatches [[Call]] and [[Construct]] to a
+// FunctionTemplate's InvocationCallback.
+struct V4V8Function : public V4V8Object<VM::FunctionObject>
+{
+ V4V8Function(VM::ExecutionEngine *engine, FunctionTemplate *functionTemplate)
+ : V4V8Object<VM::FunctionObject>(engine, 0)
+ {
+ vtbl = &static_vtbl;
+ m_functionTemplate = Persistent<FunctionTemplate>(functionTemplate);
+ initProperties(m_functionTemplate.get());
+ }
+
+protected:
+ static const ManagedVTable static_vtbl;
+
+ // [[Call]]: forwards to the template callback if one is set; returns
+ // undefined otherwise.
+ static VM::Value call(VM::Managed *m, ExecutionContext *context, const VM::Value &thisObject, VM::Value *args, int argc)
+ {
+ V4V8Function *that = static_cast<V4V8Function*>(m);
+ Arguments arguments(args, argc, thisObject, false, that->m_functionTemplate->m_data);
+ VM::Value result = VM::Value::undefinedValue();
+ if (that->m_functionTemplate->m_callback)
+ result = that->m_functionTemplate->m_callback(arguments)->vmValue();
+ return result;
+ }
+
+ // [[Construct]]: builds the instance from the instance template, wires its
+ // prototype from this function's "prototype" property, then runs the
+ // callback.  Per JS constructor semantics an object returned by the
+ // callback replaces the freshly built instance.
+ // NOTE(review): m_instanceTemplate is dereferenced unconditionally here —
+ // if InstanceTemplate() was never called it is empty; confirm construction
+ // paths always populate it first.
+ static VM::Value construct(VM::Managed *m, ExecutionContext *context, VM::Value *args, int argc)
+ {
+ V4V8Function *that = static_cast<V4V8Function*>(m);
+ Arguments arguments(args, argc, VM::Value::undefinedValue(), true, that->m_functionTemplate->m_data);
+
+ VM::Object *obj = that->m_functionTemplate->m_instanceTemplate->NewInstance()->vmValue().asObject();
+ VM::Value proto = that->Managed::get(context, context->engine->id_prototype);
+ if (proto.isObject())
+ obj->prototype = proto.objectValue();
+
+ VM::Value result = VM::Value::undefinedValue();
+ if (that->m_functionTemplate->m_callback)
+ result = that->m_functionTemplate->m_callback(arguments)->vmValue();
+ if (result.isObject())
+ return result;
+ return VM::Value::fromObject(obj);
+
+ }
+
+ Persistent<FunctionTemplate> m_functionTemplate;
+};
+
+DEFINE_MANAGED_VTABLE(V4V8Function);
+
+FunctionTemplate::FunctionTemplate(InvocationCallback callback, Handle<Value> data)
+ : m_callback(callback)
+{
+ m_instanceTemplate = Local<ObjectTemplate>();
+ m_prototypeTemplate = Local<ObjectTemplate>();
+ m_data = Persistent<Value>::New(data);
+}
+
+Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback, Handle<Value> data)
+{
+ FunctionTemplate *ft = new FunctionTemplate(callback, data);
+ return Local<FunctionTemplate>::New(Handle<FunctionTemplate>(ft));
+}
+
+// Instantiates a callable function object from this template, plus a fresh
+// prototype object built from the prototype template, and links the two via
+// the function's "prototype" property.
+// NOTE(review): a new function (and prototype) is created on every call;
+// v8 caches the function per template per context — confirm no caller relies
+// on identity here.
+Local<Function> FunctionTemplate::GetFunction()
+{
+ VM::ExecutionEngine *engine = currentEngine();
+ VM::Object *o = new (engine->memoryManager) V4V8Function(engine, this);
+ VM::Object *proto = new (engine->memoryManager) V4V8Object<VM::FunctionPrototype>(engine, m_prototypeTemplate.get());
+ o->put(engine->current, engine->id_prototype, VM::Value::fromObject(proto));
+ return Local<Function>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+// Returns the template used for objects constructed by this function,
+// creating it on first access.
+Local<ObjectTemplate> FunctionTemplate::InstanceTemplate()
+{
+    if (!m_instanceTemplate.IsEmpty())
+        return m_instanceTemplate;
+    m_instanceTemplate = ObjectTemplate::New();
+    return m_instanceTemplate;
+}
+
+// Returns the template used for this function's prototype object, creating
+// it on first access.
+Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate()
+{
+    if (!m_prototypeTemplate.IsEmpty())
+        return m_prototypeTemplate;
+    m_prototypeTemplate = ObjectTemplate::New();
+    return m_prototypeTemplate;
+}
+
+
+// Creates a fresh, empty object template.
+Local<ObjectTemplate> ObjectTemplate::New()
+{
+    return Local<ObjectTemplate>::New(Handle<ObjectTemplate>(new ObjectTemplate));
+}
+
+// Instantiates a plain VM object backed by this template. The object inherits
+// from the engine's Object prototype and opts into the user-supplied equality
+// comparison when MarkAsUseUserObjectComparison() was called.
+Local<Object> ObjectTemplate::NewInstance()
+{
+    VM::ExecutionEngine *engine = currentEngine();
+    VM::Object *o = new (engine->memoryManager) V4V8Object<VM::Object>(engine, this);
+    o->prototype = engine->objectPrototype;
+    o->externalComparison = m_useUserComparison;
+
+    return Local<Object>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+// Registers a native getter/setter pair for property \a name on instances of
+// this template. The callbacks are wrapped in VM function objects so they can
+// be installed as ordinary JS accessors at instantiation time.
+// NOTE(review): the AccessControl argument `settings` is currently ignored -
+// confirm access-control checks are intentionally unsupported here.
+void ObjectTemplate::SetAccessor(Handle<String> name, AccessorGetter getter, AccessorSetter setter, Handle<Value> data, AccessControl settings, PropertyAttribute attribute)
+{
+    VM::ExecutionEngine *engine = currentEngine();
+
+    Accessor a;
+    if (getter) {
+        VM::FunctionObject *wrappedGetter = new (engine->memoryManager) V8AccessorGetter(engine->rootContext, name, getter, data);
+        a.getter = Persistent<Value>::New(Value::fromVmValue(VM::Value::fromObject(wrappedGetter)));
+    }
+    if (setter) {
+        VM::FunctionObject *wrappedSetter = new (engine->memoryManager) V8AccessorSetter(engine->rootContext, name, setter, data);
+        a.setter = Persistent<Value>::New(Value::fromVmValue(VM::Value::fromObject(wrappedSetter)));
+    }
+    a.attribute = attribute;
+    a.name = Persistent<String>::New(name);
+    m_accessors << a;
+}
+
+// Installs interceptors consulted for named property accesses on instances of
+// this template; \a data is kept alive and passed back to each callback.
+void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter, NamedPropertySetter setter, NamedPropertyQuery query, NamedPropertyDeleter deleter, NamedPropertyEnumerator enumerator, Handle<Value> data)
+{
+    m_namedPropertyGetter = getter;
+    m_namedPropertySetter = setter;
+    m_namedPropertyQuery = query;
+    m_namedPropertyDeleter = deleter;
+    m_namedPropertyEnumerator = enumerator;
+    m_namedPropertyData = Persistent<Value>::New(data);
+}
+
+// Installs "fallback" interceptors. NOTE(review): not part of stock v8;
+// presumably consulted only after regular lookup fails - confirm against the
+// V4V8Object implementation.
+void ObjectTemplate::SetFallbackPropertyHandler(NamedPropertyGetter getter, NamedPropertySetter setter, NamedPropertyQuery query, NamedPropertyDeleter deleter, NamedPropertyEnumerator enumerator, Handle<Value> data)
+{
+    m_fallbackPropertyGetter = getter;
+    m_fallbackPropertySetter = setter;
+    m_fallbackPropertyQuery = query;
+    m_fallbackPropertyDeleter = deleter;
+    m_fallbackPropertyEnumerator = enumerator;
+    m_fallbackPropertyData = Persistent<Value>::New(data);
+}
+
+// Installs interceptors consulted for indexed (array-style) property accesses.
+void ObjectTemplate::SetIndexedPropertyHandler(IndexedPropertyGetter getter, IndexedPropertySetter setter, IndexedPropertyQuery query, IndexedPropertyDeleter deleter, IndexedPropertyEnumerator enumerator, Handle<Value> data)
+{
+    m_indexedPropertyGetter = getter;
+    m_indexedPropertySetter = setter;
+    m_indexedPropertyQuery = query;
+    m_indexedPropertyDeleter = deleter;
+    m_indexedPropertyEnumerator = enumerator;
+    m_indexedPropertyData = Persistent<Value>::New(data);
+}
+
+// Internal fields are not supported by this emulation layer.
+int ObjectTemplate::InternalFieldCount()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+// Internal fields are not supported by this emulation layer; the requested
+// count is ignored. Q_UNUSED added for consistency with the other stubs in
+// this file (e.g. SetHasExternalResource) and to silence -Wunused-parameter.
+void ObjectTemplate::SetInternalFieldCount(int value)
+{
+    Q_UNUSED(value);
+    Q_UNIMPLEMENTED();
+}
+
+// The external-resource slot is always reserved on instances, so this always
+// reports true.
+bool ObjectTemplate::HasExternalResource()
+{
+    // we always reserve the space for the external resource
+    return true;
+}
+
+void ObjectTemplate::SetHasExternalResource(bool value)
+{
+    // no need for this, we always reserve the space for the external resource
+    Q_UNUSED(value);
+}
+
+// Instances of this template will use the comparison callback registered via
+// V8::SetUserObjectComparisonCallbackFunction() for equality checks.
+void ObjectTemplate::MarkAsUseUserObjectComparison()
+{
+    m_useUserComparison = true;
+}
+
+// All interceptor callbacks start out unset (raw function pointers zeroed);
+// callers opt in through the Set*PropertyHandler methods above.
+ObjectTemplate::ObjectTemplate()
+{
+    m_namedPropertyGetter = 0;
+    m_namedPropertySetter = 0;
+    m_namedPropertyQuery = 0;
+    m_namedPropertyDeleter = 0;
+    m_namedPropertyEnumerator = 0;
+
+    m_fallbackPropertyGetter = 0;
+    m_fallbackPropertySetter = 0;
+    m_fallbackPropertyQuery = 0;
+    m_fallbackPropertyDeleter = 0;
+    m_fallbackPropertyEnumerator = 0;
+
+    m_indexedPropertyGetter = 0;
+    m_indexedPropertySetter = 0;
+    m_indexedPropertyQuery = 0;
+    m_indexedPropertyDeleter = 0;
+    m_indexedPropertyEnumerator = 0;
+
+    m_useUserComparison = false;
+}
+
+// Returns a handle holding the VM's `undefined` value; the NaN-boxed payload
+// is copied bit-for-bit into the handle's 64-bit slot.
+Handle<Primitive> Undefined()
+{
+    Handle<Primitive> val;
+    val.val = VM::Value::undefinedValue().val;
+    return val;
+}
+
+// Returns a handle holding the VM's `null` value.
+Handle<Primitive> Null()
+{
+    Handle<Primitive> val;
+    val.val = VM::Value::nullValue().val;
+    return val;
+}
+
+// Returns a handle holding the VM's canonical `true` value.
+// FIX: the local was declared Handle<Primitive> although the function returns
+// Handle<Boolean>, forcing a converting copy at the return; declare it with
+// the return type directly (same bit payload either way).
+Handle<Boolean> True()
+{
+    Handle<Boolean> val;
+    val.val = VM::Value::fromBoolean(true).val;
+    return val;
+}
+
+// Returns a handle holding the VM's canonical `false` value.
+Handle<Boolean> False()
+{
+    Handle<Boolean> val;
+    val.val = VM::Value::fromBoolean(false).val;
+    return val;
+}
+
+
+// Throws \a exception into the VM via __qmljs_throw. The trailing return
+// satisfies the signature; presumably __qmljs_throw unwinds and this value is
+// never observed - confirm against the runtime's throw implementation.
+Handle<Value> ThrowException(Handle<Value> exception)
+{
+    __qmljs_throw(currentEngine()->current, exception->vmValue());
+    return Handle<Value>();
+}
+
+
+// Creates a ReferenceError object carrying \a message.
+// FIX: dropped the contradictory Q_UNUSED(message) here and in TypeError /
+// Error below - `message` IS used on the very next line in each of them.
+Local<Value> Exception::ReferenceError(Handle<String> message)
+{
+    VM::Object *o = currentEngine()->newReferenceErrorObject(currentEngine()->current, message->ToString()->asQString());
+    return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+// Creates a SyntaxError object. The VM constructor takes a diagnostic message
+// structure we do not have here, so \a message really is ignored.
+Local<Value> Exception::SyntaxError(Handle<String> message)
+{
+    Q_UNUSED(message);
+    VM::Object *o = currentEngine()->newSyntaxErrorObject(currentEngine()->current, 0);
+    return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+// Creates a TypeError object carrying \a message.
+Local<Value> Exception::TypeError(Handle<String> message)
+{
+    VM::Object *o = currentEngine()->newTypeErrorObject(currentEngine()->current, message->ToString()->asQString());
+    return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+// Creates a generic Error object carrying \a message.
+Local<Value> Exception::Error(Handle<String> message)
+{
+    VM::Object *o = currentEngine()->newErrorObject(VM::Value::fromString(currentEngine()->current, message->ToString()->asQString()));
+    return Local<Value>::New(Value::fromVmValue(VM::Value::fromObject(o)));
+}
+
+
+// Per-thread current Isolate. QThreadStorage owns the stored pointer and
+// deletes the Isolate when its thread exits (documented Qt behaviour).
+static QThreadStorage<Isolate*> currentIsolate;
+
+Isolate::Isolate()
+    : m_lastIsolate(0)
+    , tryCatch(0)
+{
+}
+
+Isolate::~Isolate()
+{
+}
+
+// Creating additional isolates is unsupported; the per-thread isolate is
+// created lazily by GetCurrent(), so this path must never be reached.
+Isolate *Isolate::New()
+{
+    assert(!"Isolate::New()");
+    Q_UNREACHABLE();
+}
+
+// Makes this the thread's current isolate, remembering the previous one so
+// Exit() can restore it. Only a single level is tracked (one m_lastIsolate
+// slot), so Enter/Exit pairs must be strictly nested.
+void Isolate::Enter()
+{
+    m_lastIsolate = currentIsolate.localData();
+    currentIsolate.localData() = this;
+}
+
+// Restores the isolate that was current before the matching Enter().
+void Isolate::Exit()
+{
+    currentIsolate.localData() = m_lastIsolate;
+    m_lastIsolate = 0;
+}
+
+// Explicit disposal is not implemented (thread storage cleans up on exit).
+void Isolate::Dispose()
+{
+    Q_UNIMPLEMENTED();
+}
+
+// Embedder data slots are not supported.
+// FIX: Q_UNUSED added - `data` was silently unused, unlike the file's other
+// stubs which mark ignored parameters explicitly.
+void Isolate::SetData(void *data)
+{
+    Q_UNUSED(data);
+    Q_UNIMPLEMENTED();
+}
+
+void *Isolate::GetData()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+// Records \a ex in the innermost active TryCatch, if any. With no TryCatch
+// installed, the exception is not recorded here at all.
+void Isolate::setException(const VM::Value &ex)
+{
+    if (tryCatch) {
+        tryCatch->hasCaughtException = true;
+        tryCatch->exception = Local<Value>::New(Value::fromVmValue(ex));
+    }
+}
+
+// Returns this thread's isolate, creating it on first use (ownership passes
+// to the QThreadStorage).
+Isolate *Isolate::GetCurrent()
+{
+    if (!currentIsolate.hasLocalData())
+        currentIsolate.setLocalData(new Isolate);
+    return currentIsolate.localData();
+}
+
+
+// V8 command-line flags have no meaning for the V4 engine.
+void V8::SetFlagsFromString(const char *, int)
+{
+    // we can safely ignore these
+}
+
+// Global comparison hook installed via SetUserObjectComparisonCallbackFunction;
+// null until the embedder registers one.
+static UserObjectComparisonCallback userObjectComparisonCallback = 0;
+
+// Adapter the VM calls when comparing two objects carrying external
+// resources; forwards to the user-supplied callback (false when unset).
+static bool v8ExternalResourceComparison(const VM::Value &a, const VM::Value &b)
+{
+    if (!userObjectComparisonCallback)
+        return false;
+    Local<Object> la = Local<Object>::New(Value::fromVmValue(a));
+    Local<Object> lb = Local<Object>::New(Value::fromVmValue(b));
+    return userObjectComparisonCallback(la, lb);
+}
+
+void V8::SetUserObjectComparisonCallbackFunction(UserObjectComparisonCallback callback)
+{
+    userObjectComparisonCallback = callback;
+    currentEngine()->externalResourceComparison = v8ExternalResourceComparison;
+}
+
+// GC prologue callbacks and implicit references exist in v8 to support weak
+// persistent handles, which this layer does not implement.
+void V8::AddGCPrologueCallback(GCPrologueCallback, GCType)
+{
+    // not required currently as we don't have weak Persistent references.
+    // not having them will lead to some leaks in QQmlVMEMetaObject, but shouldn't matter otherwise
+}
+
+void V8::RemoveGCPrologueCallback(GCPrologueCallback)
+{
+    assert(!"RemoveGCPrologueCallback();");
+}
+
+void V8::AddImplicitReferences(Persistent<Object> parent, Persistent<Value> *children, size_t length)
+{
+    // not required currently as we don't have weak Persistent references.
+    // not having them will lead to some leaks in QQmlVMEMetaObject, but shouldn't matter otherwise
+    assert(!"AddImplicitReferences();");
+}
+
+// Engine-wide lifecycle hooks from the v8 API; the V4 engine needs none of
+// them, and none of these paths are expected to be reached.
+bool V8::Initialize()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+bool V8::Dispose()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+// FIX: Q_UNUSED added for `hint` - it was silently unused, unlike the file's
+// other stubs which mark ignored parameters explicitly.
+bool V8::IdleNotification(int hint)
+{
+    Q_UNUSED(hint);
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+void V8::LowMemoryNotification()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+
+// Pushes this TryCatch onto the current isolate's handler chain; while it is
+// innermost, Isolate::setException() records exceptions here.
+TryCatch::TryCatch()
+{
+    Isolate *i = Isolate::GetCurrent();
+
+    hasCaughtException = false;
+    parent = i->tryCatch;
+    i->tryCatch = this;
+}
+
+// Pops this handler, restoring the enclosing one.
+TryCatch::~TryCatch()
+{
+    Isolate *i = Isolate::GetCurrent();
+    i->tryCatch = parent;
+}
+
+// True once an exception has been recorded by Isolate::setException().
+bool TryCatch::HasCaught() const
+{
+    return hasCaughtException;
+}
+
+Handle<Value> TryCatch::ReThrow()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+// The exception value recorded while this handler was innermost.
+Local<Value> TryCatch::Exception() const
+{
+    return exception;
+}
+
+// Message details are not tracked; returns a placeholder with empty
+// text/resource and line 0.
+Local<Message> TryCatch::Message() const
+{
+    Q_UNIMPLEMENTED();
+    return Local<v8::Message>::New(Handle<v8::Message>(new v8::Message(QString(), QString(), 0)));
+}
+
+// Clears the caught flag so the handler can be reused.
+// NOTE(review): the stored `exception` handle is not cleared here - confirm
+// whether a stale exception value after Reset() is acceptable.
+void TryCatch::Reset()
+{
+    hasCaughtException = false;
+}
+
+
+
+// Owns the per-context VM execution engine (one engine per Context).
+struct Context::Private
+{
+    Private()
+    {
+        engine.reset(new QQmlJS::VM::ExecutionEngine);
+    }
+
+    QScopedPointer<QQmlJS::VM::ExecutionEngine> engine;
+};
+
+// Creating a Context spins up a fresh ExecutionEngine via Private's ctor.
+Context::Context()
+    : m_lastContext(0)
+    , d(new Private)
+{
+}
+
+Context::~Context()
+{
+    delete d;
+}
+
+// Creates a new context with its own VM engine. Extension configuration, a
+// custom global template and a pre-built global object are not supported by
+// this layer and are ignored (Q_UNUSED added to silence parameter warnings,
+// matching the file's convention for ignored arguments).
+Persistent<Context> Context::New(ExtensionConfiguration *extensions, Handle<ObjectTemplate> global_template, Handle<Value> global_object)
+{
+    Q_UNUSED(extensions);
+    Q_UNUSED(global_template);
+    Q_UNUSED(global_object);
+    Context *result = new Context;
+    return Persistent<Context>::New(Handle<Context>(result));
+}
+
+// Returns the global object of this context's engine.
+Local<Object> Context::Global()
+{
+    return Local<Object>::New(Value::fromVmValue(VM::Value::fromObject(d->engine->globalObject)));
+}
+
+// Returns the context on top of the current isolate's context stack.
+// NOTE(review): assumes Enter() was called at least once on this thread -
+// top() on an empty stack is undefined; confirm callers guarantee this.
+Local<Context> Context::GetCurrent()
+{
+    return Context::Adopt(Isolate::GetCurrent()->m_contextStack.top());
+}
+
+// Not supported: this layer does not track a distinct "calling" context.
+Local<Context> Context::GetCalling()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+// Walks the context chain up to the frame directly below the root context and
+// returns its QML activation object, or an empty handle when the caller is
+// not running in a QML context.
+Local<Object> Context::GetCallingQmlGlobal()
+{
+    VM::ExecutionEngine *engine = GetCurrent()->GetEngine();
+    VM::ExecutionContext *ctx = engine->current;
+    while (ctx && ctx->outer != engine->rootContext)
+        ctx = ctx->outer;
+
+    assert(ctx);
+    // BUG FIX: was `if (!ctx->type == ExecutionContext::Type_QmlContext)`.
+    // Unary ! binds tighter than ==, so the enum was negated to a bool before
+    // the comparison and the guard did not test the context type at all.
+    if (ctx->type != ExecutionContext::Type_QmlContext)
+        return Local<Object>();
+
+    return Local<Object>::New(Value::fromVmValue(VM::Value::fromObject(static_cast<CallContext *>(ctx)->activation)));
+}
+
+// Not supported: per-script data is not tracked by this layer.
+Local<Value> Context::GetCallingScriptData()
+{
+    Q_UNIMPLEMENTED();
+    Q_UNREACHABLE();
+}
+
+// Pushes this context onto the current isolate's context stack.
+void Context::Enter()
+{
+    Isolate* iso = Isolate::GetCurrent();
+    iso->m_contextStack.push(this);
+}
+
+// Pops the most recently entered context.
+// NOTE(review): does not verify the popped context is `this` - confirm
+// Enter/Exit calls are always balanced per context.
+void Context::Exit()
+{
+    Isolate::GetCurrent()->m_contextStack.pop();
+}
+
+
+// Returns the VM engine backing this context.
+QQmlJS::VM::ExecutionEngine *Context::GetEngine()
+{
+    return d->engine.data();
+}
+
+
+}
diff --git a/src/qml/qml/v4vm/qv4v8.h b/src/qml/qml/v4vm/qv4v8.h
new file mode 100644
index 0000000000..bcd6a7fef3
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4v8.h
@@ -0,0 +1,2581 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/** \mainpage V8 API Reference Guide
+ *
+ * V8 is Google's open source JavaScript engine.
+ *
+ * This set of documents provides reference material generated from the
+ * V8 header file, include/v8.h.
+ *
+ * For other documentation see http://code.google.com/apis/v8/
+ */
+
+#ifndef V8_H_
+#define V8_H_
+
+#include "qv4global.h"
+#include "qv4string.h"
+#include <QStack>
+#include <QSharedData>
+
+namespace QQmlJS {
+namespace VM {
+struct Value;
+struct String;
+struct ExecutionEngine;
+struct Object;
+class MemoryManager;
+}
+}
+
+#include <stdint.h>
+
+#define V8EXPORT Q_V4_EXPORT
+
+/**
+ * The v8 JavaScript engine.
+ */
+namespace v8 {
+
+class Context;
+class String;
+class StringObject;
+class Value;
+class Utils;
+class Number;
+class NumberObject;
+class Object;
+class Array;
+class Int32;
+class Uint32;
+class External;
+class Primitive;
+class Boolean;
+class BooleanObject;
+class Integer;
+class Function;
+class Date;
+class ImplementationUtilities;
+class Signature;
+class AccessorSignature;
+template <class T> struct Handle;
+template <class T> class Local;
+template <class T> class Persistent;
+class FunctionTemplate;
+class ObjectTemplate;
+class Data;
+class AccessorInfo;
+class StackTrace;
+class StackFrame;
+class Isolate;
+class TryCatch;
+
+V8EXPORT void *gcProtect(void *handle);
+V8EXPORT void gcProtect(void *memoryManager, void *handle);
+V8EXPORT void gcUnprotect(void *memoryManager, void *handle);
+
+// --- Weak Handles ---
+
+/**
+ * A weak reference callback function.
+ *
+ * This callback should either explicitly invoke Dispose on |object| if
+ * V8 wrapper is not needed anymore, or 'revive' it by invocation of MakeWeak.
+ *
+ * \param object the weak global object to be reclaimed by the garbage collector
+ * \param parameter the value passed in when making the weak global object
+ */
+typedef void (*WeakReferenceCallback)(Persistent<Value> object,
+ void* parameter);
+
+
+// --- Handles ---
+
+// Compile-time check that S* converts to T* (S equals or derives from T);
+// the dead while(false) body generates no runtime code.
+#define TYPE_CHECK(T, S)                                       \
+  while (false) {                                              \
+    *(static_cast<T* volatile*>(0)) = static_cast<S*>(0);      \
+  }
+
+/**
+ * An object reference managed by the v8 garbage collector.
+ *
+ * All objects returned from v8 have to be tracked by the garbage
+ * collector so that it knows that the objects are still alive. Also,
+ * because the garbage collector may move objects, it is unsafe to
+ * point directly to an object. Instead, all objects are stored in
+ * handles which are known by the garbage collector and updated
+ * whenever an object moves. Handles should always be passed by value
+ * (except in cases like out-parameters) and they should never be
+ * allocated on the heap.
+ *
+ * There are two types of handles: local and persistent handles.
+ * Local handles are light-weight and transient and typically used in
+ * local operations. They are managed by HandleScopes. Persistent
+ * handles can be used when storing objects across several independent
+ * operations and have to be explicitly deallocated when they're no
+ * longer used.
+ *
+ * It is safe to extract the object stored in the handle by
+ * dereferencing the handle (for instance, to extract the Object* from
+ * a Handle<Object>); the value will still be governed by a handle
+ * behind the scenes and the same rules apply to these values as to
+ * their handles.
+ */
+
+template <typename T>
+struct Handle;
+
+// Default handle policy: the handle embeds a NaN-boxed 64-bit value (same
+// layout as Handle<T>'s union), so reference counting is a no-op and GC
+// protection is delegated to the global gcProtect/gcUnprotect hooks.
+// Refcounted types override this via DEFINE_REFCOUNTED_HANDLE_OPERATIONS.
+template <typename T>
+struct HandleOperations
+{
+    // Marks the handle empty by storing the _Null_Type tag.
+    static void init(Handle<T> *handle)
+    {
+#if QT_POINTER_SIZE == 8
+        handle->val = quint64(Handle<T>::_Null_Type) << Handle<T>::Tag_Shift;
+#else
+        handle->tag = Handle<T>::_Null_Type;
+        handle->int_32 = 0;
+#endif
+    }
+
+    // Plain-value handles are not reference counted.
+    static void ref(Handle<T> *)
+    {
+    }
+
+    static void deref(Handle<T> *)
+    {
+    }
+
+    static void *protect(Handle<T> *handle)
+    {
+        return gcProtect(handle);
+    }
+
+    static void protect(void *memoryManager, Handle<T> *handle)
+    {
+        gcProtect(memoryManager, handle);
+    }
+
+    static void unProtect(void *memoryManager, Handle<T> *handle)
+    {
+        gcUnprotect(memoryManager, handle);
+    }
+
+    static bool isEmpty(const Handle<T> *handle)
+    {
+        return handle->tag == Handle<T>::_Null_Type;
+    }
+
+    // The payload IS the value: reinterpret the handle's own storage as T.
+    static T *get(const Handle<T> *handle)
+    {
+        return const_cast<T*>(reinterpret_cast<const T*>(handle));
+    }
+};
+
+// Generates a HandleOperations specialization for refcounted (QSharedData
+// style) types: the handle stores an object pointer, "empty" means null, and
+// GC protection is a no-op since lifetime is governed by the refcount.
+// (Comments must stay outside the macro: a // comment before a trailing
+// backslash would swallow the continuation.)
+#define DEFINE_REFCOUNTED_HANDLE_OPERATIONS(Type) \
+    template <> \
+    struct HandleOperations<Type> \
+    { \
+        static void init(Handle<Type> *handle) \
+        { \
+            handle->object = 0; \
+        } \
+        \
+        static void ref(Handle<Type> *handle) \
+        { \
+            if (handle->object) \
+                handle->object->ref.ref(); \
+        } \
+        \
+        static void deref(Handle<Type> *handle) \
+        { \
+            if (handle->object && !handle->object->ref.deref()) { \
+                delete handle->object; \
+                handle->object = 0; \
+            } \
+        } \
+        static void *protect(Handle<Type> *) { return 0; } \
+        static void protect(void *, Handle<Type> *) {} \
+        static void unProtect(void *, Handle<Type> *) {} \
+        static bool isEmpty(const Handle<Type> *handle) \
+        { \
+            return handle->object == 0; \
+        } \
+        static Type *get(const Handle<Type> *handle) \
+        { \
+            return handle->object; \
+        } \
+        \
+    };
+
+// Value-layout handle: the union below mirrors the VM's NaN-boxed value
+// representation (64-bit payload; tag word selected by endianness). The
+// applicable HandleOperations specialization decides whether the handle holds
+// a boxed value (default policy) or a refcounted object pointer.
+template <typename T>
+struct Handle {
+    Handle()
+    {
+        HandleOperations<T>::init(this);
+    }
+    // Converting copy: copies the raw 64-bit payload, then refs under the
+    // destination type's policy.
+    template <typename Other>
+    Handle(const Handle<Other> &that)
+        : val(that.val)
+    {
+        HandleOperations<T>::ref(this);
+    }
+
+    explicit Handle(T *obj)
+    {
+        object = obj;
+        HandleOperations<T>::ref(this);
+    }
+
+    Handle(const Handle<T> &other)
+        : val(other.val)
+    {
+        HandleOperations<T>::ref(this);
+    }
+    Handle<T> &operator=(const Handle<T> &other)
+    {
+        if (this == &other)
+            return *this;
+        HandleOperations<T>::deref(this);
+        this->val = other.val;
+        HandleOperations<T>::ref(this);
+        return *this;
+    }
+    ~Handle()
+    {
+        HandleOperations<T>::deref(this);
+    }
+
+    bool IsEmpty() const { return HandleOperations<T>::isEmpty(this); }
+
+    T *operator->() const { return HandleOperations<T>::get(this); }
+
+    T *get() const { return HandleOperations<T>::get(this); }
+
+    template <typename Source>
+    static Handle<T> Cast(Handle<Source> that)
+    {
+        return that.template As<T>();
+    }
+
+    template <typename Target>
+    Handle<Target> As()
+    {
+        return Handle<Target>(*this);
+    }
+
+    // NOTE(review): writes raw 0 rather than the _Null_Type tag used by
+    // init(), so IsEmpty() stays false after Clear(), and refcounted handles
+    // skip their deref - confirm this asymmetry is intentional.
+    void Clear()
+    {
+        val = 0;
+    }
+
+    template <class S> inline bool operator==(Handle<S> that) const {
+        return val == that.val;
+    }
+    template <class S> inline bool operator!=(Handle<S> that) const {
+        return val != that.val;
+    }
+
+    // Tag layout mirroring the VM's NaN-boxing scheme.
+    enum Masks {
+        NotDouble_Mask = 0xfffc0000,
+        Type_Mask = 0xffff8000,
+        Immediate_Mask = NotDouble_Mask | 0x00008000,
+        Tag_Shift = 32
+    };
+
+    enum ValueType {
+        Undefined_Type = Immediate_Mask | 0x00000,
+        Null_Type = Immediate_Mask | 0x10000,
+        Boolean_Type = Immediate_Mask | 0x20000,
+        Integer_Type = Immediate_Mask | 0x30000,
+        Object_Type = NotDouble_Mask | 0x00000,
+        String_Type = NotDouble_Mask | 0x10000
+    };
+
+    enum ImmediateFlags {
+        ConvertibleToInt = Immediate_Mask | 0x1
+    };
+
+    enum ValueTypeInternal {
+        _Undefined_Type = Undefined_Type,
+        _Null_Type = Null_Type | ConvertibleToInt,
+        _Boolean_Type = Boolean_Type | ConvertibleToInt,
+        _Integer_Type = Integer_Type | ConvertibleToInt,
+        _Object_Type = Object_Type,
+        _String_Type = String_Type
+
+    };
+
+    union {
+        T *object;      // refcounted-policy payload
+        quint64 val;    // raw 64-bit boxed value
+        double dbl;
+        struct {
+#if Q_BYTE_ORDER != Q_LITTLE_ENDIAN
+            uint tag;
+#endif
+            union {
+                uint uint_32;
+                int int_32;
+#if QT_POINTER_SIZE == 4
+                T *o;
+#endif
+            };
+#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
+            uint tag;
+#endif
+        };
+    };
+};
+
+
+/**
+ * A light-weight stack-allocated object handle. All operations
+ * that return objects from within v8 return them in local handles. They
+ * are created within HandleScopes, and all local handles allocated within a
+ * handle scope are destroyed when the handle scope is destroyed. Hence it
+ * is not necessary to explicitly deallocate local handles.
+ */
+template <class T> class Local : public Handle<T> {
+ public:
+  Local() {}
+  template <class S> Local(Local<S> that)
+      : Handle<T>(Handle<T>::Cast(that)) {
+    /**
+     * This check fails when trying to convert between incompatible
+     * handles. For example, converting from a Handle<String> to a
+     * Handle<Number>.
+     */
+    TYPE_CHECK(T, S);
+  }
+  template <class S> Local(S* that) : Handle<T>(that) { }
+  template <class S> static Local<T> Cast(Local<S> that) {
+#ifdef V8_ENABLE_CHECKS
+    // If we're going to perform the type check then we have to check
+    // that the handle isn't empty before doing the checked cast.
+    if (that.IsEmpty()) return Local<T>();
+#endif
+    return Local<T>::New(Handle<T>::Cast(that));
+  }
+
+  template <class S> Local<S> As() {
+    return Local<S>::Cast(*this);
+  }
+
+  /** Create a local handle for the content of another handle.
+   * The referee is kept alive by the local handle even when
+   * the original handle is destroyed/disposed.
+   */
+  // In this emulation, Local adds no per-scope tracking: New simply copies
+  // the handle via Handle's assignment (which refs as the policy requires).
+  static Local<T> New(Handle<T> that)
+  {
+    Local<T> result;
+    result.Handle<T>::operator =(that);
+    return result;
+  }
+};
+
+
+/**
+ * An object reference that is independent of any handle scope. Where
+ * a Local handle only lives as long as the HandleScope in which it was
+ * allocated, a Persistent handle remains valid until it is explicitly
+ * disposed.
+ *
+ * A persistent handle contains a reference to a storage cell within
+ * the v8 engine which holds an object value and which is updated by
+ * the garbage collector whenever the object is moved. A new storage
+ * cell can be created using Persistent::New and existing handles can
+ * be disposed using Persistent::Dispose. Since persistent handles
+ * are passed by value you may have many persistent handle objects
+ * that point to the same storage cell. For instance, if you pass a
+ * persistent handle as an argument to a function you will not get two
+ * different storage cells but rather two references to the same
+ * storage cell.
+ */
+template <class T> class Persistent : public Handle<T> {
+ public:
+  /**
+   * Creates an empty persistent handle that doesn't point to any
+   * storage cell.
+   */
+  // BUG FIX: m_memoryManager was left uninitialized here, yet ~Persistent()
+  // passes it to unProtect() unconditionally - undefined behaviour for any
+  // default-constructed, never-assigned Persistent. Null is already the
+  // supported "no manager" value (Dispose() sets it to 0 before the dtor
+  // runs), so initialize to 0.
+  Persistent() : m_memoryManager(0) {}
+  ~Persistent() {
+    HandleOperations<T>::unProtect(m_memoryManager, this);
+  }
+
+  Persistent(const Persistent &other)
+    : Handle<T>(other)
+    , m_memoryManager(other.m_memoryManager)
+  {
+    HandleOperations<T>::protect(m_memoryManager, this);
+  }
+
+  Persistent &operator =(const Persistent &other)
+  {
+    if (&other == this)
+      return *this;
+    // Drop protection of the current cell before adopting the new one.
+    HandleOperations<T>::unProtect(m_memoryManager, this);
+    Handle<T>::operator =(other);
+    m_memoryManager = other.m_memoryManager;
+    HandleOperations<T>::protect(m_memoryManager, this);
+    return *this;
+  }
+
+  /**
+   * Creates a persistent handle for the same storage cell as the
+   * specified handle. This constructor allows you to pass persistent
+   * handles as arguments by value and to assign between persistent
+   * handles. However, attempting to assign between incompatible
+   * persistent handles, for instance from a Persistent<String> to a
+   * Persistent<Number> will cause a compile-time error. Assigning
+   * between compatible persistent handles, for instance assigning a
+   * Persistent<String> to a variable declared as Persistent<Value>,
+   * is allowed as String is a subclass of Value.
+   */
+  template <class S> Persistent(Persistent<S> that)
+    : Handle<T>(Handle<T>::Cast(that)) {
+    m_memoryManager = that.m_memoryManager;
+    HandleOperations<T>::protect(m_memoryManager, this);
+  }
+
+  template <class S> Persistent(S* that) : Handle<T>(that)
+  {
+    m_memoryManager = HandleOperations<T>::protect(this);
+  }
+
+  /**
+   * "Casts" a plain handle which is known to be a persistent handle
+   * to a persistent handle.
+   */
+  template <class S> explicit Persistent(Handle<S> that)
+    : Handle<T>(*that)
+  {
+    m_memoryManager = HandleOperations<T>::protect(this);
+  }
+
+  template <class S> static Persistent<T> Cast(Persistent<S> that) {
+    return Persistent<T>(T::Cast(*that));
+  }
+
+  template <class S> Persistent<S> As() {
+    return Persistent<S>::Cast(*this);
+  }
+
+  /**
+   * Creates a new persistent handle for an existing local or
+   * persistent handle.
+   */
+  static Persistent<T> New(Handle<T> that)
+  {
+    Persistent<T> result;
+    result.Handle<T>::operator =(that);
+    result.m_memoryManager = HandleOperations<T>::protect(&result);
+    return result;
+  }
+
+  /**
+   * Releases the storage cell referenced by this persistent handle.
+   * Does not remove the reference to the cell from any handles.
+   * This handle's reference, and any other references to the storage
+   * cell remain and IsEmpty will still return false.
+   */
+  void Dispose() {
+    HandleOperations<T>::unProtect(m_memoryManager, this);
+    m_memoryManager = 0;
+    HandleOperations<T>::deref(this);
+    HandleOperations<T>::init(this);
+  }
+
+  void Dispose(Isolate*) {
+    Dispose();
+  }
+
+  /**
+   * Make the reference to this object weak. When only weak handles
+   * refer to the object, the garbage collector will perform a
+   * callback to the given V8::WeakReferenceCallback function, passing
+   * it the object reference and the given parameters.
+   */
+  void MakeWeak(void* parameters, WeakReferenceCallback callback);
+public:
+  // Memory manager protecting this handle's cell; null when unprotected.
+  void *m_memoryManager;
+};
+
+
+ /**
+ * A stack-allocated class that governs a number of local handles.
+ * After a handle scope has been created, all local handles will be
+ * allocated within that handle scope until either the handle scope is
+ * deleted or another handle scope is created. If there is already a
+ * handle scope and a new one is created, all allocations will take
+ * place in the new handle scope until it is deleted. After that,
+ * new handles will again be allocated in the original handle scope.
+ *
+ * After the handle scope of a local handle has been deleted the
+ * garbage collector will no longer track the object stored in the
+ * handle and may deallocate it. The behavior of accessing a handle
+ * for which the handle scope has been deleted is undefined.
+ */
+// Handle scopes are no-ops in this emulation (empty ctor/dtor): handles are
+// self-contained values, so nothing needs tracking or reclaiming per scope.
+class V8EXPORT HandleScope {
+ public:
+  HandleScope() {}
+
+  ~HandleScope() {}
+
+  /**
+   * Closes the handle scope and returns the value as a handle in the
+   * previous scope, which is the new current scope after the call.
+   */
+  template <class T> Local<T> Close(Handle<T> value) { return Local<T>::New(value); }
+};
+
+
+// --- Special objects ---
+
+
+/**
+ * The superclass of values and API object templates.
+ */
+// Reference-counted via QSharedData; handles to Data subclasses use the
+// refcounted HandleOperations policy generated below.
+class V8EXPORT Data : public QSharedData {
+};
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(Data)
+
+/**
+ * The origin, within a file, of a script.
+ */
+class V8EXPORT ScriptOrigin {
+public:
+    // Empty origin: no file name, line and column 0.
+    ScriptOrigin() : m_lineNumber(0), m_columnNumber(0) {}
+
+    ScriptOrigin(
+        Handle<Value> resource_name,
+        Handle<Integer> resource_line_offset = Handle<Integer>(),
+        Handle<Integer> resource_column_offset = Handle<Integer>());
+    Handle<Value> ResourceName() const;
+    Handle<Integer> ResourceLineOffset() const;
+    Handle<Integer> ResourceColumnOffset() const;
+private:
+    QString m_fileName;
+    int m_lineNumber, m_columnNumber;
+    friend class Script;  // Script reads these fields directly when compiling
+};
+
+class ScriptData;
+
+/**
+ * A compiled JavaScript script.
+ */
+class V8EXPORT Script : public QSharedData {
+ public:
+  // Qt extensions to the v8 API: QmlMode compiles with QML scoping rules,
+  // NativeMode with native (debugger/console) semantics.
+  enum CompileFlags {
+      Default = 0x00,
+      QmlMode = 0x01,
+      NativeMode = 0x02
+  };
+
+  /**
+   * Compiles the specified script (context-independent).
+   *
+   * \param source Script source code.
+   * \param origin Script origin, owned by caller, no references are kept
+   *   when New() returns
+   * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+   *   using pre_data speeds compilation if it's done multiple times.
+   *   Owned by caller, no references are kept when New() returns.
+   * \param script_data Arbitrary data associated with script. Using
+   *   this has same effect as calling SetData(), but allows data to be
+   *   available to compile event handlers.
+   * \return Compiled script object (context independent; when run it
+   *   will use the currently entered context).
+   */
+  static Local<Script> New(Handle<String> source,
+                           ScriptOrigin* origin = NULL,
+                           ScriptData* pre_data = NULL,
+                           Handle<String> script_data = Handle<String>(),
+                           CompileFlags = Default);
+
+  /**
+   * Compiles the specified script using the specified file name
+   * object (typically a string) as the script's origin.
+   *
+   * \param source Script source code.
+   * \param file_name file name object (typically a string) to be used
+   *   as the script's origin.
+   * \return Compiled script object (context independent; when run it
+   *   will use the currently entered context).
+   */
+  static Local<Script> New(Handle<String> source,
+                           Handle<Value> file_name,
+                           CompileFlags = Default);
+
+  /**
+   * Compiles the specified script (bound to current context).
+   *
+   * \param source Script source code.
+   * \param origin Script origin, owned by caller, no references are kept
+   *   when Compile() returns
+   * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+   *   using pre_data speeds compilation if it's done multiple times.
+   *   Owned by caller, no references are kept when Compile() returns.
+   * \param script_data Arbitrary data associated with script. Using
+   *   this has same effect as calling SetData(), but makes data available
+   *   earlier (i.e. to compile event handlers).
+   * \return Compiled script object, bound to the context that was active
+   *   when this function was called. When run it will always use this
+   *   context.
+   */
+  static Local<Script> Compile(Handle<String> source,
+                               ScriptOrigin* origin = NULL,
+                               ScriptData* pre_data = NULL,
+                               Handle<String> script_data = Handle<String>(),
+                               CompileFlags = Default);
+
+  /**
+   * Compiles the specified script using the specified file name
+   * object (typically a string) as the script's origin.
+   *
+   * \param source Script source code.
+   * \param file_name File name to use as script's origin
+   * \param script_data Arbitrary data associated with script. Using
+   *   this has same effect as calling SetData(), but makes data available
+   *   earlier (i.e. to compile event handlers).
+   * \return Compiled script object, bound to the context that was active
+   *   when this function was called. When run it will always use this
+   *   context.
+   */
+  static Local<Script> Compile(Handle<String> source,
+                               Handle<Value> file_name,
+                               Handle<String> script_data = Handle<String>(),
+                               CompileFlags = Default);
+
+  /**
+   * Runs the script returning the resulting value. If the script is
+   * context independent (created using ::New) it will be run in the
+   * currently entered context. If it is context specific (created
+   * using ::Compile) it will be run in the context in which it was
+   * compiled.
+   */
+  Local<Value> Run();
+  // Qt extension: run with \a qml as an additional scope object -
+  // presumably the QML scope/activation object; confirm at the call sites.
+  Local<Value> Run(Handle<Object> qml);
+
+  /**
+   * Returns the script id value.
+   */
+  Local<Value> Id();
+
+  /**
+   * Associate an additional data object with the script. This is mainly used
+   * with the debugger as this data object is only available through the
+   * debugger API.
+   */
+  void SetData(Handle<String> data);
+
+private:
+  QString m_script;           // the source text as passed in
+  ScriptOrigin m_origin;      // file name / line info captured at creation
+  CompileFlags m_flags;
+  Handle<Context> m_context;  // non-empty for context-bound (::Compile) scripts
+};
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(Script)
+
+/**
+ * An error message.
+ */
+class V8EXPORT Message : public QSharedData {
+ public:
+ // Stores the message text, the name of the script resource it originated
+ // from, and the 1-based line number of the error.
+ Message(const QString &message, const QString &resourceName, int lineNumber)
+ : m_message(message), m_resourceName(resourceName), m_lineNumber(lineNumber) {}
+
+ /**
+ * Returns the error message text.
+ */
+ Local<String> Get() const;
+ /**
+ * Returns the resource name for the script from where the function causing
+ * the error originates.
+ */
+ Handle<Value> GetScriptResourceName() const;
+
+ /**
+ * Returns the number, 1-based, of the line where the error occurred.
+ */
+ int GetLineNumber() const;
+
+private:
+ QString m_message; // error message text
+ QString m_resourceName; // originating script resource name
+ int m_lineNumber; // 1-based line number
+};
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(Message)
+
+/**
+ * Representation of a JavaScript stack trace. The information collected is a
+ * snapshot of the execution stack and the information remains valid after
+ * execution continues.
+ */
+class V8EXPORT StackTrace : public QSharedData
+{
+ public:
+ /**
+ * Flags that determine what information is captured for each
+ * StackFrame when grabbing the current stack trace.
+ */
+ enum StackTraceOptions {
+ kLineNumber = 1,
+ // Note: requesting the column offset implies capturing the line number.
+ kColumnOffset = 1 << 1 | kLineNumber,
+ kScriptName = 1 << 2,
+ kFunctionName = 1 << 3,
+ kIsEval = 1 << 4,
+ kIsConstructor = 1 << 5,
+ kScriptNameOrSourceURL = 1 << 6,
+ kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
+ kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
+ };
+
+ /**
+ * Returns a StackFrame at a particular index.
+ */
+ Local<StackFrame> GetFrame(uint32_t index) const;
+
+ /**
+ * Returns the number of StackFrames.
+ */
+ int GetFrameCount() const;
+
+ /**
+ * Returns StackTrace as a v8::Array that contains StackFrame objects.
+ */
+ Local<Array> AsArray();
+
+ /**
+ * Grab a snapshot of the current JavaScript execution stack.
+ *
+ * \param frame_limit The maximum number of stack frames we want to capture.
+ * \param options Enumerates the set of things we will capture for each
+ * StackFrame.
+ */
+ static Local<StackTrace> CurrentStackTrace(
+ int frame_limit,
+ StackTraceOptions options = kOverview);
+
+ private:
+ QVector<Local<StackFrame> > frames; // captured frames, innermost first -- TODO confirm ordering
+};
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(StackTrace)
+
+
+/**
+ * A single JavaScript stack frame.
+ */
+class V8EXPORT StackFrame : public QSharedData {
+ public:
+ /**
+ * Returns the number, 1-based, of the line for the associate function call.
+ * This method will return Message::kNoLineNumberInfo if it is unable to
+ * retrieve the line number, or if kLineNumber was not passed as an option
+ * when capturing the StackTrace.
+ */
+ int GetLineNumber() const;
+
+ /**
+ * Returns the 1-based column offset on the line for the associated function
+ * call.
+ * This method will return Message::kNoColumnInfo if it is unable to retrieve
+ * the column number, or if kColumnOffset was not passed as an option when
+ * capturing the StackTrace.
+ */
+ int GetColumn() const;
+
+ /**
+ * Returns the name of the resource that contains the script for the
+ * function for this StackFrame.
+ */
+ Local<String> GetScriptName() const;
+
+ /**
+ * Returns the name of the resource that contains the script for the
+ * function for this StackFrame or sourceURL value if the script name
+ * is undefined and its source ends with //@ sourceURL=... string.
+ */
+ Local<String> GetScriptNameOrSourceURL() const;
+
+ /**
+ * Returns the name of the function associated with this stack frame.
+ */
+ Local<String> GetFunctionName() const;
+
+private:
+ // Only StackTrace constructs frames (see CurrentStackTrace).
+ friend class StackTrace;
+ StackFrame(Handle<String> script, Handle<String> function, int line, int column);
+ int m_lineNumber; // 1-based line number
+ int m_columnNumber; // 1-based column offset
+ Persistent<String> m_scriptName; // script resource name
+ Persistent<String> m_functionName; // function name
+};
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(StackFrame)
+
+// --- Value ---
+
+
+/**
+ * The superclass of all JavaScript values and objects.
+ */
+class V8EXPORT Value {
+ public:
+ /**
+ * Returns true if this value is the undefined value. See ECMA-262
+ * 4.3.10.
+ */
+ bool IsUndefined() const;
+
+ /**
+ * Returns true if this value is the null value. See ECMA-262
+ * 4.3.11.
+ */
+ bool IsNull() const;
+
+ /**
+ * Returns true if this value is true.
+ */
+ bool IsTrue() const;
+
+ /**
+ * Returns true if this value is false.
+ */
+ bool IsFalse() const;
+
+ /**
+ * Returns true if this value is an instance of the String type.
+ * See ECMA-262 8.4.
+ */
+ bool IsString() const;
+
+ /**
+ * Returns true if this value is a function.
+ */
+ bool IsFunction() const;
+
+ /**
+ * Returns true if this value is an array.
+ */
+ bool IsArray() const;
+
+ /**
+ * Returns true if this value is an object.
+ */
+ bool IsObject() const;
+
+ /**
+ * Returns true if this value is boolean.
+ */
+ bool IsBoolean() const;
+
+ /**
+ * Returns true if this value is a number.
+ */
+ bool IsNumber() const;
+
+ /**
+ * Returns true if this value is external.
+ */
+ bool IsExternal() const;
+
+ /**
+ * Returns true if this value is a 32-bit signed integer.
+ */
+ bool IsInt32() const;
+
+ /**
+ * Returns true if this value is a 32-bit unsigned integer.
+ */
+ bool IsUint32() const;
+
+ /**
+ * Returns true if this value is a Date.
+ */
+ bool IsDate() const;
+
+ /**
+ * Returns true if this value is a Boolean object.
+ */
+ bool IsBooleanObject() const;
+
+ /**
+ * Returns true if this value is a Number object.
+ */
+ bool IsNumberObject() const;
+
+ /**
+ * Returns true if this value is a String object.
+ */
+ bool IsStringObject() const;
+
+ /**
+ * Returns true if this value is a RegExp.
+ */
+ bool IsRegExp() const;
+
+ /**
+ * Returns true if this value is an Error.
+ */
+ bool IsError() const;
+
+ // Conversions to other JavaScript types (ECMA-262 section 9).
+ Local<Boolean> ToBoolean() const;
+ Local<Number> ToNumber() const;
+ Local<String> ToString() const;
+ Local<Object> ToObject() const;
+ Local<Integer> ToInteger() const;
+ Local<Uint32> ToUint32() const;
+ Local<Int32> ToInt32() const;
+
+ /**
+ * Attempts to convert a string to an array index.
+ * Returns an empty handle if the conversion fails.
+ */
+ Local<Uint32> ToArrayIndex() const;
+
+ // Conversions to native C++ values.
+ bool BooleanValue() const;
+ double NumberValue() const;
+ int64_t IntegerValue() const;
+ uint32_t Uint32Value() const;
+ int32_t Int32Value() const;
+
+ /** JS == */
+ bool Equals(Handle<Value> that) const;
+ bool StrictEquals(Handle<Value> that) const;
+
+ // Builds a handle directly from a raw 64-bit payload; the value is stored
+ // as-is in the handle's 'val' field with no conversion or type check.
+ // NOTE(review): 'val' is declared on Handle, outside this view -- confirm
+ // its encoding before relying on this.
+ static Handle<Value> NewFromInternalValue(quint64 val)
+ {
+ Handle<Value> res;
+ res.val = val;
+ return res;
+ }
+
+ // Bridge to/from the QML VM's native value representation.
+ QQmlJS::VM::Value vmValue() const;
+ static Handle<Value> fromVmValue(const QQmlJS::VM::Value &vmValue);
+
+};
+
+
+/**
+ * The superclass of primitive values. See ECMA-262 4.3.2.
+ */
+class V8EXPORT Primitive : public Value { };
+
+
+/**
+ * A primitive boolean value (ECMA-262, 4.3.14). Either the true
+ * or false value.
+ */
+class V8EXPORT Boolean : public Primitive {
+ public:
+ /** Returns the C++ bool held by this value. */
+ bool Value() const;
+ /** Returns a handle to the true or false value. */
+ static Handle<Boolean> New(bool value);
+};
+
+
+/**
+ * A JavaScript string value (ECMA-262, 4.3.17).
+ */
+class V8EXPORT String : public Primitive {
+ public:
+ /**
+ * Returns the number of characters in this string.
+ */
+ int Length() const;
+
+
+ /**
+ * Returns the hash of this string.
+ */
+ uint32_t Hash() const;
+
+ // Aggregate of everything needed to use the string as a hash key;
+ // see CompleteHash() below for the meaning of each member.
+ struct CompleteHashData {
+ CompleteHashData() : length(0), hash(0), symbol_id(0) {}
+ int length;
+ uint32_t hash;
+ uint32_t symbol_id;
+ };
+
+ /**
+ * Returns the "complete" hash of the string. This is
+ * all the information about the string needed to implement
+ * a very efficient hash keyed on the string.
+ *
+ * The members of CompleteHashData are:
+ * length: The length of the string. Equivalent to Length()
+ * hash: The hash of the string. Equivalent to Hash()
+ * symbol_id: If the string is a sequential symbol, the symbol
+ * id, otherwise 0. If the symbol ids of two strings are
+ * the same (and non-zero) the two strings are identical.
+ * If the symbol ids are different the strings may still be
+ * identical, but an Equals() check must be performed.
+ */
+ CompleteHashData CompleteHash() const;
+
+ /**
+ * Compute a hash value for the passed UTF16 string
+ * data.
+ */
+ static uint32_t ComputeHash(uint16_t *string, int length);
+ static uint32_t ComputeHash(char *string, int length);
+
+ /**
+ * Returns true if this string is equal to the external
+ * string data provided.
+ */
+ bool Equals(uint16_t *string, int length);
+ bool Equals(char *string, int length);
+ // Forwards to the generic JS == comparison on Value.
+ bool Equals(Handle<Value> that) const {
+ return v8::Value::Equals(that);
+ }
+
+ /**
+ * Write the contents of the string to an external buffer.
+ * If no arguments are given, expects the buffer to be large
+ * enough to hold the entire string and NULL terminator. Copies
+ * the contents of the string and the NULL terminator into the
+ * buffer.
+ *
+ * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
+ * before the end of the buffer.
+ *
+ * Copies up to length characters into the output buffer.
+ * Only null-terminates if there is enough space in the buffer.
+ *
+ * \param buffer The buffer into which the string will be copied.
+ * \param start The starting position within the string at which
+ * copying begins.
+ * \param length The number of characters to copy from the string. For
+ * WriteUtf8 the number of bytes in the buffer.
+ * \param nchars_ref The number of characters written, can be NULL.
+ * \param options Various options that might affect performance of this or
+ * subsequent operations.
+ * \return The number of characters copied to the buffer excluding the null
+ * terminator. For WriteUtf8: The number of bytes copied to the buffer
+ * including the null terminator (if written).
+ */
+ enum WriteOptions {
+ NO_OPTIONS = 0,
+ HINT_MANY_WRITES_EXPECTED = 1,
+ NO_NULL_TERMINATION = 2,
+ PRESERVE_ASCII_NULL = 4
+ };
+
+ // Returns the 16-bit character code at the given index.
+ uint16_t GetCharacter(int index);
+
+ // 16-bit character codes.
+ int Write(uint16_t* buffer,
+ int start = 0,
+ int length = -1,
+ int options = NO_OPTIONS) const;
+
+ /**
+ * A zero length string.
+ */
+ static v8::Local<v8::String> Empty();
+ static v8::Local<v8::String> Empty(Isolate* isolate);
+
+ /**
+ * Returns true if the string is external
+ */
+ bool IsExternal() const;
+
+ class V8EXPORT ExternalStringResourceBase { // NOLINT
+ public:
+ virtual ~ExternalStringResourceBase() {}
+
+ protected:
+ ExternalStringResourceBase() {}
+
+ /**
+ * Internally V8 will call this Dispose method when the external string
+ * resource is no longer needed. The default implementation will use the
+ * delete operator. This method can be overridden in subclasses to
+ * control how allocated external string resources are disposed.
+ */
+ virtual void Dispose() { delete this; }
+
+ };
+
+ /**
+ * An ExternalStringResource is a wrapper around a two-byte string
+ * buffer that resides outside V8's heap. Implement an
+ * ExternalStringResource to manage the life cycle of the underlying
+ * buffer. Note that the string data must be immutable.
+ */
+ class V8EXPORT ExternalStringResource
+ : public ExternalStringResourceBase {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ virtual ~ExternalStringResource() {}
+
+ /**
+ * The string data from the underlying buffer.
+ */
+ virtual const uint16_t* data() const = 0;
+
+ /**
+ * The length of the string. That is, the number of two-byte characters.
+ */
+ virtual size_t length() const = 0;
+
+ protected:
+ ExternalStringResource() {}
+ };
+
+ /**
+ * Get the ExternalStringResource for an external string. Returns
+ * NULL if IsExternal() doesn't return true.
+ */
+ ExternalStringResource* GetExternalStringResource() const;
+
+ // Unchecked downcast from a generic Value. NOTE(review): whether a type
+ // check is performed is decided in the implementation, not visible here.
+ static String* Cast(v8::Value* obj);
+
+ /**
+ * Allocates a new string from either UTF-8 encoded or ASCII data.
+ * The second parameter 'length' gives the buffer length.
+ * If the data is UTF-8 encoded, the caller must
+ * be careful to supply the length parameter.
+ * If it is not given, the function calls
+ * 'strlen' to determine the buffer length, it might be
+ * wrong if 'data' contains a null character.
+ */
+ static Local<String> New(const char* data, int length = -1);
+
+ /** Allocates a new string from 16-bit character codes.*/
+ static Local<String> New(const uint16_t* data, int length = -1);
+
+ /** Creates a symbol. Returns one if it exists already.*/
+ static Local<String> NewSymbol(const char* data, int length = -1);
+
+ /** Wraps an existing QML VM string (extension over the V8 API). */
+ static Local<String> New(QQmlJS::VM::String *s);
+
+ /**
+ * Creates a new external string using the data defined in the given
+ * resource. When the external string is no longer live on V8's heap the
+ * resource will be disposed by calling its Dispose method. The caller of
+ * this function should not otherwise delete or modify the resource. Neither
+ * should the underlying buffer be deallocated or modified except through the
+ * destructor of the external string resource.
+ */
+ static Local<String> NewExternal(ExternalStringResource* resource);
+
+ /**
+ * Converts an object to an ASCII string.
+ * Useful if you want to print the object.
+ * If conversion to a string fails (eg. due to an exception in the toString()
+ * method of the object) then the length() method returns 0 and the * operator
+ * returns NULL.
+ */
+ class V8EXPORT AsciiValue {
+ public:
+ explicit AsciiValue(Handle<v8::Value> obj);
+ ~AsciiValue() {}
+ char* operator*() { return str.data(); }
+ const char* operator*() const { return str.constData(); }
+ int length() const { return str.length(); }
+ private:
+ QByteArray str; // converted bytes; empty when conversion failed
+
+ // Disallow copying and assigning.
+ AsciiValue(const AsciiValue&);
+ void operator=(const AsciiValue&);
+ };
+
+ /**
+ * Converts an object to a two-byte string.
+ * If conversion to a string fails (eg. due to an exception in the toString()
+ * method of the object) then the length() method returns 0 and the * operator
+ * returns NULL.
+ */
+ class V8EXPORT Value {
+ public:
+ explicit Value(Handle<v8::Value> obj);
+ ~Value() {}
+ uint16_t* operator*() { return (uint16_t *)str.data(); }
+ const uint16_t* operator*() const { return str.utf16(); }
+ int length() const { return str.length(); }
+ private:
+ QString str; // converted string; empty when conversion failed
+
+ // Disallow copying and assigning.
+ Value(const Value&);
+ void operator=(const Value&);
+ };
+
+ // Bridge to Qt / QML VM string representations.
+ QString asQString() const;
+ QQmlJS::VM::String *asVMString() const;
+};
+
+
+/**
+ * A JavaScript number value (ECMA-262, 4.3.20)
+ */
+class V8EXPORT Number : public Primitive {
+ public:
+ /** Returns the C++ double held by this value. */
+ double Value() const;
+ static Local<Number> New(double value);
+ static Number* Cast(v8::Value* obj);
+};
+
+
+/**
+ * A JavaScript value representing a signed integer.
+ */
+class V8EXPORT Integer : public Number {
+ public:
+ static Local<Integer> New(int32_t value);
+ static Local<Integer> NewFromUnsigned(uint32_t value);
+ // Isolate-aware overloads, matching the upstream V8 API shape.
+ static Local<Integer> New(int32_t value, Isolate*);
+ static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
+ /** Returns the integer value; 64-bit so both signed and unsigned 32-bit ranges fit. */
+ int64_t Value() const;
+ static Integer* Cast(v8::Value* obj);
+};
+
+
+/**
+ * A JavaScript value representing a 32-bit signed integer.
+ */
+class V8EXPORT Int32 : public Integer {
+ public:
+ /** Returns the 32-bit signed integer value. */
+ int32_t Value() const;
+ private:
+ // Not constructible directly; obtained via Value::ToInt32()/casts.
+ Int32();
+};
+
+
+/**
+ * A JavaScript value representing a 32-bit unsigned integer.
+ */
+class V8EXPORT Uint32 : public Integer {
+ public:
+ /** Returns the 32-bit unsigned integer value. */
+ uint32_t Value() const;
+ private:
+ // Not constructible directly; obtained via Value::ToUint32()/casts.
+ Uint32();
+};
+
+
+// Attributes applied to a property when it is defined (ECMA-262 8.6.1).
+enum PropertyAttribute {
+ None = 0, // writable, enumerable, configurable
+ ReadOnly = 1 << 0, // not writable
+ DontEnum = 1 << 1, // not enumerable (hidden from for-in)
+ DontDelete = 1 << 2 // not configurable (cannot be deleted)
+};
+
+/**
+ * Accessor[Getter|Setter] are used as callback functions when
+ * setting|getting a particular property. See Object and ObjectTemplate's
+ * method SetAccessor.
+ */
+typedef Handle<Value> (*AccessorGetter)(Local<String> property,
+ const AccessorInfo& info);
+
+
+// Invoked when the property is assigned; receives the value being set.
+typedef void (*AccessorSetter)(Local<String> property,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * Access control specifications.
+ *
+ * Some accessors should be accessible across contexts. These
+ * accessors have an explicit access control parameter which specifies
+ * the kind of cross-context access that should be allowed.
+ *
+ * Additionally, for security, accessors can prohibit overwriting by
+ * accessors defined in JavaScript. For objects that have such
+ * accessors either locally or in their prototype chain it is not
+ * possible to overwrite the accessor by using __defineGetter__ or
+ * __defineSetter__ from JavaScript code.
+ */
+enum AccessControl {
+ DEFAULT = 0, // no cross-context access
+ ALL_CAN_READ = 1, // any context may read
+ ALL_CAN_WRITE = 1 << 1, // any context may write
+ PROHIBITS_OVERWRITING = 1 << 2 // JS code cannot redefine the accessor
+};
+
+
+/**
+ * A JavaScript object (ECMA-262, 4.3.3)
+ */
+class V8EXPORT Object : public Value {
+ public:
+ // --- Property access by key and by array index ---
+ bool Set(Handle<Value> key,
+ Handle<Value> value,
+ PropertyAttribute attribs = None);
+
+ bool Set(uint32_t index,
+ Handle<Value> value);
+
+ Local<Value> Get(Handle<Value> key);
+
+ Local<Value> Get(uint32_t index);
+
+ // TODO(1245389): Replace the type-specific versions of these
+ // functions with generic ones that accept a Handle<Value> key.
+ bool Has(Handle<String> key);
+
+ bool Delete(Handle<String> key);
+
+ bool Has(uint32_t index);
+
+ bool Delete(uint32_t index);
+
+ // Installs a native getter (and optional setter) for the named property;
+ // see AccessorGetter/AccessorSetter and AccessControl above.
+ bool SetAccessor(Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
+
+ /**
+ * Returns an array containing the names of the enumerable properties
+ * of this object, including properties from prototype objects. The
+ * array returned by this method contains the same values as would
+ * be enumerated by a for-in statement over this object.
+ */
+ Local<Array> GetPropertyNames();
+
+ /**
+ * This function has the same functionality as GetPropertyNames but
+ * the returned array doesn't contain the names of properties from
+ * prototype objects.
+ */
+ Local<Array> GetOwnPropertyNames();
+
+ /**
+ * Get the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ Local<Value> GetPrototype();
+
+ /**
+ * Set the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ bool SetPrototype(Handle<Value> prototype);
+
+ /** Gets the value in an internal field. */
+ Local<Value> GetInternalField(int index);
+ /** Sets the value in an internal field. */
+ void SetInternalField(int index, Handle<Value> value);
+
+ // Embedder-owned data attached to an object; Dispose() is invoked when
+ // the resource is released (default implementation deletes it).
+ class V8EXPORT ExternalResource { // NOLINT
+ public:
+ ExternalResource() {}
+ virtual ~ExternalResource() {}
+
+ virtual void Dispose() { delete this; }
+
+ private:
+ // Disallow copying and assigning.
+ ExternalResource(const ExternalResource&);
+ void operator=(const ExternalResource&);
+ };
+
+ void SetExternalResource(ExternalResource *);
+ ExternalResource *GetExternalResource();
+
+ // Testers for local properties.
+ bool HasOwnProperty(Handle<String> key);
+
+ /**
+ * Returns the identity hash for this object. The current implementation
+ * uses a hidden property on the object to store the identity hash.
+ *
+ * The return value will never be 0. Also, it is not guaranteed to be
+ * unique.
+ */
+ int GetIdentityHash();
+
+ /**
+ * Access hidden properties on JavaScript objects. These properties are
+ * hidden from the executing JavaScript and only accessible through the V8
+ * C++ API. Hidden properties introduced by V8 internally (for example the
+ * identity hash) are prefixed with "v8::".
+ */
+ bool SetHiddenValue(Handle<String> key, Handle<Value> value);
+ Local<Value> GetHiddenValue(Handle<String> key);
+
+ /**
+ * Clone this object with a fast but shallow copy. Values will point
+ * to the same values as the original object.
+ */
+ Local<Object> Clone();
+
+
+ /**
+ * Checks whether a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ * When an Object is callable this method returns true.
+ */
+ bool IsCallable();
+
+ /**
+ * Call an Object as a function if a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ */
+ Local<Value> CallAsFunction(Handle<Object> recv,
+ int argc,
+ Handle<Value> argv[]);
+
+ /**
+ * Call an Object as a constructor if a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ * Note: This method behaves like the Function::NewInstance method.
+ */
+ Local<Value> CallAsConstructor(int argc,
+ Handle<Value> argv[]);
+
+ /** Creates a new, empty object. */
+ static Local<Object> New();
+ static Object* Cast(Value* obj);
+};
+
+
+/**
+ * An instance of the built-in array constructor (ECMA-262, 15.4.2).
+ */
+class V8EXPORT Array : public Object {
+ public:
+ /** Returns the array's "length" property. */
+ uint32_t Length() const;
+
+ /**
+ * Creates a JavaScript array with the given length. If the length
+ * is negative the returned array will have length 0.
+ */
+ static Local<Array> New(int length = 0);
+
+ static Array* Cast(Value* obj);
+};
+
+
+/**
+ * A JavaScript function object (ECMA-262, 15.3).
+ */
+class V8EXPORT Function : public Object {
+ public:
+ /** Invokes the function as a constructor, as if by 'new'. */
+ Local<Object> NewInstance() const;
+ Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
+ /** Invokes the function with 'recv' as the receiver (this). */
+ Local<Value> Call(Handle<Object> recv,
+ int argc,
+ Handle<Value> argv[]);
+ Handle<Value> GetName() const;
+
+ /** Returns the origin (script name, line/column offsets) of this function. */
+ ScriptOrigin GetScriptOrigin() const;
+ static Function* Cast(Value* obj);
+};
+
+
+/**
+ * An instance of the built-in Date constructor (ECMA-262, 15.9).
+ */
+class V8EXPORT Date : public Object {
+ public:
+ // 'time' is the time value in milliseconds -- presumably since the epoch,
+ // as in ECMA-262 15.9.1.1; confirm in the implementation.
+ static Local<Value> New(double time);
+
+ /**
+ * A specialization of Value::NumberValue that is more efficient
+ * because we know the structure of this object.
+ */
+ double NumberValue() const;
+
+ static Date* Cast(v8::Value* obj);
+
+ /**
+ * Notification that the embedder has changed the time zone,
+ * daylight savings time, or other date / time configuration
+ * parameters. V8 keeps a cache of various values used for
+ * date / time computation. This notification will reset
+ * those cached values for the current context so that date /
+ * time configuration changes would be reflected in the Date
+ * object.
+ *
+ * This API should not be called more than needed as it will
+ * negatively impact the performance of date operations.
+ */
+ static void DateTimeConfigurationChangeNotification();
+
+};
+
+
+/**
+ * A Number object (ECMA-262, 4.3.21).
+ */
+class V8EXPORT NumberObject : public Object {
+ public:
+ /** Creates a wrapper object holding the given number. */
+ static Local<Value> New(double value);
+
+ /**
+ * Returns the Number held by the object.
+ */
+ double NumberValue() const;
+
+ static NumberObject* Cast(v8::Value* obj);
+
+};
+
+
+/**
+ * A Boolean object (ECMA-262, 4.3.15).
+ */
+class V8EXPORT BooleanObject : public Object {
+ public:
+ /** Creates a wrapper object holding the given boolean. */
+ static Local<Value> New(bool value);
+
+ /**
+ * Returns the Boolean held by the object.
+ */
+ bool BooleanValue() const;
+
+ static BooleanObject* Cast(v8::Value* obj);
+
+};
+
+
+/**
+ * A String object (ECMA-262, 4.3.18).
+ */
+class V8EXPORT StringObject : public Object {
+ public:
+ /** Creates a wrapper object holding the given string. */
+ static Local<Value> New(Handle<String> value);
+
+ /**
+ * Returns the String held by the object.
+ */
+ Local<String> StringValue() const;
+
+ static StringObject* Cast(v8::Value* obj);
+
+};
+
+
+/**
+ * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
+ */
+class V8EXPORT RegExp : public Object {
+ public:
+ /**
+ * Regular expression flag bits. They can be or'ed to enable a set
+ * of flags.
+ */
+ enum Flags {
+ kNone = 0,
+ kGlobal = 1, // /g
+ kIgnoreCase = 2, // /i
+ kMultiline = 4 // /m
+ };
+
+ /**
+ * Creates a regular expression from the given pattern string and
+ * the flags bit field. May throw a JavaScript exception as
+ * described in ECMA-262, 15.10.4.1.
+ *
+ * For example,
+ * RegExp::New(v8::String::New("foo"),
+ * static_cast<RegExp::Flags>(kGlobal | kMultiline))
+ * is equivalent to evaluating "/foo/gm".
+ */
+ static Local<RegExp> New(Handle<String> pattern,
+ Flags flags);
+
+ /**
+ * Returns the value of the source property: a string representing
+ * the regular expression.
+ */
+ Local<String> GetSource() const;
+
+ /**
+ * Returns the flags bit field.
+ */
+ Flags GetFlags() const;
+
+ static RegExp* Cast(v8::Value* obj);
+
+};
+
+
+/**
+ * A JavaScript value that wraps a C++ void*. This type of value is
+ * mainly used to associate C++ data structures with JavaScript
+ * objects.
+ *
+ * The Wrap function V8 will return the most optimal Value object wrapping the
+ * C++ void*. The type of the value is not guaranteed to be an External object
+ * and no assumptions about its type should be made. To access the wrapped
+ * value Unwrap should be used, all other operations on that object will lead
+ * to unpredictable results.
+ */
+class V8EXPORT External : public Value {
+ public:
+ // Wrap/Unwrap are the preferred pair; see the class comment above.
+ static Local<Value> Wrap(void* data);
+ static void* Unwrap(Handle<Value> obj);
+
+ static Local<External> New(void* value);
+ static External* Cast(Value* obj);
+ /** Returns the wrapped C++ pointer. */
+ void* Value() const;
+};
+
+
+// --- Templates ---
+
+
+/**
+ * The superclass of object and function templates.
+ */
+class V8EXPORT Template : public Data {
+ public:
+ /** Adds a property to each instance created by this template.*/
+ void Set(Handle<String> name, Handle<Value> value,
+ PropertyAttribute attributes = None);
+ void Set(const char* name, Handle<Value> value);
+
+ // One property to be installed on every instance created from this
+ // template: name, value and its PropertyAttribute bits.
+ struct Property {
+ Persistent<String> name;
+ Persistent<Value> value;
+ PropertyAttribute attributes;
+ };
+ // NOTE(review): intentionally public -- presumably read directly by the
+ // instantiation code elsewhere in this module.
+ QVector<Property> m_properties;
+ };
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(Template)
+
+/**
+ * The argument information given to function call callbacks. This
+ * class provides access to information about the context of the call,
+ * including the receiver, the number and values of arguments, and
+ * the holder of the function.
+ */
+class V8EXPORT Arguments {
+ public:
+ // Built from the VM call frame: the raw argument vector, the receiver,
+ // whether the call was a 'new' construction, and the callback's data.
+ Arguments(const QQmlJS::VM::Value *args, int argc, const QQmlJS::VM::Value &thisObject, bool isConstructor,
+ const Persistent<Value> &data);
+ /** Returns the number of arguments passed to the call. */
+ int Length() const;
+ /** Returns the i-th argument. */
+ Local<Value> operator[](int i) const;
+ /** Returns the receiver ('this') of the call. */
+ Local<Object> This() const;
+ Local<Object> Holder() const;
+ /** Returns true when the function was invoked with 'new'. */
+ bool IsConstructCall() const;
+ /** Returns the data value associated with the callback. */
+ Local<Value> Data() const;
+ Isolate* GetIsolate() const;
+
+private:
+ QVector<Persistent<Value> > m_args; // arguments, in call order
+ Persistent<Object> m_thisObject; // receiver
+ bool m_isConstructor; // true for 'new' invocations
+ Persistent<Value> m_data; // callback data
+};
+
+
+/**
+ * The information passed to an accessor callback about the context
+ * of the property access.
+ */
+class V8EXPORT AccessorInfo {
+ public:
+ // Built from the object the property was accessed on and the data value
+ // supplied when the accessor was installed.
+ AccessorInfo(const QQmlJS::VM::Value &thisObject, const Persistent<Value> &data);
+ Isolate* GetIsolate() const;
+ /** Returns the data value associated with the accessor. */
+ Local<Value> Data() const;
+ /** Returns the object the property was accessed on. */
+ Local<Object> This() const;
+ Local<Object> Holder() const;
+private:
+ Persistent<Value> m_this; // receiver of the property access
+ Persistent<Value> m_data; // accessor data
+};
+
+
+// Native callback invoked when a template-created function is called.
+typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
+
+/**
+ * NamedProperty[Getter|Setter] are used as interceptors on object.
+ * See ObjectTemplate::SetNamedPropertyHandler.
+ */
+// Returns the value of the property if the getter intercepts the request.
+// Otherwise, returns an empty handle.
+typedef Handle<Value> (*NamedPropertyGetter)(Local<String> property,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns the value if the setter intercepts the request.
+ * Otherwise, returns an empty handle.
+ */
+typedef Handle<Value> (*NamedPropertySetter)(Local<String> property,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+/**
+ * Returns a non-empty handle if the interceptor intercepts the request.
+ * The result is an integer encoding property attributes (like v8::None,
+ * v8::DontEnum, etc.)
+ */
+typedef Handle<Integer> (*NamedPropertyQuery)(Local<String> property,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the deleter intercepts the request.
+ * The return value is true if the property could be deleted and false
+ * otherwise.
+ */
+typedef Handle<Boolean> (*NamedPropertyDeleter)(Local<String> property,
+ const AccessorInfo& info);
+
+/**
+ * Returns an array containing the names of the properties the named
+ * property getter intercepts.
+ */
+typedef Handle<Array> (*NamedPropertyEnumerator)(const AccessorInfo& info);
+
+
+/**
+ * Returns the value of the property if the getter intercepts the
+ * request. Otherwise, returns an empty handle.
+ */
+typedef Handle<Value> (*IndexedPropertyGetter)(uint32_t index,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns the value if the setter intercepts the request.
+ * Otherwise, returns an empty handle.
+ */
+typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,
+ Local<Value> value,
+ const AccessorInfo& info);
+
+
+/**
+ * Returns a non-empty handle if the interceptor intercepts the request.
+ * The result is an integer encoding property attributes.
+ */
+typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
+ const AccessorInfo& info);
+
+/**
+ * Returns a non-empty handle if the deleter intercepts the request.
+ * The return value is true if the property could be deleted and false
+ * otherwise.
+ */
+typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
+ const AccessorInfo& info);
+
+/**
+ * Returns an array containing the indices of the properties the
+ * indexed property getter intercepts.
+ */
+typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);
+
+
+/**
+ * A FunctionTemplate is used to create functions at runtime. There
+ * can only be one function created from a FunctionTemplate in a
+ * context. The lifetime of the created function is equal to the
+ * lifetime of the context. So in case the embedder needs to create
+ * temporary functions that can be collected using Scripts is
+ * preferred.
+ *
+ * A FunctionTemplate can have properties, these properties are added to the
+ * function object when it is created.
+ *
+ * A FunctionTemplate has a corresponding instance template which is
+ * used to create object instances when the function is used as a
+ * constructor. Properties added to the instance template are added to
+ * each object instance.
+ *
+ * A FunctionTemplate can have a prototype template. The prototype template
+ * is used to create the prototype object of the function.
+ *
+ * The following example shows how to use a FunctionTemplate:
+ *
+ * \code
+ * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+ * t->Set("func_property", v8::Number::New(1));
+ *
+ * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
+ * proto_t->Set("proto_method", v8::FunctionTemplate::New(InvokeCallback));
+ * proto_t->Set("proto_const", v8::Number::New(2));
+ *
+ * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
+ * instance_t->SetAccessor("instance_accessor", InstanceAccessorCallback);
+ * instance_t->SetNamedPropertyHandler(PropertyHandlerCallback, ...);
+ * instance_t->Set("instance_property", Number::New(3));
+ *
+ * v8::Local<v8::Function> function = t->GetFunction();
+ * v8::Local<v8::Object> instance = function->NewInstance();
+ * \endcode
+ *
+ * Let's use "function" as the JS variable name of the function object
+ * and "instance" for the instance object created above. The function
+ * and the instance will have the following properties:
+ *
+ * \code
+ * func_property in function == true;
+ * function.func_property == 1;
+ *
+ * function.prototype.proto_method() invokes 'InvokeCallback'
+ * function.prototype.proto_const == 2;
+ *
+ * instance instanceof function == true;
+ * instance.instance_accessor calls 'InstanceAccessorCallback'
+ * instance.instance_property == 3;
+ * \endcode
+ *
+ * A FunctionTemplate can inherit from another one by calling the
+ * FunctionTemplate::Inherit method. The following graph illustrates
+ * the semantics of inheritance:
+ *
+ * \code
+ * FunctionTemplate Parent -> Parent() . prototype -> { }
+ * ^ ^
+ * | Inherit(Parent) | .__proto__
+ * | |
+ * FunctionTemplate Child -> Child() . prototype -> { }
+ * \endcode
+ *
+ * A FunctionTemplate 'Child' inherits from 'Parent', the prototype
+ * object of the Child() function has __proto__ pointing to the
+ * Parent() function's prototype object. An instance of the Child
+ * function has all properties on Parent's instance templates.
+ *
+ * Let Parent be the FunctionTemplate initialized in the previous
+ * section and create a Child FunctionTemplate by:
+ *
+ * \code
+ * Local<FunctionTemplate> parent = t;
+ * Local<FunctionTemplate> child = FunctionTemplate::New();
+ * child->Inherit(parent);
+ *
+ * Local<Function> child_function = child->GetFunction();
+ * Local<Object> child_instance = child_function->NewInstance();
+ * \endcode
+ *
+ * The Child function and Child instance will have the following
+ * properties:
+ *
+ * \code
+ * child_func.prototype.__proto__ == function.prototype;
+ * child_instance.instance_accessor calls 'InstanceAccessorCallback'
+ * child_instance.instance_property == 3;
+ * \endcode
+ */
+class V8EXPORT FunctionTemplate : public Template {
+ public:
+ /** Creates a function template.*/
+ static Local<FunctionTemplate> New(
+ InvocationCallback callback = 0,
+ Handle<Value> data = Handle<Value>());
+ /** Returns the unique function instance in the current execution context.*/
+ Local<Function> GetFunction();
+
+ /** Get the InstanceTemplate. */
+ Local<ObjectTemplate> InstanceTemplate();
+
+ /**
+ * A PrototypeTemplate is the template used to create the prototype object
+ * of the function created by this template.
+ */
+ Local<ObjectTemplate> PrototypeTemplate();
+
+private:
+ FunctionTemplate(InvocationCallback callback, Handle<Value> data);
+ // V4V8Function needs access to the stored callback and data below.
+ friend class V4V8Function;
+ InvocationCallback m_callback; // callback passed to New(); may be 0
+ Persistent<Value> m_data; // data argument forwarded to the callback
+ Local<ObjectTemplate> m_instanceTemplate; // returned by InstanceTemplate()
+ Local<ObjectTemplate> m_prototypeTemplate; // returned by PrototypeTemplate()
+};
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(FunctionTemplate)
+
+
+/**
+ * An ObjectTemplate is used to create objects at runtime.
+ *
+ * Properties added to an ObjectTemplate are added to each object
+ * created from the ObjectTemplate.
+ */
+class V8EXPORT ObjectTemplate : public Template {
+ public:
+ /** Creates an ObjectTemplate. */
+ static Local<ObjectTemplate> New();
+
+ /** Creates a new instance of this template.*/
+ Local<Object> NewInstance();
+
+ /**
+ * Sets an accessor on the object template.
+ *
+ * Whenever the property with the given name is accessed on objects
+ * created from this ObjectTemplate the getter and setter callbacks
+ * are called instead of getting and setting the property directly
+ * on the JavaScript object.
+ *
+ * \param name The name of the property for which an accessor is added.
+ * \param getter The callback to invoke when getting the property.
+ * \param setter The callback to invoke when setting the property.
+ * \param data A piece of data that will be passed to the getter and setter
+ * callbacks whenever they are invoked.
+ * \param settings Access control settings for the accessor. This is a bit
+ * field consisting of one or more of
+ * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
+ * The default is to not allow cross-context access.
+ * ALL_CAN_READ means that all cross-context reads are allowed.
+ * ALL_CAN_WRITE means that all cross-context writes are allowed.
+ * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
+ * cross-context access.
+ * \param attribute The attributes of the property for which an accessor
+ * is added.
+ * \param signature The signature describes valid receivers for the accessor
+ * and is used to perform implicit instance checks against them. If the
+ * receiver is incompatible (i.e. is not an instance of the constructor as
+ * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+ * thrown and no callback is invoked.
+ */
+ void SetAccessor(Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
+
+ /**
+ * Sets a named property handler on the object template.
+ *
+ * Whenever a named property is accessed on objects created from
+ * this object template, the provided callback is invoked instead of
+ * accessing the property directly on the JavaScript object.
+ *
+ * \param getter The callback to invoke when getting a property.
+ * \param setter The callback to invoke when setting a property.
+ * \param query The callback to invoke to check if a property is present,
+ * and if present, get its attributes.
+ * \param deleter The callback to invoke when deleting a property.
+ * \param enumerator The callback to invoke to enumerate all the named
+ * properties of an object.
+ * \param data A piece of data that will be passed to the callbacks
+ * whenever they are invoked.
+ */
+ void SetNamedPropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter = 0,
+ NamedPropertyQuery query = 0,
+ NamedPropertyDeleter deleter = 0,
+ NamedPropertyEnumerator enumerator = 0,
+ Handle<Value> data = Handle<Value>());
+ // Like SetNamedPropertyHandler, but stored separately as a fallback
+ // handler set (see m_fallbackProperty* members below).
+ void SetFallbackPropertyHandler(NamedPropertyGetter getter,
+ NamedPropertySetter setter = 0,
+ NamedPropertyQuery query = 0,
+ NamedPropertyDeleter deleter = 0,
+ NamedPropertyEnumerator enumerator = 0,
+ Handle<Value> data = Handle<Value>());
+
+ /**
+ * Sets an indexed property handler on the object template.
+ *
+ * Whenever an indexed property is accessed on objects created from
+ * this object template, the provided callback is invoked instead of
+ * accessing the property directly on the JavaScript object.
+ *
+ * \param getter The callback to invoke when getting a property.
+ * \param setter The callback to invoke when setting a property.
+ * \param query The callback to invoke to check if an object has a property.
+ * \param deleter The callback to invoke when deleting a property.
+ * \param enumerator The callback to invoke to enumerate all the indexed
+ * properties of an object.
+ * \param data A piece of data that will be passed to the callbacks
+ * whenever they are invoked.
+ */
+ void SetIndexedPropertyHandler(IndexedPropertyGetter getter,
+ IndexedPropertySetter setter = 0,
+ IndexedPropertyQuery query = 0,
+ IndexedPropertyDeleter deleter = 0,
+ IndexedPropertyEnumerator enumerator = 0,
+ Handle<Value> data = Handle<Value>());
+
+ /**
+ * Gets the number of internal fields for objects generated from
+ * this template.
+ */
+ int InternalFieldCount();
+
+ /**
+ * Sets the number of internal fields for objects generated from
+ * this template.
+ */
+ void SetInternalFieldCount(int value);
+
+ /**
+ * Sets whether the object can store an "external resource" object.
+ */
+ bool HasExternalResource();
+ void SetHasExternalResource(bool value);
+
+ /**
+ * Mark object instances of the template as using the user object
+ * comparison callback.
+ */
+ void MarkAsUseUserObjectComparison();
+
+ // Record of one accessor registered via SetAccessor().
+ struct Accessor {
+ Persistent<Value> getter;
+ Persistent<Value> setter;
+ Persistent<String> name;
+ PropertyAttribute attribute;
+ };
+
+ // NOTE(review): the members below are declared before the private:
+ // section and are therefore publicly accessible — presumably read
+ // directly by the V4 engine internals; confirm before restricting.
+ QVector<Accessor> m_accessors;
+
+ // Handlers registered via SetNamedPropertyHandler().
+ NamedPropertyGetter m_namedPropertyGetter;
+ NamedPropertySetter m_namedPropertySetter;
+ NamedPropertyQuery m_namedPropertyQuery;
+ NamedPropertyDeleter m_namedPropertyDeleter;
+ NamedPropertyEnumerator m_namedPropertyEnumerator;
+ Persistent<Value> m_namedPropertyData;
+
+ // Handlers registered via SetFallbackPropertyHandler().
+ NamedPropertyGetter m_fallbackPropertyGetter;
+ NamedPropertySetter m_fallbackPropertySetter;
+ NamedPropertyQuery m_fallbackPropertyQuery;
+ NamedPropertyDeleter m_fallbackPropertyDeleter;
+ NamedPropertyEnumerator m_fallbackPropertyEnumerator;
+ Persistent<Value> m_fallbackPropertyData;
+
+ // Handlers registered via SetIndexedPropertyHandler().
+ IndexedPropertyGetter m_indexedPropertyGetter;
+ IndexedPropertySetter m_indexedPropertySetter;
+ IndexedPropertyQuery m_indexedPropertyQuery;
+ IndexedPropertyDeleter m_indexedPropertyDeleter;
+ IndexedPropertyEnumerator m_indexedPropertyEnumerator;
+ Persistent<Value> m_indexedPropertyData;
+
+ // Set by MarkAsUseUserObjectComparison().
+ bool m_useUserComparison;
+ private:
+ ObjectTemplate();
+ };
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(ObjectTemplate)
+
+// --- Statics ---
+
+
+// Canonical primitive singletons.
+Handle<Primitive> V8EXPORT Undefined();
+Handle<Primitive> V8EXPORT Null();
+Handle<Boolean> V8EXPORT True();
+Handle<Boolean> V8EXPORT False();
+
+// Isolate-taking overloads for source compatibility with the newer v8
+// API; the isolate argument is ignored and the singletons above are used.
+inline Handle<Primitive> Undefined(Isolate*) { return Undefined(); }
+inline Handle<Primitive> Null(Isolate*) { return Null(); }
+inline Handle<Boolean> True(Isolate*) { return True(); }
+inline Handle<Boolean> False(Isolate*) { return False(); }
+
+
+
+// --- Exceptions ---
+
+
+/**
+ * Schedules an exception to be thrown when returning to JavaScript. When an
+ * exception has been scheduled it is illegal to invoke any JavaScript
+ * operation; the caller must return immediately and only after the exception
+ * has been handled does it become legal to invoke JavaScript operations.
+ */
+Handle<Value> V8EXPORT ThrowException(Handle<Value> exception);
+
+/**
+ * Create new error objects by calling the corresponding error object
+ * constructor with the message.
+ */
+class V8EXPORT Exception {
+ public:
+ static Local<Value> ReferenceError(Handle<String> message);
+ static Local<Value> SyntaxError(Handle<String> message);
+ static Local<Value> TypeError(Handle<String> message);
+ static Local<Value> Error(Handle<String> message);
+ // NOTE(review): upstream v8 also offers RangeError; it is not declared
+ // here — confirm whether that is intentional for this compat layer.
+};
+
+
+// --- User Object Comparison Callback ---
+// Invoked to compare two objects when user-object comparison has been
+// enabled (see ObjectTemplate::MarkAsUseUserObjectComparison and
+// V8::SetUserObjectComparisonCallbackFunction).
+typedef bool (*UserObjectComparisonCallback)(Local<Object> lhs,
+ Local<Object> rhs);
+
+// --- Garbage Collection Callbacks ---
+
+/**
+ * Applications can register callback functions which will be called
+ * before and after a garbage collection. Allocations are not
+ * allowed in the callback functions, you therefore cannot manipulate
+ * objects (set or delete properties for example) since it is possible
+ * such operations will result in the allocation of objects.
+ */
+// Bit flags describing which collector ran; usable as a filter mask.
+enum GCType {
+ kGCTypeScavenge = 1 << 0,
+ kGCTypeMarkSweepCompact = 1 << 1,
+ kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact
+};
+
+enum GCCallbackFlags {
+ kNoGCCallbackFlags = 0,
+ kGCCallbackFlagCompacted = 1 << 0
+};
+
+typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
+typedef void (*GCCallback)();
+
+
+
+/**
+ * Isolate represents an isolated instance of the V8 engine. V8
+ * isolates have completely separate states. Objects from one isolate
+ * must not be used in other isolates. When V8 is initialized a
+ * default isolate is implicitly created and entered. The embedder
+ * can create additional isolates and use them in parallel in multiple
+ * threads. An isolate can be entered by at most one thread at any
+ * given time. The Locker/Unlocker API must be used to synchronize.
+ */
+class V8EXPORT Isolate {
+ public:
+ Isolate();
+ ~Isolate();
+ /**
+ * Stack-allocated class which sets the isolate for all operations
+ * executed within a local scope.
+ */
+ class V8EXPORT Scope {
+ public:
+ explicit Scope(Isolate* isolate) : isolate_(isolate) {
+ isolate->Enter();
+ }
+
+ ~Scope() { isolate_->Exit(); }
+
+ private:
+ Isolate* const isolate_;
+
+ // Prevent copying of Scope objects.
+ Scope(const Scope&);
+ Scope& operator=(const Scope&);
+ };
+
+ /**
+ * Creates a new isolate. Does not change the currently entered
+ * isolate.
+ *
+ * When an isolate is no longer used its resources should be freed
+ * by calling Dispose(). Using the delete operator is not allowed.
+ */
+ static Isolate* New();
+
+ /**
+ * Returns the entered isolate for the current thread or NULL in
+ * case there is no current isolate.
+ */
+ static Isolate* GetCurrent();
+
+ /**
+ * Methods below this point require holding a lock (using Locker) in
+ * a multi-threaded environment.
+ */
+
+ /**
+ * Sets this isolate as the entered one for the current thread.
+ * Saves the previously entered one (if any), so that it can be
+ * restored when exiting. Re-entering an isolate is allowed.
+ */
+ void Enter();
+
+ /**
+ * Exits this isolate by restoring the previously entered one in the
+ * current thread. The isolate may still stay the same, if it was
+ * entered more than once.
+ *
+ * Requires: this == Isolate::GetCurrent().
+ */
+ void Exit();
+
+ /**
+ * Disposes the isolate. The isolate must not be entered by any
+ * thread to be disposable.
+ */
+ void Dispose();
+
+ /**
+ * Associate embedder-specific data with the isolate
+ */
+ void SetData(void* data);
+
+ /**
+ * Retrieve embedder-specific data from the isolate.
+ * Returns NULL if SetData has never been called.
+ */
+ void* GetData();
+
+ // Innermost entered context; calling this with no context entered
+ // reads the top of an empty stack and is invalid.
+ Context *GetCurrentContext() { return m_contextStack.top(); }
+ void setException(const QQmlJS::VM::Value &ex);
+
+ private:
+ friend class Context; // contexts push/pop themselves on m_contextStack
+ friend class TryCatch; // try/catch blocks register themselves here
+ Isolate* m_lastIsolate; // isolate that was current before Enter()
+ QStack<Context*> m_contextStack; // entered contexts, innermost on top
+ TryCatch *tryCatch; // innermost registered TryCatch handler
+};
+
+
+/**
+ * Container class for static utility functions.
+ *
+ * All members are static; the class serves purely as a namespace for
+ * process-wide engine operations.
+ */
+class V8EXPORT V8 {
+ public:
+
+ /**
+ * Sets V8 flags from a string.
+ */
+ static void SetFlagsFromString(const char* str, int length);
+
+ /** Callback for user object comparisons */
+ static void SetUserObjectComparisonCallbackFunction(UserObjectComparisonCallback);
+
+ /**
+ * Enables the host application to receive a notification before a
+ * garbage collection. Allocations are not allowed in the
+ * callback function, you therefore cannot manipulate objects (set
+ * or delete properties for example) since it is possible such
+ * operations will result in the allocation of objects. It is possible
+ * to specify the GCType filter for your callback. But it is not possible to
+ * register the same callback function two times with different
+ * GCType filters.
+ */
+ static void AddGCPrologueCallback(
+ GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+ /**
+ * This function removes callback which was installed by
+ * AddGCPrologueCallback function.
+ */
+ static void RemoveGCPrologueCallback(GCPrologueCallback callback);
+
+ /**
+ * Allows the host application to declare implicit references between
+ * the objects: if |parent| is alive, all |children| are alive too.
+ * After each garbage collection, all implicit references
+ * are removed. It is intended to be used in the before-garbage-collection
+ * callback function.
+ */
+ static void AddImplicitReferences(Persistent<Object> parent,
+ Persistent<Value>* children,
+ size_t length);
+
+ /**
+ * Initializes from snapshot if possible. Otherwise, attempts to
+ * initialize from scratch. This function is called implicitly if
+ * you use the API without calling it first.
+ */
+ static bool Initialize();
+
+ /**
+ * Releases any resources used by v8 and stops any utility threads
+ * that may be running. Note that disposing v8 is permanent, it
+ * cannot be reinitialized.
+ *
+ * It should generally not be necessary to dispose v8 before exiting
+ * a process, this should happen automatically. It is only necessary
+ * to use if the process needs the resources taken up by v8.
+ */
+ static bool Dispose();
+
+ /**
+ * Optional notification that the embedder is idle.
+ * V8 uses the notification to reduce memory footprint.
+ * This call can be used repeatedly if the embedder remains idle.
+ * Returns true if the embedder should stop calling IdleNotification
+ * until real work has been done. This indicates that V8 has done
+ * as much cleanup as it will be able to do.
+ *
+ * The hint argument specifies the amount of work to be done in the function
+ * on scale from 1 to 1000. There is no guarantee that the actual work will
+ * match the hint.
+ */
+ static bool IdleNotification(int hint = 1000);
+
+ /**
+ * Optional notification that the system is running low on memory.
+ * V8 uses these notifications to attempt to free memory.
+ */
+ static void LowMemoryNotification();
+};
+
+/**
+ * An external exception handler.
+ */
+class V8EXPORT TryCatch {
+ public:
+ /**
+ * Creates a new try/catch block and registers it with v8.
+ */
+ TryCatch();
+
+ /**
+ * Unregisters and deletes this try/catch block.
+ */
+ ~TryCatch();
+
+ /**
+ * Returns true if an exception has been caught by this try/catch block.
+ */
+ bool HasCaught() const;
+
+ /**
+ * Throws the exception caught by this TryCatch in a way that avoids
+ * it being caught again by this same TryCatch. As with ThrowException
+ * it is illegal to execute any JavaScript operations after calling
+ * ReThrow; the caller must return immediately to where the exception
+ * is caught.
+ */
+ Handle<Value> ReThrow();
+
+ /**
+ * Returns the exception caught by this try/catch block. If no exception has
+ * been caught an empty handle is returned.
+ *
+ * The returned handle is valid until this TryCatch block has been destroyed.
+ */
+ Local<Value> Exception() const;
+
+ /**
+ * Returns the message associated with this exception. If there is
+ * no message associated an empty handle is returned.
+ *
+ * The returned handle is valid until this TryCatch block has been
+ * destroyed.
+ */
+ Local<v8::Message> Message() const;
+
+ /**
+ * Clears any exceptions that may have been caught by this try/catch block.
+ * After this method has been called, HasCaught() will return false.
+ *
+ * It is not necessary to clear a try/catch block before using it again; if
+ * another exception is thrown the previously caught exception will just be
+ * overwritten. However, it is often a good idea since it makes it easier
+ * to determine which operation threw a given exception.
+ */
+ void Reset();
+
+private:
+ friend class Isolate; // the isolate delivers thrown exceptions here
+ TryCatch *parent; // next-outer TryCatch — presumably restored on
+ // destruction; confirm in the implementation
+ bool hasCaughtException; // queried by HasCaught(), cleared by Reset()
+ Local<Value> exception; // caught value, returned by Exception()
+};
+
+
+// --- Context ---
+class V8EXPORT ExtensionConfiguration;
+
+/**
+ * A sandboxed execution context with its own set of built-in objects
+ * and functions.
+ */
+class V8EXPORT Context : public QSharedData {
+ public:
+ Context();
+ ~Context();
+
+ // Wraps an existing Context pointer in a Local handle, taking an
+ // additional reference on it.
+ static Local<Context> Adopt(Context *p)
+ {
+ Local<Context> l;
+ l.object = p;
+ l.object->ref.ref();
+ return l;
+ }
+ /**
+ * Returns the global proxy object or global object itself for
+ * detached contexts.
+ *
+ * Global proxy object is a thin wrapper whose prototype points to
+ * actual context's global object with the properties like Object, etc.
+ * This is done that way for security reasons (for more details see
+ * https://wiki.mozilla.org/Gecko:SplitWindow).
+ *
+ * Please note that changes to global proxy object prototype most probably
+ * would break VM---v8 expects only global object as a prototype of
+ * global proxy object.
+ *
+ * If DetachGlobal() has been invoked, Global() would return actual global
+ * object until global is reattached with ReattachGlobal().
+ */
+ Local<Object> Global();
+
+ /** Creates a new context.
+ *
+ * Returns a persistent handle to the newly allocated context. This
+ * persistent handle has to be disposed when the context is no
+ * longer used so the context can be garbage collected.
+ *
+ * \param extensions An optional extension configuration containing
+ * the extensions to be installed in the newly created context.
+ *
+ * \param global_template An optional object template from which the
+ * global object for the newly created context will be created.
+ *
+ * \param global_object An optional global object to be reused for
+ * the newly created context. This global object must have been
+ * created by a previous call to Context::New with the same global
+ * template. The state of the global object will be completely reset
+ * and only object identity will remain.
+ */
+ static Persistent<Context> New(
+ ExtensionConfiguration* extensions = NULL,
+ Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
+ Handle<Value> global_object = Handle<Value>());
+
+ /** Returns the context that is on the top of the stack. */
+ static Local<Context> GetCurrent();
+
+ /**
+ * Returns the context of the calling JavaScript code. That is the
+ * context of the top-most JavaScript frame. If there are no
+ * JavaScript frames an empty handle is returned.
+ */
+ static Local<Context> GetCalling();
+ static Local<Object> GetCallingQmlGlobal();
+ static Local<Value> GetCallingScriptData();
+
+ /**
+ * Enter this context. After entering a context, all code compiled
+ * and run is compiled and run in this context. If another context
+ * is already entered, this old context is saved so it can be
+ * restored when the new context is exited.
+ */
+ void Enter();
+
+ /**
+ * Exit this context. Exiting the current context restores the
+ * context that was in place when entering the current context.
+ */
+ void Exit();
+
+ /**
+ * Associate an additional data object with the context. This is mainly used
+ * with the debugger to provide additional information on the context through
+ * the debugger API.
+ */
+ void SetData(Handle<Value> data);
+ Local<Value> GetData();
+
+ /**
+ * Stack-allocated class which sets the execution context for all
+ * operations executed within a local scope.
+ */
+ class Scope {
+ public:
+ explicit Scope(Handle<Context> context) : context_(context) {
+ context_->Enter();
+ }
+ ~Scope() { context_->Exit(); }
+ private:
+ Handle<Context> context_;
+ };
+
+ QQmlJS::VM::ExecutionEngine *GetEngine();
+
+private:
+ Context* m_lastContext; // context that was current before Enter()
+ struct Private; // pimpl: implementation details live in the .cpp
+ Private *d;
+ // These classes reach into the context's private engine state.
+ friend class Value;
+ friend class Script;
+ friend class Object;
+ friend class Function;
+};
+
+DEFINE_REFCOUNTED_HANDLE_OPERATIONS(Context)
+
+// Weak-reference support is not implemented in this compatibility layer:
+// both arguments are ignored and Q_UNIMPLEMENTED() emits a warning.
+template<typename T>
+void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback)
+{
+ Q_UNUSED(parameters);
+ Q_UNUSED(callback);
+
+ Q_UNIMPLEMENTED();
+}
+
+
+
+} // namespace v8
+
+
+#undef V8EXPORT
+#undef TYPE_CHECK
+
+
+#endif // V8_H_
diff --git a/src/qml/qml/v4vm/qv4value.cpp b/src/qml/qml/v4vm/qv4value.cpp
new file mode 100644
index 0000000000..73efe7d22d
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4value.cpp
@@ -0,0 +1,214 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#include <qv4engine.h>
+#include <qv4object.h>
+#include <qv4objectproto.h>
+#include "qv4mm.h"
+
+#include <wtf/MathExtras.h>
+
+namespace QQmlJS {
+namespace VM {
+
+
+// ECMAScript ToUint16: reduce the value modulo 2^16 into [0, 2^16).
+// NOTE(review): the declared return type is int although the result always
+// fits in unsigned short — confirm against the declaration in qv4value.h.
+int Value::toUInt16() const
+{
+ // Fast path: integer-tagged values just need truncation.
+ if (isConvertibleToInt())
+ return (ushort)(uint)integerValue();
+
+ double number = __qmljs_to_number(*this);
+
+ double D16 = 65536.0;
+ // Common case: already a non-negative double inside [0, 2^16).
+ if ((number >= 0 && number < D16))
+ return static_cast<ushort>(number);
+
+ // NaN and +/-infinity map to +0 per the ECMAScript algorithm.
+ if (!std::isfinite(number))
+ return +0;
+
+ // Truncate towards zero while preserving the sign.
+ double d = ::floor(::fabs(number));
+ if (std::signbit(number))
+ d = -d;
+
+ // Reduce modulo 2^16, then shift a negative remainder into range.
+ number = ::fmod(d , D16);
+
+ if (number < 0)
+ number += D16;
+
+ return (unsigned short)number;
+}
+
+// Instance overload of ECMAScript ToInteger: integer-tagged values are
+// returned directly; everything else goes through ToNumber first and is
+// truncated by the static toInteger(double) overload.
+double Value::toInteger() const
+{
+ if (isConvertibleToInt())
+ return int_32;
+
+ return Value::toInteger(__qmljs_to_number(*this));
+}
+
+// ECMAScript ToNumber, delegated to the runtime helper.
+double Value::toNumber() const
+{
+ return __qmljs_to_number(*this);
+}
+
+// ECMAScript SameValue on the boxed representation: identical bit
+// patterns compare equal, strings compare by content, and the integer
+// branches reconcile the two possible encodings of the same number
+// (tagged int vs. double).
+bool Value::sameValue(Value other) const {
+ if (val == other.val)
+ return true;
+ if (isString() && other.isString())
+ return stringValue()->isEqualTo(other.stringValue());
+ // NOTE(review): other.dbl is read without checking that 'other' is
+ // actually encoded as a double — presumably safe because a tagged int
+ // never bit-compares equal to a double encoding, but confirm.
+ if (isInteger())
+ return int_32 ? (double(int_32) == other.dbl) : (other.val == 0);
+ if (other.isInteger())
+ return other.int_32 ? (dbl == double(other.int_32)) : (val == 0);
+ return false;
+}
+
+// Convenience overload: allocates the QString in the context's engine and
+// wraps the resulting VM string in a Value.
+Value Value::fromString(ExecutionContext *ctx, const QString &s)
+{
+ return fromString(ctx->engine->newString(s));
+}
+
+// ECMAScript ToInt32: reduce the value modulo 2^32 into [-2^31, 2^31).
+int Value::toInt32(double number)
+{
+ const double D32 = 4294967296.0;
+ const double D31 = D32 / 2.0;
+
+ // Common case: already representable as a signed 32-bit integer.
+ if ((number >= -D31 && number < D31))
+ return static_cast<int>(number);
+
+
+ // NaN and +/-infinity map to 0 per the ECMAScript algorithm.
+ if (!std::isfinite(number))
+ return 0;
+
+ // Truncate towards zero while preserving the sign.
+ double d = ::floor(::fabs(number));
+ if (std::signbit(number))
+ d = -d;
+
+ // Reduce modulo 2^32, then fold the remainder into [-2^31, 2^31).
+ number = ::fmod(d , D32);
+
+ if (number < -D31)
+ number += D32;
+ else if (number >= D31)
+ number -= D32;
+
+ return int(number);
+}
+
+// ECMAScript ToUint32: reduce the value modulo 2^32 into [0, 2^32).
+unsigned int Value::toUInt32(double number)
+{
+ const double D32 = 4294967296.0;
+ // Common case: already a non-negative value inside [0, 2^32).
+ if ((number >= 0 && number < D32))
+ return static_cast<uint>(number);
+
+ // NaN and +/-infinity map to +0 per the ECMAScript algorithm.
+ if (!std::isfinite(number))
+ return +0;
+
+ // Truncate towards zero while preserving the sign.
+ double d = ::floor(::fabs(number));
+ if (std::signbit(number))
+ d = -d;
+
+ // Reduce modulo 2^32, then shift a negative remainder into range.
+ number = ::fmod(d , D32);
+
+ if (number < 0)
+ number += D32;
+
+ return unsigned(number);
+}
+
+// ECMAScript ToInteger: NaN becomes +0; zero and the infinities are
+// returned unchanged; everything else is truncated towards zero.
+double Value::toInteger(double number)
+{
+ // FIX: qualify isnan/isinf with std:: — the unqualified names are C99
+ // macros that <cmath> is not required to provide in C++, and the rest
+ // of this file already uses std::isfinite/std::signbit.
+ if (std::isnan(number))
+ return +0;
+ else if (! number || std::isinf(number))
+ return number;
+ const double v = floor(fabs(number));
+ return std::signbit(number) ? -v : v;
+}
+
+// Property lookup: only object values can carry properties here, so any
+// non-object base yields undefined instead of being boxed.
+Value Value::property(ExecutionContext *ctx, String *name) const
+{
+ return isObject() ? objectValue()->get(ctx, name) : undefinedValue();
+}
+
+// Default constructor: holds undefined and no memory manager, so nothing
+// is protected and the destructor has nothing to release.
+PersistentValue::PersistentValue()
+ : m_memoryManager(0)
+ , m_value(Value::undefinedValue())
+{
+}
+
+// Constructs a persistent reference to 'val'. If the value wraps a
+// managed (GC-tracked) object, it is protected so the collector keeps it
+// alive for the lifetime of this PersistentValue. 'mm' must be non-null.
+PersistentValue::PersistentValue(MemoryManager *mm, const Value &val)
+ : m_memoryManager(mm)
+ , m_value(val)
+{
+ assert(mm);
+ if (Managed *m = asManaged())
+ m_memoryManager->protect(m);
+}
+
+// Copy constructor: shares the same manager and value and takes an
+// additional protection on the managed object (if any), so each copy
+// independently keeps the object alive.
+PersistentValue::PersistentValue(const PersistentValue &other)
+ : m_memoryManager(other.m_memoryManager)
+ , m_value(other.m_value)
+{
+ if (Managed *m = asManaged())
+ m_memoryManager->protect(m);
+}
+
+// Copy assignment: release the protection held on the currently
+// referenced managed object (if any), copy the other handle's manager
+// and value, then protect the newly referenced managed object.
+PersistentValue &PersistentValue::operator=(const PersistentValue &other)
+{
+ if (this == &other)
+ return *this;
+ if (Managed *m = asManaged())
+ m_memoryManager->unprotect(m);
+ m_memoryManager = other.m_memoryManager;
+ m_value = other.m_value;
+ if (Managed *m = asManaged())
+ m_memoryManager->protect(m);
+ // BUG FIX: the original fell off the end of a value-returning function
+ // (undefined behavior per [stmt.return]); return *this as declared.
+ return *this;
+}
+
+// Destructor: drops the protection taken in the constructors/assignment
+// so the managed object becomes collectable again. asManaged() yields 0
+// for the default-constructed (undefined) case, so no manager is touched.
+PersistentValue::~PersistentValue()
+{
+ if (Managed *m = asManaged())
+ m_memoryManager->unprotect(m);
+}
+
+
+
+} // namespace VM
+} // namespace QQmlJS
diff --git a/src/qml/qml/v4vm/qv4value.h b/src/qml/qml/v4vm/qv4value.h
new file mode 100644
index 0000000000..bbfba842b5
--- /dev/null
+++ b/src/qml/qml/v4vm/qv4value.h
@@ -0,0 +1,572 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+#ifndef QMLJS_VALUE_H
+#define QMLJS_VALUE_H
+
+#include <QtCore/QString>
+#include <QtCore/qnumeric.h>
+#include "qv4global.h"
+#include "qv4string.h"
+#include <QtCore/QDebug>
+#include "qv4managed.h"
+
+#include <wtf/MathExtras.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace VM {
+
+struct String;
+struct ExecutionContext;
+struct ExecutionEngine;
+struct Value;
+
+extern "C" {
+double __qmljs_to_number(const Value &value);
+Q_V4_EXPORT String *__qmljs_convert_to_string(ExecutionContext *ctx, const Value &value);
+Object *__qmljs_convert_to_object(ExecutionContext *ctx, const Value &value);
+}
+
+typedef uint Bool;
+
+
+struct Q_V4_EXPORT Value
+{
+ union {
+ quint64 val;
+ double dbl;
+ struct {
+#if Q_BYTE_ORDER != Q_LITTLE_ENDIAN
+ uint tag;
+#endif
+ union {
+ uint uint_32;
+ int int_32;
+#if QT_POINTER_SIZE == 4
+ Managed *m;
+ Object *o;
+ String *s;
+#endif
+ };
+#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
+ uint tag;
+#endif
+ };
+ };
+
+ enum Masks {
+ NotDouble_Mask = 0xfffc0000,
+ Type_Mask = 0xffff8000,
+ Immediate_Mask = NotDouble_Mask | 0x00008000,
+ Special_Mask = Immediate_Mask | 0x20000,
+ Tag_Shift = 32
+ };
+ enum ValueType {
+ Undefined_Type = Immediate_Mask | 0x00000,
+ Null_Type = Immediate_Mask | 0x10000,
+ Boolean_Type = Immediate_Mask | 0x20000,
+ Integer_Type = Immediate_Mask | 0x30000,
+ Object_Type = NotDouble_Mask | 0x00000,
+ String_Type = NotDouble_Mask | 0x10000,
+ Deleted_Type = NotDouble_Mask | 0x30000,
+ };
+
+ enum ImmediateFlags {
+ ConvertibleToInt = Immediate_Mask | 0x1
+ };
+
+ enum ValueTypeInternal {
+ _Undefined_Type = Undefined_Type,
+ _Deleted_Type = Deleted_Type,
+ _Null_Type = Null_Type | ConvertibleToInt,
+ _Boolean_Type = Boolean_Type | ConvertibleToInt,
+ _Integer_Type = Integer_Type | ConvertibleToInt,
+ _Object_Type = Object_Type,
+ _String_Type = String_Type
+
+ };
+
+ inline unsigned type() const {
+ return tag & Type_Mask;
+ }
+
+ // used internally in property
+ inline bool isDeleted() const { return tag == _Deleted_Type; }
+
+ inline bool isUndefined() const { return tag == _Undefined_Type; }
+ inline bool isNull() const { return tag == _Null_Type; }
+ inline bool isBoolean() const { return tag == _Boolean_Type; }
+ inline bool isInteger() const { return tag == _Integer_Type; }
+ inline bool isDouble() const { return (tag & NotDouble_Mask) != NotDouble_Mask; }
+ inline bool isNumber() const { return tag == _Integer_Type || (tag & NotDouble_Mask) != NotDouble_Mask; }
+#if QT_POINTER_SIZE == 8
+ inline bool isString() const { return (tag & Type_Mask) == String_Type; }
+ inline bool isObject() const { return (tag & Type_Mask) == Object_Type; }
+#else
+ inline bool isString() const { return tag == String_Type; }
+ inline bool isObject() const { return tag == Object_Type; }
+#endif
+ inline bool isConvertibleToInt() const { return (tag & ConvertibleToInt) == ConvertibleToInt; }
+
+ Bool booleanValue() const {
+ return int_32;
+ }
+ double doubleValue() const {
+ return dbl;
+ }
+ void setDouble(double d) {
+ dbl = d;
+ }
+ double asDouble() const {
+ if (tag == _Integer_Type)
+ return int_32;
+ return dbl;
+ }
+ int integerValue() const {
+ return int_32;
+ }
+
+#if QT_POINTER_SIZE == 8
+ String *stringValue() const {
+ return (String *)(val & ~(quint64(Type_Mask) << Tag_Shift));
+ }
+ Object *objectValue() const {
+ return (Object *)(val & ~(quint64(Type_Mask) << Tag_Shift));
+ }
+ Managed *managed() const {
+ return (Managed *)(val & ~(quint64(Type_Mask) << Tag_Shift));
+ }
+#else
+ String *stringValue() const {
+ return s;
+ }
+ Object *objectValue() const {
+ return o;
+ }
+ Managed *managed() const {
+ return m;
+ }
+#endif
+
+ quint64 rawValue() const {
+ return val;
+ }
+
+ static Value deletedValue();
+ static Value undefinedValue();
+ static Value nullValue();
+ static Value fromBoolean(Bool b);
+ static Value fromDouble(double d);
+ static Value fromInt32(int i);
+ static Value fromUInt32(uint i);
+ static Value fromString(String *s);
+ static Value fromObject(Object *o);
+
+#ifndef QMLJS_LLVM_RUNTIME
+ static Value fromString(ExecutionContext *ctx, const QString &fromString);
+#endif
+
+ static double toInteger(double fromNumber);
+ static int toInt32(double value);
+ static unsigned int toUInt32(double value);
+
+ int toUInt16() const;
+ int toInt32() const;
+ unsigned int toUInt32() const;
+
+ Bool toBoolean() const;
+ double toInteger() const;
+ double toNumber() const;
+ String *toString(ExecutionContext *ctx) const;
+ Object *toObject(ExecutionContext *ctx) const;
+
+ inline bool isPrimitive() const { return !isObject(); }
+#if QT_POINTER_SIZE == 8
+ inline bool integerCompatible() const {
+ const quint64 mask = quint64(ConvertibleToInt) << 32;
+ return (val & mask) == mask;
+ }
+ static inline bool integerCompatible(Value a, Value b) {
+ const quint64 mask = quint64(ConvertibleToInt) << 32;
+ return ((a.val & b.val) & mask) == mask;
+ }
+ static inline bool bothDouble(Value a, Value b) {
+ const quint64 mask = quint64(NotDouble_Mask) << 32;
+ return ((a.val | b.val) & mask) != mask;
+ }
+#else
+ inline bool integerCompatible() const {
+ return (tag & ConvertibleToInt) == ConvertibleToInt;
+ }
+ static inline bool integerCompatible(Value a, Value b) {
+ return ((a.tag & b.tag) & ConvertibleToInt) == ConvertibleToInt;
+ }
+ static inline bool bothDouble(Value a, Value b) {
+ return ((a.tag | b.tag) & NotDouble_Mask) != NotDouble_Mask;
+ }
+#endif
+ inline bool tryIntegerConversion() {
+ bool b = isConvertibleToInt();
+ if (b)
+ tag = _Integer_Type;
+ return b;
+ }
+
+ String *asString() const;
+ Managed *asManaged() const;
+ Object *asObject() const;
+ FunctionObject *asFunctionObject() const;
+ BooleanObject *asBooleanObject() const;
+ NumberObject *asNumberObject() const;
+ StringObject *asStringObject() const;
+ DateObject *asDateObject() const;
+ RegExpObject *asRegExpObject() const;
+ ArrayObject *asArrayObject() const;
+ ErrorObject *asErrorObject() const;
+ uint asArrayIndex() const;
+ uint asArrayLength(bool *ok) const;
+
+ Value property(ExecutionContext *ctx, String *name) const;
+
+ // Section 9.12
+ bool sameValue(Value other) const;
+
+ void mark() const {
+ Managed *m = asManaged();
+ if (m)
+ m->mark();
+ }
+};
+
+inline Value Value::undefinedValue()
+{
+ Value v;
+#if QT_POINTER_SIZE == 8
+ v.val = quint64(_Undefined_Type) << Tag_Shift;
+#else
+ v.tag = _Undefined_Type;
+ v.int_32 = 0;
+#endif
+ return v;
+}
+
+inline Value Value::nullValue()
+{
+ Value v;
+#if QT_POINTER_SIZE == 8
+ v.val = quint64(_Null_Type) << Tag_Shift;
+#else
+ v.tag = _Null_Type;
+ v.int_32 = 0;
+#endif
+ return v;
+}
+
+inline VM::Value Value::deletedValue()
+{
+ VM::Value v;
+ v.tag = VM::Value::_Deleted_Type;
+ v.uint_32 = 0;
+ return v;
+}
+
+
+inline Value Value::fromBoolean(Bool b)
+{
+ Value v;
+ v.tag = _Boolean_Type;
+ v.int_32 = (bool)b;
+ return v;
+}
+
+inline Value Value::fromDouble(double d)
+{
+ Value v;
+ v.dbl = d;
+ return v;
+}
+
+inline Value Value::fromInt32(int i)
+{
+ Value v;
+ v.tag = _Integer_Type;
+ v.int_32 = i;
+ return v;
+}
+
+inline Value Value::fromUInt32(uint i)
+{
+ Value v;
+ if (i < INT_MAX) {
+ v.tag = _Integer_Type;
+ v.int_32 = (int)i;
+ } else {
+ v.dbl = i;
+ }
+ return v;
+}
+
+inline Value Value::fromString(String *s)
+{
+ Value v;
+#if QT_POINTER_SIZE == 8
+ v.val = (quint64)s;
+ v.val |= quint64(_String_Type) << Tag_Shift;
+#else
+ v.tag = _String_Type;
+ v.s = s;
+#endif
+ return v;
+}
+
+inline Value Value::fromObject(Object *o)
+{
+ Value v;
+#if QT_POINTER_SIZE == 8
+ v.val = (quint64)o;
+ v.val |= quint64(_Object_Type) << Tag_Shift;
+#else
+ v.tag = _Object_Type;
+ v.o = o;
+#endif
+ return v;
+}
+
+inline Bool Value::toBoolean() const
+{
+ switch (type()) {
+ case Value::Undefined_Type:
+ case Value::Null_Type:
+ return false;
+ case Value::Boolean_Type:
+ case Value::Integer_Type:
+ return (bool)int_32;
+ case Value::String_Type:
+ return stringValue()->toQString().length() > 0;
+ case Value::Object_Type:
+ return true;
+ default: // double
+ if (! doubleValue() || isnan(doubleValue()))
+ return false;
+ return true;
+ }
+}
+
+inline String *Value::toString(ExecutionContext *ctx) const
+{
+ if (isString())
+ return stringValue();
+ return __qmljs_convert_to_string(ctx, *this);
+}
+
+inline Object *Value::toObject(ExecutionContext *ctx) const
+{
+ if (isObject())
+ return objectValue();
+ return __qmljs_convert_to_object(ctx, *this);
+}
+
+inline int Value::toInt32() const
+{
+ if (isConvertibleToInt())
+ return int_32;
+ double d;
+ if (isDouble())
+ d = dbl;
+ else
+ d = __qmljs_to_number(*this);
+
+ const double D32 = 4294967296.0;
+ const double D31 = D32 / 2.0;
+
+ if ((d >= -D31 && d < D31))
+ return static_cast<int>(d);
+
+ return Value::toInt32(__qmljs_to_number(*this));
+}
+
+inline unsigned int Value::toUInt32() const
+{
+ if (isConvertibleToInt())
+ return (unsigned) int_32;
+ double d;
+ if (isDouble())
+ d = dbl;
+ else
+ d = __qmljs_to_number(*this);
+
+ const double D32 = 4294967296.0;
+ if (dbl >= 0 && dbl < D32)
+ return static_cast<uint>(dbl);
+ return toUInt32(d);
+}
+
+inline uint Value::asArrayIndex() const
+{
+ if (isInteger() && int_32 >= 0)
+ return (uint)int_32;
+ if (!isDouble())
+ return UINT_MAX;
+ uint idx = (uint)dbl;
+ if (idx != dbl)
+ return UINT_MAX;
+ return idx;
+}
+
+inline uint Value::asArrayLength(bool *ok) const
+{
+ *ok = true;
+ if (isConvertibleToInt() && int_32 >= 0)
+ return (uint)int_32;
+ if (isDouble()) {
+ uint idx = (uint)dbl;
+ if ((double)idx != dbl) {
+ *ok = false;
+ return UINT_MAX;
+ }
+ return idx;
+ }
+ if (isString())
+ return stringValue()->toUInt(ok);
+
+ uint idx = toUInt32();
+ double d = toNumber();
+ if (d != idx) {
+ *ok = false;
+ return UINT_MAX;
+ }
+ return idx;
+}
+
+inline String *Value::asString() const
+{
+ if (isString())
+ return stringValue();
+ return 0;
+}
+
+inline Managed *Value::asManaged() const
+{
+ if (isObject() || isString())
+ return managed();
+ return 0;
+}
+
+inline Object *Value::asObject() const
+{
+ return isObject() ? objectValue() : 0;
+}
+
+inline FunctionObject *Value::asFunctionObject() const
+{
+ return isObject() ? managed()->asFunctionObject() : 0;
+}
+
+inline BooleanObject *Value::asBooleanObject() const
+{
+ return isObject() ? managed()->asBooleanObject() : 0;
+}
+
+inline NumberObject *Value::asNumberObject() const
+{
+ return isObject() ? managed()->asNumberObject() : 0;
+}
+
+inline StringObject *Value::asStringObject() const
+{
+ return isObject() ? managed()->asStringObject() : 0;
+}
+
+inline DateObject *Value::asDateObject() const
+{
+ return isObject() ? managed()->asDateObject() : 0;
+}
+
+inline RegExpObject *Value::asRegExpObject() const
+{
+ return isObject() ? managed()->asRegExpObject() : 0;
+}
+
+inline ArrayObject *Value::asArrayObject() const
+{
+ return isObject() ? managed()->asArrayObject() : 0;
+}
+
+inline ErrorObject *Value::asErrorObject() const
+{
+ return isObject() ? managed()->asErrorObject() : 0;
+}
+
+// ###
+inline Value Managed::construct(ExecutionContext *context, Value *args, int argc) {
+ return vtbl->construct(this, context, args, argc);
+}
+inline Value Managed::call(ExecutionContext *context, const Value &thisObject, Value *args, int argc) {
+ return vtbl->call(this, context, thisObject, args, argc);
+}
+
+class PersistentValue
+{
+public:
+ PersistentValue();
+ PersistentValue(MemoryManager *mm, const Value &val);
+ PersistentValue(const PersistentValue &other);
+ PersistentValue &operator=(const PersistentValue &other);
+ ~PersistentValue();
+
+ Value *operator->() { return &m_value; }
+ Value *operator*() { return &m_value; }
+
+ operator Value() const { return m_value; }
+
+private:
+ Managed *asManaged() { return m_memoryManager ? m_value.asManaged() : 0; }
+ MemoryManager *m_memoryManager;
+ Value m_value;
+};
+
+} // namespace VM
+} // namespace QQmlJS
+
+QT_END_NAMESPACE
+
+#endif
diff --git a/src/qml/qml/v4vm/v4.pri b/src/qml/qml/v4vm/v4.pri
new file mode 100644
index 0000000000..3b9711bdb6
--- /dev/null
+++ b/src/qml/qml/v4vm/v4.pri
@@ -0,0 +1,8 @@
+include(llvm_installation.pri)
+include(../3rdparty/masm/masm-defs.pri)
+
+CONFIG += exceptions
+
+!llvm: DEFINES += QMLJS_NO_LLVM
+
+INCLUDEPATH += $$PWD
diff --git a/src/qml/qml/v4vm/v4.pro b/src/qml/qml/v4vm/v4.pro
new file mode 100644
index 0000000000..325a1013b1
--- /dev/null
+++ b/src/qml/qml/v4vm/v4.pro
@@ -0,0 +1,177 @@
+TARGET = QtV4
+QT_PRIVATE = core-private qmldevtools-private
+QT = core
+
+CONFIG += internal_module
+
+include(v4.pri)
+
+load(qt_build_config)
+load(qt_module)
+
+CONFIG += warn_off
+
+#win32-msvc*|win32-icc:QMAKE_LFLAGS += /BASE:0x66000000 #TODO ??!
+
+!macx-clang*:!win*:LIBS += -rdynamic
+
+SOURCES += \
+ qv4codegen.cpp \
+ qv4jsir.cpp \
+ qv4engine.cpp \
+ qv4context.cpp \
+ qv4runtime.cpp \
+ qv4value.cpp \
+ qv4syntaxchecker.cpp \
+ qv4isel_masm.cpp \
+ llvm_runtime.cpp \
+ qv4isel_p.cpp \
+ debugging.cpp \
+ qv4lookup.cpp \
+ qv4mm.cpp \
+ qv4managed.cpp \
+ qv4internalclass.cpp \
+ qv4sparsearray.cpp \
+ qv4arrayobject.cpp \
+ qv4argumentsobject.cpp \
+ qv4booleanobject.cpp \
+ qv4dateobject.cpp \
+ qv4errorobject.cpp \
+ qv4functionobject.cpp \
+ qv4globalobject.cpp \
+ qv4jsonobject.cpp \
+ qv4mathobject.cpp \
+ qv4numberobject.cpp \
+ qv4object.cpp \
+ qv4objectproto.cpp \
+ qv4regexpobject.cpp \
+ qv4stringobject.cpp \
+ qv4string.cpp \
+ qv4objectiterator.cpp \
+ qv4regexp.cpp \
+ qv4unwindhelper.cpp \
+ qv4v8.cpp \
+ qv4executableallocator.cpp
+
+HEADERS += \
+ qv4global.h \
+ qv4codegen_p.h \
+ qv4jsir_p.h \
+ qv4engine.h \
+ qv4context.h \
+ qv4runtime.h \
+ qv4math.h \
+ qv4value.h \
+ qv4syntaxchecker_p.h \
+ qv4isel_masm_p.h \
+ qv4isel_p.h \
+ qv4isel_util_p.h \
+ debugging.h \
+ qv4lookup.h \
+ qv4identifier.h \
+ qv4mm.h \
+ qv4managed.h \
+ qv4internalclass.h \
+ qv4sparsearray.h \
+ qv4arrayobject.h \
+ qv4argumentsobject.h \
+ qv4booleanobject.h \
+ qv4dateobject.h \
+ qv4errorobject.h \
+ qv4functionobject.h \
+ qv4globalobject.h \
+ qv4jsonobject.h \
+ qv4mathobject.h \
+ qv4numberobject.h \
+ qv4object.h \
+ qv4objectproto.h \
+ qv4regexpobject.h \
+ qv4stringobject.h \
+ qv4string.h \
+ qv4property.h \
+ qv4objectiterator.h \
+ qv4regexp.h \
+ qv4unwindhelper.h \
+ qv4unwindhelper_p-dw2.h \
+ qv4unwindhelper_p-arm.h \
+ qv4v8.h \
+ qcalculatehash_p.h \
+ qv4util.h \
+ qv4executableallocator.h
+
+llvm-libs {
+
+SOURCES += \
+ qv4isel_llvm.cpp
+
+HEADERS += \
+ qv4isel_llvm_p.h \
+ qv4_llvm_p.h
+
+LLVM_RUNTIME_BC = $$PWD/llvm_runtime.bc
+DEFINES += LLVM_RUNTIME="\"\\\"$$LLVM_RUNTIME_BC\\\"\""
+DEFINES += QMLJS_WITH_LLVM
+
+INCLUDEPATH += \
+ $$system($$LLVM_CONFIG --includedir)
+
+QMAKE_CXXFLAGS += $$system($$LLVM_CONFIG --cppflags) -fvisibility-inlines-hidden
+QMAKE_CXXFLAGS -= -pedantic
+QMAKE_CXXFLAGS -= -Wcovered-switch-default
+
+LIBS += \
+ $$system($$LLVM_CONFIG --ldflags) \
+ $$system($$LLVM_CONFIG --libs core jit bitreader linker ipo target x86 arm native)
+
+QMAKE_EXTRA_TARGETS += gen_llvm_runtime
+
+GEN_LLVM_RUNTIME_FLAGS = $$system($$LLVM_CONFIG --cppflags)
+GEN_LLVM_RUNTIME_FLAGS -= -pedantic
+
+gen_llvm_runtime.target = llvm_runtime
+gen_llvm_runtime.commands = clang -O2 -emit-llvm -c -I$$PWD -I$$PWD/../3rdparty/masm $$join(QT.core.includes, " -I", "-I") $$GEN_LLVM_RUNTIME_FLAGS -DQMLJS_LLVM_RUNTIME llvm_runtime.cpp -o $$LLVM_RUNTIME_BC
+}
+
+# Use SSE2 floating point math on 32 bit instead of the default
+# 387 to make test results pass on 32 and on 64 bit builds.
+linux-g++*:isEqual(QT_ARCH,i386) {
+ QMAKE_CFLAGS += -march=pentium4 -msse2 -mfpmath=sse
+ QMAKE_CXXFLAGS += -march=pentium4 -msse2 -mfpmath=sse
+}
+
+TESTSCRIPT=$$PWD/../../tests/test262.py
+V4CMD = v4
+
+checktarget.target = check
+checktarget.commands = python $$TESTSCRIPT --command=$$V4CMD --parallel --with-test-expectations --update-expectations
+checktarget.depends = all
+QMAKE_EXTRA_TARGETS += checktarget
+
+checkmothtarget.target = check-interpreter
+checkmothtarget.commands = python $$TESTSCRIPT --command=\"$$V4CMD --interpret\" --parallel --with-test-expectations
+checkmothtarget.depends = all
+QMAKE_EXTRA_TARGETS += checkmothtarget
+
+linux*|mac {
+ LIBS += -ldl
+}
+
+debug-with-libunwind {
+ UW_INC=$$(LIBUNWIND_INCLUDES)
+ isEmpty(UW_INC): error("Please set LIBUNWIND_INCLUDES")
+ INCLUDEPATH += $$UW_INC
+ UW_LIBS=$$(LIBUNWIND_LIBS)
+ isEmpty(UW_LIBS): error("Please set LIBUNWIND_LIBS")
+ LIBS += -L$$UW_LIBS
+ equals(QT_ARCH, arm): LIBS += -lunwind-arm
+ LIBS += -lunwind-dwarf-common -lunwind-dwarf-local -lunwind-elf32 -lunwind
+ DEFINES += WTF_USE_LIBUNWIND_DEBUG=1
+}
+
+valgrind {
+ DEFINES += V4_USE_VALGRIND
+}
+
+include(moth/moth.pri)
+include(../3rdparty/masm/masm.pri)
+include(../3rdparty/double-conversion/double-conversion.pri)