-rw-r--r--  .gitmodules | 3
-rw-r--r--  src/3rdparty/double-conversion/README | 6
-rw-r--r--  src/3rdparty/double-conversion/bignum-dtoa.cc | 640
-rw-r--r--  src/3rdparty/double-conversion/bignum-dtoa.h | 84
-rw-r--r--  src/3rdparty/double-conversion/bignum.cc | 764
-rw-r--r--  src/3rdparty/double-conversion/bignum.h | 145
-rw-r--r--  src/3rdparty/double-conversion/cached-powers.cc | 175
-rw-r--r--  src/3rdparty/double-conversion/cached-powers.h | 64
-rw-r--r--  src/3rdparty/double-conversion/diy-fp.cc | 57
-rw-r--r--  src/3rdparty/double-conversion/diy-fp.h | 118
-rw-r--r--  src/3rdparty/double-conversion/double-conversion.cc | 889
-rw-r--r--  src/3rdparty/double-conversion/double-conversion.h | 536
-rw-r--r--  src/3rdparty/double-conversion/double-conversion.pri | 4
-rw-r--r--  src/3rdparty/double-conversion/fast-dtoa.cc | 664
-rw-r--r--  src/3rdparty/double-conversion/fast-dtoa.h | 88
-rw-r--r--  src/3rdparty/double-conversion/fixed-dtoa.cc | 402
-rw-r--r--  src/3rdparty/double-conversion/fixed-dtoa.h | 56
-rw-r--r--  src/3rdparty/double-conversion/ieee.h | 398
-rw-r--r--  src/3rdparty/double-conversion/strtod.cc | 554
-rw-r--r--  src/3rdparty/double-conversion/strtod.h | 45
-rw-r--r--  src/3rdparty/double-conversion/utils.h | 313
-rw-r--r--  src/3rdparty/masm/WeakRandom.h | 52
-rw-r--r--  src/3rdparty/masm/assembler/ARMAssembler.cpp | 444
-rw-r--r--  src/3rdparty/masm/assembler/ARMAssembler.h | 1129
-rw-r--r--  src/3rdparty/masm/assembler/ARMv7Assembler.cpp | 36
-rw-r--r--  src/3rdparty/masm/assembler/ARMv7Assembler.h | 2790
-rw-r--r--  src/3rdparty/masm/assembler/AbstractMacroAssembler.h | 842
-rw-r--r--  src/3rdparty/masm/assembler/AssemblerBuffer.h | 181
-rw-r--r--  src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h | 342
-rw-r--r--  src/3rdparty/masm/assembler/CodeLocation.h | 218
-rw-r--r--  src/3rdparty/masm/assembler/LinkBuffer.cpp | 230
-rw-r--r--  src/3rdparty/masm/assembler/LinkBuffer.h | 297
-rw-r--r--  src/3rdparty/masm/assembler/MIPSAssembler.h | 1107
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssembler.h | 1465
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARM.cpp | 99
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARM.h | 1386
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARMv7.h | 1914
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerCodeRef.h | 406
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerMIPS.h | 2751
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerSH4.cpp | 52
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerSH4.h | 2293
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerX86.h | 314
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerX86Common.h | 1541
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerX86_64.h | 643
-rw-r--r--  src/3rdparty/masm/assembler/RepatchBuffer.h | 181
-rw-r--r--  src/3rdparty/masm/assembler/SH4Assembler.h | 2152
-rw-r--r--  src/3rdparty/masm/assembler/X86Assembler.h | 2540
-rw-r--r--  src/3rdparty/masm/config.h | 56
-rw-r--r--  src/3rdparty/masm/create_regex_tables | 121
-rw-r--r--  src/3rdparty/masm/disassembler/Disassembler.cpp | 43
-rw-r--r--  src/3rdparty/masm/disassembler/Disassembler.h | 52
-rw-r--r--  src/3rdparty/masm/disassembler/UDis86Disassembler.cpp | 63
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/differences.txt | 24
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/itab.py | 360
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/optable.xml | 8959
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/ud_opcode.py | 235
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/ud_optable.py | 103
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86.c | 182
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86.h | 33
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_decode.c | 1141
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_decode.h | 258
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_extern.h | 88
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_input.c | 262
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_input.h | 67
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_itab_holder.c | 33
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_syn-att.c | 252
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_syn-intel.c | 278
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_syn.c | 86
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_syn.h | 47
-rw-r--r--  src/3rdparty/masm/disassembler/udis86/udis86_types.h | 238
-rw-r--r--  src/3rdparty/masm/jit/JITCompilationEffort.h | 39
-rw-r--r--  src/3rdparty/masm/masm-defs.pri | 28
-rw-r--r--  src/3rdparty/masm/masm.pri | 86
-rw-r--r--  src/3rdparty/masm/runtime/MatchResult.h | 71
-rw-r--r--  src/3rdparty/masm/stubs/ExecutableAllocator.h | 120
-rw-r--r--  src/3rdparty/masm/stubs/JSGlobalData.h | 65
-rw-r--r--  src/3rdparty/masm/stubs/LLIntData.h | 0
-rw-r--r--  src/3rdparty/masm/stubs/Options.h | 53
-rw-r--r--  src/3rdparty/masm/stubs/WTFStubs.cpp | 131
-rw-r--r--  src/3rdparty/masm/stubs/WTFStubs.h | 50
-rw-r--r--  src/3rdparty/masm/stubs/wtf/FastAllocBase.h | 48
-rw-r--r--  src/3rdparty/masm/stubs/wtf/FastMalloc.h | 46
-rw-r--r--  src/3rdparty/masm/stubs/wtf/Noncopyable.h | 48
-rw-r--r--  src/3rdparty/masm/stubs/wtf/OwnPtr.h | 46
-rw-r--r--  src/3rdparty/masm/stubs/wtf/PassOwnPtr.h | 120
-rw-r--r--  src/3rdparty/masm/stubs/wtf/PassRefPtr.h | 101
-rw-r--r--  src/3rdparty/masm/stubs/wtf/RefCounted.h | 70
-rw-r--r--  src/3rdparty/masm/stubs/wtf/RefPtr.h | 93
-rw-r--r--  src/3rdparty/masm/stubs/wtf/TypeTraits.h | 58
-rw-r--r--  src/3rdparty/masm/stubs/wtf/UnusedParam.h | 48
-rw-r--r--  src/3rdparty/masm/stubs/wtf/Vector.h | 104
-rw-r--r--  src/3rdparty/masm/stubs/wtf/text/CString.h | 44
-rw-r--r--  src/3rdparty/masm/stubs/wtf/text/WTFString.h | 75
-rw-r--r--  src/3rdparty/masm/stubs/wtf/unicode/Unicode.h | 59
-rw-r--r--  src/3rdparty/masm/wtf/ASCIICType.h | 181
-rw-r--r--  src/3rdparty/masm/wtf/Assertions.h | 428
-rw-r--r--  src/3rdparty/masm/wtf/Atomics.h | 227
-rw-r--r--  src/3rdparty/masm/wtf/BumpPointerAllocator.h | 252
-rw-r--r--  src/3rdparty/masm/wtf/CheckedArithmetic.h | 721
-rw-r--r--  src/3rdparty/masm/wtf/Compiler.h | 302
-rw-r--r--  src/3rdparty/masm/wtf/CryptographicallyRandomNumber.h | 45
-rw-r--r--  src/3rdparty/masm/wtf/DataLog.h | 128
-rw-r--r--  src/3rdparty/masm/wtf/DynamicAnnotations.h | 96
-rw-r--r--  src/3rdparty/masm/wtf/EnumClass.h | 134
-rw-r--r--  src/3rdparty/masm/wtf/FeatureDefines.h | 874
-rw-r--r--  src/3rdparty/masm/wtf/FilePrintStream.cpp | 64
-rw-r--r--  src/3rdparty/masm/wtf/FilePrintStream.h | 62
-rw-r--r--  src/3rdparty/masm/wtf/Locker.h | 48
-rw-r--r--  src/3rdparty/masm/wtf/MathExtras.h | 459
-rw-r--r--  src/3rdparty/masm/wtf/NotFound.h | 37
-rw-r--r--  src/3rdparty/masm/wtf/NullPtr.h | 56
-rw-r--r--  src/3rdparty/masm/wtf/OSAllocator.h | 115
-rw-r--r--  src/3rdparty/masm/wtf/OSAllocatorPosix.cpp | 193
-rw-r--r--  src/3rdparty/masm/wtf/OSAllocatorWin.cpp | 84
-rw-r--r--  src/3rdparty/masm/wtf/PageAllocation.h | 120
-rw-r--r--  src/3rdparty/masm/wtf/PageAllocationAligned.cpp | 85
-rw-r--r--  src/3rdparty/masm/wtf/PageAllocationAligned.h | 70
-rw-r--r--  src/3rdparty/masm/wtf/PageBlock.cpp | 78
-rw-r--r--  src/3rdparty/masm/wtf/PageBlock.h | 88
-rw-r--r--  src/3rdparty/masm/wtf/PageReservation.h | 149
-rw-r--r--  src/3rdparty/masm/wtf/Platform.h | 1019
-rw-r--r--  src/3rdparty/masm/wtf/PossiblyNull.h | 59
-rw-r--r--  src/3rdparty/masm/wtf/PrintStream.cpp | 114
-rw-r--r--  src/3rdparty/masm/wtf/PrintStream.h | 300
-rw-r--r--  src/3rdparty/masm/wtf/RawPointer.h | 58
-rw-r--r--  src/3rdparty/masm/wtf/StdLibExtras.h | 282
-rw-r--r--  src/3rdparty/masm/wtf/VMTags.h | 75
-rw-r--r--  src/3rdparty/masm/yarr/Yarr.h | 69
-rw-r--r--  src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.cpp | 463
-rw-r--r--  src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.h | 138
-rw-r--r--  src/3rdparty/masm/yarr/YarrCanonicalizeUCS2.js | 219
-rw-r--r--  src/3rdparty/masm/yarr/YarrInterpreter.cpp | 1959
-rw-r--r--  src/3rdparty/masm/yarr/YarrInterpreter.h | 380
-rw-r--r--  src/3rdparty/masm/yarr/YarrJIT.cpp | 2702
-rw-r--r--  src/3rdparty/masm/yarr/YarrJIT.h | 141
-rw-r--r--  src/3rdparty/masm/yarr/YarrParser.h | 880
-rw-r--r--  src/3rdparty/masm/yarr/YarrPattern.cpp | 880
-rw-r--r--  src/3rdparty/masm/yarr/YarrPattern.h | 401
-rw-r--r--  src/3rdparty/masm/yarr/YarrSyntaxChecker.cpp | 59
-rw-r--r--  src/3rdparty/masm/yarr/YarrSyntaxChecker.h | 38
-rw-r--r--  src/3rdparty/masm/yarr/yarr.pri | 12
-rw-r--r--  src/qml/qml/v4vm/debugging.cpp | 308
-rw-r--r--  src/qml/qml/v4vm/debugging.h | 157
-rw-r--r--  src/qml/qml/v4vm/llvm_installation.pri | 23
-rw-r--r--  src/qml/qml/v4vm/llvm_runtime.cpp | 513
-rw-r--r--  src/qml/qml/v4vm/moth/moth.pri | 13
-rw-r--r--  src/qml/qml/v4vm/moth/qv4instr_moth.cpp | 15
-rw-r--r--  src/qml/qml/v4vm/moth/qv4instr_moth_p.h | 527
-rw-r--r--  src/qml/qml/v4vm/moth/qv4isel_moth.cpp | 812
-rw-r--r--  src/qml/qml/v4vm/moth/qv4isel_moth_p.h | 169
-rw-r--r--  src/qml/qml/v4vm/moth/qv4vme_moth.cpp | 532
-rw-r--r--  src/qml/qml/v4vm/moth/qv4vme_moth_p.h | 35
-rw-r--r--  src/qml/qml/v4vm/qcalculatehash_p.h | 73
-rw-r--r--  src/qml/qml/v4vm/qv4_llvm_p.h | 65
-rw-r--r--  src/qml/qml/v4vm/qv4alloca_p.h | 54
-rw-r--r--  src/qml/qml/v4vm/qv4argumentsobject.cpp | 176
-rw-r--r--  src/qml/qml/v4vm/qv4argumentsobject.h | 99
-rw-r--r--  src/qml/qml/v4vm/qv4arrayobject.cpp | 859
-rw-r--r--  src/qml/qml/v4vm/qv4arrayobject.h | 103
-rw-r--r--  src/qml/qml/v4vm/qv4booleanobject.cpp | 97
-rw-r--r--  src/qml/qml/v4vm/qv4booleanobject.h | 79
-rw-r--r--  src/qml/qml/v4vm/qv4codegen.cpp | 3256
-rw-r--r--  src/qml/qml/v4vm/qv4codegen_p.h | 444
-rw-r--r--  src/qml/qml/v4vm/qv4context.cpp | 576
-rw-r--r--  src/qml/qml/v4vm/qv4context.h | 194
-rw-r--r--  src/qml/qml/v4vm/qv4dateobject.cpp | 1316
-rw-r--r--  src/qml/qml/v4vm/qv4dateobject.h | 132
-rw-r--r--  src/qml/qml/v4vm/qv4engine.cpp | 551
-rw-r--r--  src/qml/qml/v4vm/qv4engine.h | 271
-rw-r--r--  src/qml/qml/v4vm/qv4errorobject.cpp | 238
-rw-r--r--  src/qml/qml/v4vm/qv4errorobject.h | 235
-rw-r--r--  src/qml/qml/v4vm/qv4executableallocator.cpp | 208
-rw-r--r--  src/qml/qml/v4vm/qv4executableallocator.h | 121
-rw-r--r--  src/qml/qml/v4vm/qv4functionobject.cpp | 522
-rw-r--r--  src/qml/qml/v4vm/qv4functionobject.h | 243
-rw-r--r--  src/qml/qml/v4vm/qv4global.h | 169
-rw-r--r--  src/qml/qml/v4vm/qv4globalobject.cpp | 731
-rw-r--r--  src/qml/qml/v4vm/qv4globalobject.h | 93
-rw-r--r--  src/qml/qml/v4vm/qv4identifier.h | 111
-rw-r--r--  src/qml/qml/v4vm/qv4internalclass.cpp | 188
-rw-r--r--  src/qml/qml/v4vm/qv4internalclass.h | 91
-rw-r--r--  src/qml/qml/v4vm/qv4isel_llvm.cpp | 1375
-rw-r--r--  src/qml/qml/v4vm/qv4isel_llvm_p.h | 177
-rw-r--r--  src/qml/qml/v4vm/qv4isel_masm.cpp | 1252
-rw-r--r--  src/qml/qml/v4vm/qv4isel_masm_p.h | 894
-rw-r--r--  src/qml/qml/v4vm/qv4isel_p.cpp | 398
-rw-r--r--  src/qml/qml/v4vm/qv4isel_p.h | 158
-rw-r--r--  src/qml/qml/v4vm/qv4isel_util_p.h | 77
-rw-r--r--  src/qml/qml/v4vm/qv4jsir.cpp | 948
-rw-r--r--  src/qml/qml/v4vm/qv4jsir_p.h | 821
-rw-r--r--  src/qml/qml/v4vm/qv4jsonobject.cpp | 936
-rw-r--r--  src/qml/qml/v4vm/qv4jsonobject.h | 65
-rw-r--r--  src/qml/qml/v4vm/qv4lookup.cpp | 332
-rw-r--r--  src/qml/qml/v4vm/qv4lookup.h | 144
-rw-r--r--  src/qml/qml/v4vm/qv4managed.cpp | 187
-rw-r--r--  src/qml/qml/v4vm/qv4managed.h | 247
-rw-r--r--  src/qml/qml/v4vm/qv4math.h | 120
-rw-r--r--  src/qml/qml/v4vm/qv4mathobject.cpp | 311
-rw-r--r--  src/qml/qml/v4vm/qv4mathobject.h | 80
-rw-r--r--  src/qml/qml/v4vm/qv4mm.cpp | 493
-rw-r--r--  src/qml/qml/v4vm/qv4mm.h | 155
-rw-r--r--  src/qml/qml/v4vm/qv4numberobject.cpp | 237
-rw-r--r--  src/qml/qml/v4vm/qv4numberobject.h | 83
-rw-r--r--  src/qml/qml/v4vm/qv4object.cpp | 1177
-rw-r--r--  src/qml/qml/v4vm/qv4object.h | 417
-rw-r--r--  src/qml/qml/v4vm/qv4objectiterator.cpp | 185
-rw-r--r--  src/qml/qml/v4vm/qv4objectiterator.h | 84
-rw-r--r--  src/qml/qml/v4vm/qv4objectproto.cpp | 565
-rw-r--r--  src/qml/qml/v4vm/qv4objectproto.h | 104
-rw-r--r--  src/qml/qml/v4vm/qv4property.h | 152
-rw-r--r--  src/qml/qml/v4vm/qv4regexp.cpp | 167
-rw-r--r--  src/qml/qml/v4vm/qv4regexp.h | 149
-rw-r--r--  src/qml/qml/v4vm/qv4regexpobject.cpp | 256
-rw-r--r--  src/qml/qml/v4vm/qv4regexpobject.h | 108
-rw-r--r--  src/qml/qml/v4vm/qv4runtime.cpp | 1319
-rw-r--r--  src/qml/qml/v4vm/qv4runtime.h | 745
-rw-r--r--  src/qml/qml/v4vm/qv4sparsearray.cpp | 464
-rw-r--r--  src/qml/qml/v4vm/qv4sparsearray.h | 369
-rw-r--r--  src/qml/qml/v4vm/qv4string.cpp | 242
-rw-r--r--  src/qml/qml/v4vm/qv4string.h | 136
-rw-r--r--  src/qml/qml/v4vm/qv4stringobject.cpp | 726
-rw-r--r--  src/qml/qml/v4vm/qv4stringobject.h | 108
-rw-r--r--  src/qml/qml/v4vm/qv4syntaxchecker.cpp | 119
-rw-r--r--  src/qml/qml/v4vm/qv4syntaxchecker_p.h | 73
-rw-r--r--  src/qml/qml/v4vm/qv4unwindhelper.cpp | 37
-rw-r--r--  src/qml/qml/v4vm/qv4unwindhelper.h | 27
-rw-r--r--  src/qml/qml/v4vm/qv4unwindhelper_p-arm.h | 176
-rw-r--r--  src/qml/qml/v4vm/qv4unwindhelper_p-dw2.h | 189
-rw-r--r--  src/qml/qml/v4vm/qv4util.h | 74
-rw-r--r--  src/qml/qml/v4vm/qv4v8.cpp | 2141
-rw-r--r--  src/qml/qml/v4vm/qv4v8.h | 2581
-rw-r--r--  src/qml/qml/v4vm/qv4value.cpp | 214
-rw-r--r--  src/qml/qml/v4vm/qv4value.h | 572
-rw-r--r--  src/qml/qml/v4vm/v4.pri | 8
-rw-r--r--  src/qml/qml/v4vm/v4.pro | 177
-rw-r--r--  tests/manual/v4/TestExpectations | 21
-rw-r--r--  tests/manual/v4/accessors.js | 8
-rw-r--r--  tests/manual/v4/array.1.js | 32
-rw-r--r--  tests/manual/v4/auto/auto.pro | 4
-rw-r--r--  tests/manual/v4/auto/executableallocator/executableallocator.pro | 4
-rw-r--r--  tests/manual/v4/auto/executableallocator/tst_executableallocator.cpp | 97
-rw-r--r--  tests/manual/v4/crypto.js | 1712
-rw-r--r--  tests/manual/v4/exceptions.1.js | 16
-rw-r--r--  tests/manual/v4/exceptions.2.js | 2
-rw-r--r--  tests/manual/v4/fact.2.js | 8
-rw-r--r--  tests/manual/v4/fact.js | 15
-rw-r--r--  tests/manual/v4/for/for.1.js | 6
-rw-r--r--  tests/manual/v4/for/for.2.js | 7
-rw-r--r--  tests/manual/v4/for/for.3.js | 7
-rw-r--r--  tests/manual/v4/for/for.4.js | 7
-rw-r--r--  tests/manual/v4/fun.1.js | 7
-rw-r--r--  tests/manual/v4/fun.2.js | 14
-rw-r--r--  tests/manual/v4/fun.3.js | 17
-rw-r--r--  tests/manual/v4/fun.4.js | 18
-rw-r--r--  tests/manual/v4/instanceof.1.js | 9
-rw-r--r--  tests/manual/v4/label.1.js | 8
-rw-r--r--  tests/manual/v4/label.2.js | 2
-rw-r--r--  tests/manual/v4/label.3.js | 6
-rw-r--r--  tests/manual/v4/obj.1.js | 8
-rw-r--r--  tests/manual/v4/obj.2.js | 10
-rw-r--r--  tests/manual/v4/property_lookup.js | 9
-rw-r--r--  tests/manual/v4/prototype.1.js | 14
-rw-r--r--  tests/manual/v4/prototype.2.js | 13
-rw-r--r--  tests/manual/v4/prototype.3.js | 10
-rw-r--r--  tests/manual/v4/prototype.4.js | 9
-rw-r--r--  tests/manual/v4/regexp.1.js | 4
-rw-r--r--  tests/manual/v4/simple.2.js | 19
-rw-r--r--  tests/manual/v4/simple.js | 15
-rw-r--r--  tests/manual/v4/string.1.js | 20
-rw-r--r--  tests/manual/v4/switch.1.js | 16
-rw-r--r--  tests/manual/v4/switch.2.js | 11
m---------  tests/manual/v4/test262 | 0
-rwxr-xr-x  tests/manual/v4/test262.py | 555
-rw-r--r--  tests/manual/v4/tests.pro | 2
-rw-r--r--  tests/manual/v4/typeof.1.js | 12
-rw-r--r--  tests/manual/v4/v8-bench.js | 11591
-rw-r--r--  tests/manual/v4/with.js | 43
-rw-r--r--  tools/fdegen/fdegen.pro | 8
-rw-r--r--  tools/fdegen/main.cpp | 372
-rw-r--r--  tools/v4/main.cpp | 402
-rw-r--r--  tools/v4/v4.pro | 10
281 files changed, 114268 insertions(+), 0 deletions(-)
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000..b813acb9f2
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "tests/manual/v4/test262"]
+ path = tests/manual/v4/test262
+ url = git://github.com/tronical/test262.git
diff --git a/src/3rdparty/double-conversion/README b/src/3rdparty/double-conversion/README
new file mode 100644
index 0000000000..40ed4a7efd
--- /dev/null
+++ b/src/3rdparty/double-conversion/README
@@ -0,0 +1,6 @@
+This is a copy of the library for binary-decimal and decimal-binary conversion routines for IEEE doubles, taken
+from
+
+ http://code.google.com/p/double-conversion/
+
+commit e5b34421b763f7bf7e4f9081403db417d5a55a36
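
For orientation, a minimal sketch (not part of the patch) of how the imported library is typically driven through the public API declared in double-conversion.h; StringBuilder comes from utils.h, and the printed value is what the shortest mode is expected to produce:

    #include <cstdio>

    #include "double-conversion.h"

    int main() {
        using namespace double_conversion;
        char text[64];
        StringBuilder builder(text, sizeof(text));
        // Shortest representation that still round-trips to the same double,
        // e.g. "0.1" rather than "0.1000000000000000055511151231257827".
        DoubleToStringConverter::EcmaScriptConverter().ToShortest(0.1, &builder);
        std::puts(builder.Finalize());  // prints "0.1"
        return 0;
    }
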
diff --git a/src/3rdparty/double-conversion/bignum-dtoa.cc b/src/3rdparty/double-conversion/bignum-dtoa.cc
new file mode 100644
index 0000000000..b6c2e85d17
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum-dtoa.cc
@@ -0,0 +1,640 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <math.h>
+
+#include "bignum-dtoa.h"
+
+#include "bignum.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+static int NormalizedExponent(uint64_t significand, int exponent) {
+ ASSERT(significand != 0);
+ while ((significand & Double::kHiddenBit) == 0) {
+ significand = significand << 1;
+ exponent = exponent - 1;
+ }
+ return exponent;
+}
+
+
+// Forward declarations:
+// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
+static int EstimatePower(int exponent);
+// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
+// and denominator.
+static void InitialScaledStartValues(uint64_t significand,
+ int exponent,
+ bool lower_boundary_is_closer,
+ int estimated_power,
+ bool need_boundary_deltas,
+ Bignum* numerator,
+ Bignum* denominator,
+ Bignum* delta_minus,
+ Bignum* delta_plus);
+// Multiplies numerator/denominator so that its value lies in the range 1-10.
+// Returns decimal_point s.t.
+// v = numerator'/denominator' * 10^(decimal_point-1)
+// where numerator' and denominator' are the values of numerator and
+// denominator after the call to this function.
+static void FixupMultiply10(int estimated_power, bool is_even,
+ int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus);
+// Generates digits from the left to the right and stops when the generated
+// digits yield the shortest decimal representation of v.
+static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus,
+ bool is_even,
+ Vector<char> buffer, int* length);
+// Generates 'requested_digits' after the decimal point.
+static void BignumToFixed(int requested_digits, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length);
+// Generates 'count' digits of numerator/denominator.
+// Once 'count' digits have been produced rounds the result depending on the
+// remainder (remainders of exactly .5 round upwards). Might update the
+// decimal_point when rounding up (for example for 0.9999).
+static void GenerateCountedDigits(int count, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length);
+
+
+void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* decimal_point) {
+ ASSERT(v > 0);
+ ASSERT(!Double(v).IsSpecial());
+ uint64_t significand;
+ int exponent;
+ bool lower_boundary_is_closer;
+ if (mode == BIGNUM_DTOA_SHORTEST_SINGLE) {
+ float f = static_cast<float>(v);
+ ASSERT(f == v);
+ significand = Single(f).Significand();
+ exponent = Single(f).Exponent();
+ lower_boundary_is_closer = Single(f).LowerBoundaryIsCloser();
+ } else {
+ significand = Double(v).Significand();
+ exponent = Double(v).Exponent();
+ lower_boundary_is_closer = Double(v).LowerBoundaryIsCloser();
+ }
+ bool need_boundary_deltas =
+ (mode == BIGNUM_DTOA_SHORTEST || mode == BIGNUM_DTOA_SHORTEST_SINGLE);
+
+ bool is_even = (significand & 1) == 0;
+ int normalized_exponent = NormalizedExponent(significand, exponent);
+ // estimated_power might be too low by 1.
+ int estimated_power = EstimatePower(normalized_exponent);
+
+ // Shortcut for Fixed.
+ // The requested digits correspond to the digits after the point. If the
+ // number is much too small, then there is no need in trying to get any
+ // digits.
+ if (mode == BIGNUM_DTOA_FIXED && -estimated_power - 1 > requested_digits) {
+ buffer[0] = '\0';
+ *length = 0;
+ // Set decimal-point to -requested_digits. This is what Gay does.
+ // Note that it should not have any effect anyways since the string is
+ // empty.
+ *decimal_point = -requested_digits;
+ return;
+ }
+
+ Bignum numerator;
+ Bignum denominator;
+ Bignum delta_minus;
+ Bignum delta_plus;
+ // Make sure the bignum can grow large enough. The smallest double equals
+ // 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
+ // The maximum double is 1.7976931348623157e308 which needs fewer than
+ // 308*4 binary digits.
+ ASSERT(Bignum::kMaxSignificantBits >= 324*4);
+ InitialScaledStartValues(significand, exponent, lower_boundary_is_closer,
+ estimated_power, need_boundary_deltas,
+ &numerator, &denominator,
+ &delta_minus, &delta_plus);
+ // We now have v = (numerator / denominator) * 10^estimated_power.
+ FixupMultiply10(estimated_power, is_even, decimal_point,
+ &numerator, &denominator,
+ &delta_minus, &delta_plus);
+ // We now have v = (numerator / denominator) * 10^(decimal_point-1), and
+ // 1 <= (numerator + delta_plus) / denominator < 10
+ switch (mode) {
+ case BIGNUM_DTOA_SHORTEST:
+ case BIGNUM_DTOA_SHORTEST_SINGLE:
+ GenerateShortestDigits(&numerator, &denominator,
+ &delta_minus, &delta_plus,
+ is_even, buffer, length);
+ break;
+ case BIGNUM_DTOA_FIXED:
+ BignumToFixed(requested_digits, decimal_point,
+ &numerator, &denominator,
+ buffer, length);
+ break;
+ case BIGNUM_DTOA_PRECISION:
+ GenerateCountedDigits(requested_digits, decimal_point,
+ &numerator, &denominator,
+ buffer, length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ buffer[*length] = '\0';
+}
+
+
+// The procedure starts generating digits from the left to the right and stops
+// when the generated digits yield the shortest decimal representation of v. A
+// decimal representation of v is a number lying closer to v than to any other
+// double, so it converts to v when read.
+//
+// This is true if d, the decimal representation, is between m- and m+, the
+// upper and lower boundaries. d must be strictly between them if !is_even.
+// m- := (numerator - delta_minus) / denominator
+// m+ := (numerator + delta_plus) / denominator
+//
+// Precondition: 0 <= (numerator+delta_plus) / denominator < 10.
+// If 1 <= (numerator+delta_plus) / denominator < 10 then no leading 0 digit
+// will be produced. This should be the standard precondition.
+static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus,
+ bool is_even,
+ Vector<char> buffer, int* length) {
+ // Small optimization: if delta_minus and delta_plus are the same just reuse
+ // one of the two bignums.
+ if (Bignum::Equal(*delta_minus, *delta_plus)) {
+ delta_plus = delta_minus;
+ }
+ *length = 0;
+ while (true) {
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
+ // digit = numerator / denominator (integer division).
+ // numerator = numerator % denominator.
+ buffer[(*length)++] = digit + '0';
+
+ // Can we stop already?
+ // If the remainder of the division is less than the distance to the lower
+ // boundary we can stop. In this case we simply round down (discarding the
+ // remainder).
+ // Similarly we test if we can round up (using the upper boundary).
+ bool in_delta_room_minus;
+ bool in_delta_room_plus;
+ if (is_even) {
+ in_delta_room_minus = Bignum::LessEqual(*numerator, *delta_minus);
+ } else {
+ in_delta_room_minus = Bignum::Less(*numerator, *delta_minus);
+ }
+ if (is_even) {
+ in_delta_room_plus =
+ Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
+ } else {
+ in_delta_room_plus =
+ Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
+ }
+ if (!in_delta_room_minus && !in_delta_room_plus) {
+ // Prepare for next iteration.
+ numerator->Times10();
+ delta_minus->Times10();
+ // We optimized delta_plus to be equal to delta_minus (if they share the
+ // same value). So don't multiply delta_plus if they point to the same
+ // object.
+ if (delta_minus != delta_plus) {
+ delta_plus->Times10();
+ }
+ } else if (in_delta_room_minus && in_delta_room_plus) {
+ // Let's see if 2*numerator < denominator.
+ // If yes, then the next digit would be < 5 and we can round down.
+ int compare = Bignum::PlusCompare(*numerator, *numerator, *denominator);
+ if (compare < 0) {
+ // Remaining digits are less than .5. -> Round down (== do nothing).
+ } else if (compare > 0) {
+ // Remaining digits are more than .5 of denominator. -> Round up.
+ // Note that the last digit could not be a '9' as otherwise the whole
+ // loop would have stopped earlier.
+ // We still have an assert here in case the preconditions were not
+ // satisfied.
+ ASSERT(buffer[(*length) - 1] != '9');
+ buffer[(*length) - 1]++;
+ } else {
+ // Halfway case.
+ // TODO(floitsch): need a way to solve half-way cases.
+ // For now let's round towards even (since this is what Gay seems to
+ // do).
+
+ if ((buffer[(*length) - 1] - '0') % 2 == 0) {
+ // Round down => Do nothing.
+ } else {
+ ASSERT(buffer[(*length) - 1] != '9');
+ buffer[(*length) - 1]++;
+ }
+ }
+ return;
+ } else if (in_delta_room_minus) {
+ // Round down (== do nothing).
+ return;
+ } else { // in_delta_room_plus
+ // Round up.
+ // Note again that the last digit could not be '9' since this would have
+ // stopped the loop earlier.
+ // We still have an ASSERT here, in case the preconditions were not
+ // satisfied.
+ ASSERT(buffer[(*length) -1] != '9');
+ buffer[(*length) - 1]++;
+ return;
+ }
+ }
+}
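
The loop above is easier to follow on a toy rational. A sketch with plain integers standing in for the Bignums, assuming a made-up v = 10/3 (already scaled into [1, 10)) and an acceptance band of 1/300 on either side; unlike the round-to-even tie handling above, ties simply round up here:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t numerator = 1000, denominator = 300;  // v = 10/3
        uint64_t delta_minus = 1, delta_plus = 1;      // band of 1/300
        char buffer[32];
        int length = 0;
        while (true) {
            uint64_t digit = numerator / denominator;  // DivideModuloIntBignum
            numerator %= denominator;
            buffer[length++] = static_cast<char>('0' + digit);
            bool room_minus = numerator <= delta_minus;              // can round down
            bool room_plus = numerator + delta_plus >= denominator;  // can round up
            if (!room_minus && !room_plus) {  // not done yet: Times10 and continue
                numerator *= 10;
                delta_minus *= 10;
                delta_plus *= 10;
                continue;
            }
            if (room_plus && (!room_minus || 2 * numerator >= denominator))
                buffer[length - 1]++;  // round up
            break;
        }
        buffer[length] = '\0';
        // Prints "333"; with the decimal point after the first digit this is
        // 3.33, the shortest decimal within 1/300 of 10/3 (the distance is
        // exactly 1/300, admitted by the <= test, like the is_even case above).
        std::printf("%s\n", buffer);
        return 0;
    }
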
+
+
+// Let v = numerator / denominator < 10.
+// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
+// from left to right. Once 'count' digits have been produced we decide whether
+// to round up or down. Remainders of exactly .5 round upwards. Numbers such
+// as 9.999999 propagate a carry all the way, and change the
+// exponent (decimal_point), when rounding upwards.
+static void GenerateCountedDigits(int count, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length) {
+ ASSERT(count >= 0);
+ for (int i = 0; i < count - 1; ++i) {
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ ASSERT(digit <= 9); // digit is a uint16_t and therefore always positive.
+ // digit = numerator / denominator (integer division).
+ // numerator = numerator % denominator.
+ buffer[i] = digit + '0';
+ // Prepare for next iteration.
+ numerator->Times10();
+ }
+ // Generate the last digit.
+ uint16_t digit;
+ digit = numerator->DivideModuloIntBignum(*denominator);
+ if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
+ digit++;
+ }
+ buffer[count - 1] = digit + '0';
+ // Correct bad digits (in case we had a sequence of '9's). Propagate the
+ // carry until we hit a non-'9' or until we reach the first digit.
+ for (int i = count - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) break;
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+ if (buffer[0] == '0' + 10) {
+ // Propagate a carry past the top place.
+ buffer[0] = '1';
+ (*decimal_point)++;
+ }
+ *length = count;
+}
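
The carry sweep can be watched on a made-up buffer. A sketch of only the fix-up loop, assuming the rounding step has already bumped the last digit of "2.99..." past '9' (to '0' + 10, written here as ':'):

    #include <cstdio>

    int main() {
        char buffer[] = "29:";  // ':' stands for the out-of-range digit '0' + 10
        int count = 3, decimal_point = 1;
        for (int i = count - 1; i > 0; --i) {
            if (buffer[i] != '0' + 10) break;
            buffer[i] = '0';
            buffer[i - 1]++;
        }
        if (buffer[0] == '0' + 10) {  // carry past the top place, e.g. "99:"
            buffer[0] = '1';
            ++decimal_point;
        }
        std::printf("%s decimal_point=%d\n", buffer, decimal_point);  // 300, 1
        return 0;
    }
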
+
+
+// Generates 'requested_digits' after the decimal point. It might omit
+// trailing '0's. If the input number is too small then no digits at all are
+// generated (ex.: 2 fixed digits for 0.00001).
+//
+// Input satisfies: 1 <= (numerator + delta) / denominator < 10.
+static void BignumToFixed(int requested_digits, int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Vector<char>(buffer), int* length) {
+ // Note that we have to look at more than just the requested_digits, since
+ // a number could be rounded up. Example: v=0.5 with requested_digits=0.
+ // Even though the power of v equals 0 we can't just stop here.
+ if (-(*decimal_point) > requested_digits) {
+ // The number is definitively too small.
+ // Ex: 0.001 with requested_digits == 1.
+ // Set decimal-point to -requested_digits. This is what Gay does.
+ // Note that it should not have any effect anyway since the string is
+ // empty.
+ *decimal_point = -requested_digits;
+ *length = 0;
+ return;
+ } else if (-(*decimal_point) == requested_digits) {
+ // We only need to verify if the number rounds down or up.
+ // Ex: 0.04 and 0.06 with requested_digits == 1.
+ ASSERT(*decimal_point == -requested_digits);
+ // Initially the fraction lies in range (1, 10]. Multiply the denominator
+ // by 10 so that we can compare more easily.
+ denominator->Times10();
+ if (Bignum::PlusCompare(*numerator, *numerator, *denominator) >= 0) {
+ // If the fraction is >= 0.5 then we have to include the rounded
+ // digit.
+ buffer[0] = '1';
+ *length = 1;
+ (*decimal_point)++;
+ } else {
+ // Note that we caught most similar cases earlier.
+ *length = 0;
+ }
+ return;
+ } else {
+ // The requested digits correspond to the digits after the point.
+ // The variable 'needed_digits' includes the digits before the point.
+ int needed_digits = (*decimal_point) + requested_digits;
+ GenerateCountedDigits(needed_digits, decimal_point,
+ numerator, denominator,
+ buffer, length);
+ }
+}
+
+
+// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
+// v = f * 2^exponent and 2^52 <= f < 2^53.
+// v is hence a normalized double with the given exponent. The output is an
+// approximation for the exponent of the decimal approximation .digits * 10^k.
+//
+// The result might undershoot by 1 in which case 10^k <= v < 10^(k+1).
+// Note: this property holds for v's upper boundary m+ too.
+// 10^k <= m+ < 10^(k+1).
+// (see explanation below).
+//
+// Examples:
+// EstimatePower(0) => 16
+// EstimatePower(-52) => 0
+//
+// Note: e >= 0 => EstimatePower(e) > 0. No similar claim can be made for e<0.
+static int EstimatePower(int exponent) {
+ // This function estimates log10 of v where v = f*2^e (with e == exponent).
+ // Note that 10^floor(log10(v)) <= v, but v <= 10^ceil(log10(v)).
+ // Note that f is bounded by its container size. Let p = 53 (the double's
+ // significand size). Then 2^(p-1) <= f < 2^p.
+ //
+ // Given that log10(v) == log2(v)/log2(10) and e+(len(f)-1) is quite close
+ // to log2(v) the function is simplified to ((e+len(f)-1)/log2(10)).
+ // The computed number undershoots by less than 0.631 (when we compute log3
+ // and not log10).
+ //
+ // Optimization: since we only need an approximated result this computation
+ // can be performed on 64 bit integers. On x86/x64 architecture the speedup is
+ // not really measurable, though.
+ //
+ // Since we want to avoid overshooting we subtract 1e-10 so that
+ // floating-point imprecisions don't affect us.
+ //
+ // Explanation for v's boundary m+: the computation takes advantage of
+ // the fact that 2^(p-1) <= f < 2^p. Boundaries still satisfy this requirement
+ // (even for denormals where the delta can be much more important).
+
+ const double k1Log10 = 0.30102999566398114; // 1/lg(10)
+
+ // For doubles len(f) == 53 (don't forget the hidden bit).
+ const int kSignificandSize = Double::kSignificandSize;
+ double estimate = ceil((exponent + kSignificandSize - 1) * k1Log10 - 1e-10);
+ return static_cast<int>(estimate);
+}
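
A standalone copy of the estimate, checking the two examples quoted in the comment above (kSignificandSize == 53 for doubles; the helper name is made up):

    #include <cmath>
    #include <cstdio>

    static int EstimatePowerCopy(int exponent) {
        const double k1Log10 = 0.30102999566398114;  // 1/lg(10)
        return static_cast<int>(std::ceil((exponent + 53 - 1) * k1Log10 - 1e-10));
    }

    int main() {
        std::printf("%d\n", EstimatePowerCopy(0));    // 16 (2^52 <= v < 2^53)
        std::printf("%d\n", EstimatePowerCopy(-52));  // 0  (1 <= v < 2)
        return 0;
    }
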
+
+
+// See comments for InitialScaledStartValues.
+static void InitialScaledStartValuesPositiveExponent(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // A positive exponent implies a positive power.
+ ASSERT(estimated_power >= 0);
+ // Since the estimated_power is positive we simply multiply the denominator
+ // by 10^estimated_power.
+
+ // numerator = v.
+ numerator->AssignUInt64(significand);
+ numerator->ShiftLeft(exponent);
+ // denominator = 10^estimated_power.
+ denominator->AssignPowerUInt16(10, estimated_power);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ denominator->ShiftLeft(1);
+ numerator->ShiftLeft(1);
+ // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
+ // denominator (of 2) delta_plus equals 2^e.
+ delta_plus->AssignUInt16(1);
+ delta_plus->ShiftLeft(exponent);
+ // Same for delta_minus. The adjustments if f == 2^p-1 are done later.
+ delta_minus->AssignUInt16(1);
+ delta_minus->ShiftLeft(exponent);
+ }
+}
+
+
+// See comments for InitialScaledStartValues
+static void InitialScaledStartValuesNegativeExponentPositivePower(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // v = f * 2^e with e < 0, and with estimated_power >= 0.
+ // This means that e is close to 0 (have a look at how estimated_power is
+ // computed).
+
+ // numerator = significand
+ // since v = significand * 2^exponent this is equivalent to
+ // numerator = v * 2^-exponent
+ numerator->AssignUInt64(significand);
+ // denominator = 10^estimated_power * 2^-exponent (with exponent < 0)
+ denominator->AssignPowerUInt16(10, estimated_power);
+ denominator->ShiftLeft(-exponent);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ denominator->ShiftLeft(1);
+ numerator->ShiftLeft(1);
+ // Let v = f * 2^e, then m+ - v = 1/2 * 2^e; With the common
+ // denominator (of 2) delta_plus equals 2^e.
+ // Given that the denominator already includes v's exponent the distance
+ // to the boundaries is simply 1.
+ delta_plus->AssignUInt16(1);
+ // Same for delta_minus. The adjustments if f == 2^p-1 are done later.
+ delta_minus->AssignUInt16(1);
+ }
+}
+
+
+// See comments for InitialScaledStartValues
+static void InitialScaledStartValuesNegativeExponentNegativePower(
+ uint64_t significand, int exponent,
+ int estimated_power, bool need_boundary_deltas,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ // Instead of multiplying the denominator with 10^estimated_power we
+ // multiply all values (numerator and deltas) by 10^-estimated_power.
+
+ // Use numerator as temporary container for power_ten.
+ Bignum* power_ten = numerator;
+ power_ten->AssignPowerUInt16(10, -estimated_power);
+
+ if (need_boundary_deltas) {
+ // Since power_ten == numerator we must make a copy of 10^-estimated_power
+ // before we complete the computation of the numerator.
+ // delta_plus = delta_minus = 10^-estimated_power
+ delta_plus->AssignBignum(*power_ten);
+ delta_minus->AssignBignum(*power_ten);
+ }
+
+ // numerator = significand * 2 * 10^-estimated_power
+ // since v = significand * 2^exponent this is equivalent to
+ // numerator = v * 10^-estimated_power * 2 * 2^-exponent.
+ // Remember: numerator has been abused as power_ten. So no need to assign it
+ // to itself.
+ ASSERT(numerator == power_ten);
+ numerator->MultiplyByUInt64(significand);
+
+ // denominator = 2 * 2^-exponent with exponent < 0.
+ denominator->AssignUInt16(1);
+ denominator->ShiftLeft(-exponent);
+
+ if (need_boundary_deltas) {
+ // Introduce a common denominator so that the deltas to the boundaries are
+ // integers.
+ numerator->ShiftLeft(1);
+ denominator->ShiftLeft(1);
+ // With this shift the boundaries have their correct value, since
+ // delta_plus = 10^-estimated_power, and
+ // delta_minus = 10^-estimated_power.
+ // These assignments have been done earlier.
+ // The adjustments if f == 2^p-1 (lower boundary is closer) are done later.
+ }
+}
+
+
+// Let v = significand * 2^exponent.
+// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
+// and denominator. The functions GenerateShortestDigits and
+// GenerateCountedDigits will then convert this ratio to its decimal
+// representation d, with the required accuracy.
+// Then d * 10^estimated_power is the representation of v.
+// (Note: the fraction and the estimated_power might get adjusted before
+// generating the decimal representation.)
+//
+// The initial start values consist of:
+// - a scaled numerator: s.t. numerator/denominator == v / 10^estimated_power.
+// - a scaled (common) denominator.
+// optionally (used by GenerateShortestDigits to decide if it has the shortest
+// decimal converting back to v):
+// - v - m-: the distance to the lower boundary.
+// - m+ - v: the distance to the upper boundary.
+//
+// v, m+, m-, and therefore v - m- and m+ - v all share the same denominator.
+//
+// Let ep == estimated_power, then the returned values will satisfy:
+// v / 10^ep = numerator / denominator.
+// v's boundarys m- and m+:
+// m- / 10^ep == v / 10^ep - delta_minus / denominator
+// m+ / 10^ep == v / 10^ep + delta_plus / denominator
+// Or in other words:
+// m- == v - delta_minus * 10^ep / denominator;
+// m+ == v + delta_plus * 10^ep / denominator;
+//
+// Since 10^(k-1) <= v < 10^k (with k == estimated_power)
+// or 10^k <= v < 10^(k+1)
+// we then have 0.1 <= numerator/denominator < 1
+// or 1 <= numerator/denominator < 10
+//
+// It is then easy to kickstart the digit-generation routine.
+//
+// The boundary-deltas are only filled if the mode equals BIGNUM_DTOA_SHORTEST
+// or BIGNUM_DTOA_SHORTEST_SINGLE.
+
+static void InitialScaledStartValues(uint64_t significand,
+ int exponent,
+ bool lower_boundary_is_closer,
+ int estimated_power,
+ bool need_boundary_deltas,
+ Bignum* numerator,
+ Bignum* denominator,
+ Bignum* delta_minus,
+ Bignum* delta_plus) {
+ if (exponent >= 0) {
+ InitialScaledStartValuesPositiveExponent(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ } else if (estimated_power >= 0) {
+ InitialScaledStartValuesNegativeExponentPositivePower(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ } else {
+ InitialScaledStartValuesNegativeExponentNegativePower(
+ significand, exponent, estimated_power, need_boundary_deltas,
+ numerator, denominator, delta_minus, delta_plus);
+ }
+
+ if (need_boundary_deltas && lower_boundary_is_closer) {
+ // The lower boundary is closer at half the distance of "normal" numbers.
+ // Increase the common denominator and adapt all but the delta_minus.
+ denominator->ShiftLeft(1); // *2
+ numerator->ShiftLeft(1); // *2
+ delta_plus->ShiftLeft(1); // *2
+ }
+}
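
A hand-worked instance with the boundary deltas omitted: for v = 3.0 we have f = 3 << 51 (hidden bit included) and e = -51, and EstimatePower gives 1, so the negative-exponent/positive-power branch applies. A sketch checking the resulting ratio in long double arithmetic:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // numerator = f, denominator = 10^1 * 2^51 (branch for e < 0, power >= 0).
        uint64_t f = UINT64_C(3) << 51;
        long double numerator = static_cast<long double>(f);
        long double denominator = 10.0L * static_cast<long double>(UINT64_C(1) << 51);
        // v / 10^estimated_power == 0.3; FixupMultiply10 will then multiply the
        // numerator by 10 and set decimal_point = 1, so that v == 3 * 10^(1-1).
        std::printf("%.17Lg\n", numerator / denominator);  // 0.3
        return 0;
    }
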
+
+
+// This routine multiplies numerator/denominator so that its value lies in the
+// range 1-10. That is, after a call to this function we have:
+//   1 <= (numerator + delta_plus) / denominator < 10.
+// Let numerator be the input before modification and numerator' the argument
+// after modification, then the output-parameter decimal_point is such that
+//  numerator / denominator * 10^estimated_power ==
+//    numerator' / denominator' * 10^(decimal_point - 1)
+// In some cases estimated_power was too low, and the condition above already
+// holds. We then simply adjust the power so that 10^(k-1) <= v < 10^k (with
+// k == estimated_power) but do not touch the numerator or denominator.
+// Otherwise the routine multiplies the numerator and the deltas by 10.
+static void FixupMultiply10(int estimated_power, bool is_even,
+ int* decimal_point,
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
+ bool in_range;
+ if (is_even) {
+ // For IEEE doubles half-way cases (in decimal system numbers ending with 5)
+ // are rounded to the closest floating-point number with even significand.
+ in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) >= 0;
+ } else {
+ in_range = Bignum::PlusCompare(*numerator, *delta_plus, *denominator) > 0;
+ }
+ if (in_range) {
+ // Since numerator + delta_plus >= denominator we already have
+ // 1 <= numerator/denominator < 10. Simply update the estimated_power.
+ *decimal_point = estimated_power + 1;
+ } else {
+ *decimal_point = estimated_power;
+ numerator->Times10();
+ if (Bignum::Equal(*delta_minus, *delta_plus)) {
+ delta_minus->Times10();
+ delta_plus->AssignBignum(*delta_minus);
+ } else {
+ delta_minus->Times10();
+ delta_plus->Times10();
+ }
+ }
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/bignum-dtoa.h b/src/3rdparty/double-conversion/bignum-dtoa.h
new file mode 100644
index 0000000000..34b961992d
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum-dtoa.h
@@ -0,0 +1,84 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+#define DOUBLE_CONVERSION_BIGNUM_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+enum BignumDtoaMode {
+ // Return the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate but
+ // correct) 0.3.
+ BIGNUM_DTOA_SHORTEST,
+ // Same as BIGNUM_DTOA_SHORTEST but for single-precision floats.
+ BIGNUM_DTOA_SHORTEST_SINGLE,
+ // Return a fixed number of digits after the decimal point.
+ // For instance fixed(0.1, 4) becomes 0.1000
+ // If the input number is big, the output will be big.
+ BIGNUM_DTOA_FIXED,
+ // Return a fixed number of digits, no matter what the exponent is.
+ BIGNUM_DTOA_PRECISION
+};
+
+// Converts the given double 'v' to ascii.
+// The result should be interpreted as buffer * 10^(point-length).
+// The buffer will be null-terminated.
+//
+// The input v must be > 0 and different from NaN and Infinity.
+//
+// The output depends on the given mode:
+// - SHORTEST: produce the least amount of digits for which the internal
+// identity requirement is still satisfied. If the digits are printed
+// (together with the correct exponent) then reading this number will give
+// 'v' again. The buffer will choose the representation that is closest to
+// 'v'. If there are two at the same distance, then the number is rounded up.
+// In this mode the 'requested_digits' parameter is ignored.
+// - FIXED: produces digits necessary to print a given number with
+// 'requested_digits' digits after the decimal point. The produced digits
+// might be too short in which case the caller has to fill the gaps with '0's.
+// Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+// Halfway cases are rounded up. The call toFixed(0.15, 2) thus returns
+// buffer="2", point=0.
+// Note: the length of the returned buffer has no meaning wrt the significance
+// of its digits. That is, just because it contains '0's does not mean that
+// any other digit would not satisfy the internal identity requirement.
+// - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+// Even though the length of produced digits usually equals
+// 'requested_digits', the function is allowed to return fewer digits, in
+// which case the caller has to fill the missing digits with '0's.
+// Halfway cases are again rounded up.
+// 'BignumDtoa' expects the given buffer to be big enough to hold all digits
+// and a terminating null-character.
+void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* point);
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
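
A hypothetical call showing how the output contract (value == buffer * 10^(point - length)) plays out for 12.5 in shortest mode:

    #include <cstdio>

    #include "bignum-dtoa.h"

    int main() {
        using namespace double_conversion;
        char digits[128];
        int length, point;
        // requested_digits is ignored in shortest mode, so pass 0.
        BignumDtoa(12.5, BIGNUM_DTOA_SHORTEST, 0,
                   Vector<char>(digits, 128), &length, &point);
        // digits == "125", length == 3, point == 2: 125 * 10^(2-3) == 12.5.
        std::printf("%s * 10^%d\n", digits, point - length);
        return 0;
    }
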
diff --git a/src/3rdparty/double-conversion/bignum.cc b/src/3rdparty/double-conversion/bignum.cc
new file mode 100644
index 0000000000..747491a089
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum.cc
@@ -0,0 +1,764 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "bignum.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+Bignum::Bignum()
+ : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
+ for (int i = 0; i < kBigitCapacity; ++i) {
+ bigits_[i] = 0;
+ }
+}
+
+
+template<typename S>
+static int BitSize(S value) {
+ return 8 * sizeof(value);
+}
+
+// Guaranteed to lie in one Bigit.
+void Bignum::AssignUInt16(uint16_t value) {
+ ASSERT(kBigitSize >= BitSize(value));
+ Zero();
+ if (value == 0) return;
+
+ EnsureCapacity(1);
+ bigits_[0] = value;
+ used_digits_ = 1;
+}
+
+
+void Bignum::AssignUInt64(uint64_t value) {
+ const int kUInt64Size = 64;
+
+ Zero();
+ if (value == 0) return;
+
+ int needed_bigits = kUInt64Size / kBigitSize + 1;
+ EnsureCapacity(needed_bigits);
+ for (int i = 0; i < needed_bigits; ++i) {
+ bigits_[i] = value & kBigitMask;
+ value = value >> kBigitSize;
+ }
+ used_digits_ = needed_bigits;
+ Clamp();
+}
+
+
+void Bignum::AssignBignum(const Bignum& other) {
+ exponent_ = other.exponent_;
+ for (int i = 0; i < other.used_digits_; ++i) {
+ bigits_[i] = other.bigits_[i];
+ }
+ // Clear the excess digits (if there were any).
+ for (int i = other.used_digits_; i < used_digits_; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ = other.used_digits_;
+}
+
+
+static uint64_t ReadUInt64(Vector<const char> buffer,
+ int from,
+ int digits_to_read) {
+ uint64_t result = 0;
+ for (int i = from; i < from + digits_to_read; ++i) {
+ int digit = buffer[i] - '0';
+ ASSERT(0 <= digit && digit <= 9);
+ result = result * 10 + digit;
+ }
+ return result;
+}
+
+
+void Bignum::AssignDecimalString(Vector<const char> value) {
+ // 2^64 = 18446744073709551616 > 10^19
+ const int kMaxUint64DecimalDigits = 19;
+ Zero();
+ int length = value.length();
+ int pos = 0;
+ // Let's just say that each digit needs 4 bits.
+ while (length >= kMaxUint64DecimalDigits) {
+ uint64_t digits = ReadUInt64(value, pos, kMaxUint64DecimalDigits);
+ pos += kMaxUint64DecimalDigits;
+ length -= kMaxUint64DecimalDigits;
+ MultiplyByPowerOfTen(kMaxUint64DecimalDigits);
+ AddUInt64(digits);
+ }
+ uint64_t digits = ReadUInt64(value, pos, length);
+ MultiplyByPowerOfTen(length);
+ AddUInt64(digits);
+ Clamp();
+}
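
The same 19-digit chunking, sketched with unsigned __int128 (a GCC/Clang extension) standing in for the Bignum; every chunk fits in a uint64_t because 10^19 < 2^64, so the big number is touched only once per 19 digits (the tail is handled digit by digit here for brevity):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        const char* s = "18446744073709551616";  // 2^64: 20 decimal digits
        unsigned __int128 result = 0;
        const uint64_t kTenTo19 = UINT64_C(10000000000000000000);
        int length = static_cast<int>(std::strlen(s));
        int pos = 0;
        while (length - pos >= 19) {
            uint64_t chunk = 0;  // ReadUInt64: 19 digits always fit in a uint64
            for (int i = 0; i < 19; ++i) chunk = chunk * 10 + (s[pos++] - '0');
            result = result * kTenTo19 + chunk;  // MultiplyByPowerOfTen + AddUInt64
        }
        for (; pos < length; ++pos) result = result * 10 + (s[pos] - '0');
        std::puts(result == (static_cast<unsigned __int128>(1) << 64) ? "ok" : "bug");
        return 0;
    }
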
+
+
+static int HexCharValue(char c) {
+ if ('0' <= c && c <= '9') return c - '0';
+ if ('a' <= c && c <= 'f') return 10 + c - 'a';
+ if ('A' <= c && c <= 'F') return 10 + c - 'A';
+ UNREACHABLE();
+ return 0; // To make compiler happy.
+}
+
+
+void Bignum::AssignHexString(Vector<const char> value) {
+ Zero();
+ int length = value.length();
+
+ int needed_bigits = length * 4 / kBigitSize + 1;
+ EnsureCapacity(needed_bigits);
+ int string_index = length - 1;
+ for (int i = 0; i < needed_bigits - 1; ++i) {
+ // These bigits are guaranteed to be "full".
+ Chunk current_bigit = 0;
+ for (int j = 0; j < kBigitSize / 4; j++) {
+ current_bigit += HexCharValue(value[string_index--]) << (j * 4);
+ }
+ bigits_[i] = current_bigit;
+ }
+ used_digits_ = needed_bigits - 1;
+
+ Chunk most_significant_bigit = 0; // Could be = 0;
+ for (int j = 0; j <= string_index; ++j) {
+ most_significant_bigit <<= 4;
+ most_significant_bigit += HexCharValue(value[j]);
+ }
+ if (most_significant_bigit != 0) {
+ bigits_[used_digits_] = most_significant_bigit;
+ used_digits_++;
+ }
+ Clamp();
+}
+
+
+void Bignum::AddUInt64(uint64_t operand) {
+ if (operand == 0) return;
+ Bignum other;
+ other.AssignUInt64(operand);
+ AddBignum(other);
+}
+
+
+void Bignum::AddBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+
+ // If this has a greater exponent than other append zero-bigits to this.
+ // After this call exponent_ <= other.exponent_.
+ Align(other);
+
+ // There are two possibilities:
+ // aaaaaaaaaaa 0000 (where the 0s represent a's exponent)
+ // bbbbb 00000000
+ // ----------------
+ // ccccccccccc 0000
+ // or
+ // aaaaaaaaaa 0000
+ // bbbbbbbbb 0000000
+ // -----------------
+ // cccccccccccc 0000
+ // In both cases we might need a carry bigit.
+
+ EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
+ Chunk carry = 0;
+ int bigit_pos = other.exponent_ - exponent_;
+ ASSERT(bigit_pos >= 0);
+ for (int i = 0; i < other.used_digits_; ++i) {
+ Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
+ bigits_[bigit_pos] = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ bigit_pos++;
+ }
+
+ while (carry != 0) {
+ Chunk sum = bigits_[bigit_pos] + carry;
+ bigits_[bigit_pos] = sum & kBigitMask;
+ carry = sum >> kBigitSize;
+ bigit_pos++;
+ }
+ used_digits_ = Max(bigit_pos, used_digits_);
+ ASSERT(IsClamped());
+}
+
+
+void Bignum::SubtractBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+ // We require this to be bigger than other.
+ ASSERT(LessEqual(other, *this));
+
+ Align(other);
+
+ int offset = other.exponent_ - exponent_;
+ Chunk borrow = 0;
+ int i;
+ for (i = 0; i < other.used_digits_; ++i) {
+ ASSERT((borrow == 0) || (borrow == 1));
+ Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
+ bigits_[i + offset] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ while (borrow != 0) {
+ Chunk difference = bigits_[i + offset] - borrow;
+ bigits_[i + offset] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ ++i;
+ }
+ Clamp();
+}
+
+
+void Bignum::ShiftLeft(int shift_amount) {
+ if (used_digits_ == 0) return;
+ exponent_ += shift_amount / kBigitSize;
+ int local_shift = shift_amount % kBigitSize;
+ EnsureCapacity(used_digits_ + 1);
+ BigitsShiftLeft(local_shift);
+}
+
+
+void Bignum::MultiplyByUInt32(uint32_t factor) {
+ if (factor == 1) return;
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ if (used_digits_ == 0) return;
+
+ // The product of a bigit with the factor is of size kBigitSize + 32.
+ // Assert that this number + 1 (for the carry) fits into double chunk.
+ ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
+ DoubleChunk carry = 0;
+ for (int i = 0; i < used_digits_; ++i) {
+ DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
+ bigits_[i] = static_cast<Chunk>(product & kBigitMask);
+ carry = (product >> kBigitSize);
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_digits_ + 1);
+ bigits_[used_digits_] = carry & kBigitMask;
+ used_digits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByUInt64(uint64_t factor) {
+ if (factor == 1) return;
+ if (factor == 0) {
+ Zero();
+ return;
+ }
+ ASSERT(kBigitSize < 32);
+ uint64_t carry = 0;
+ uint64_t low = factor & 0xFFFFFFFF;
+ uint64_t high = factor >> 32;
+ for (int i = 0; i < used_digits_; ++i) {
+ uint64_t product_low = low * bigits_[i];
+ uint64_t product_high = high * bigits_[i];
+ uint64_t tmp = (carry & kBigitMask) + product_low;
+ bigits_[i] = tmp & kBigitMask;
+ carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
+ (product_high << (32 - kBigitSize));
+ }
+ while (carry != 0) {
+ EnsureCapacity(used_digits_ + 1);
+ bigits_[used_digits_] = carry & kBigitMask;
+ used_digits_++;
+ carry >>= kBigitSize;
+ }
+}
+
+
+void Bignum::MultiplyByPowerOfTen(int exponent) {
+ const uint64_t kFive27 = UINT64_2PART_C(0x6765c793, fa10079d);
+ const uint16_t kFive1 = 5;
+ const uint16_t kFive2 = kFive1 * 5;
+ const uint16_t kFive3 = kFive2 * 5;
+ const uint16_t kFive4 = kFive3 * 5;
+ const uint16_t kFive5 = kFive4 * 5;
+ const uint16_t kFive6 = kFive5 * 5;
+ const uint32_t kFive7 = kFive6 * 5;
+ const uint32_t kFive8 = kFive7 * 5;
+ const uint32_t kFive9 = kFive8 * 5;
+ const uint32_t kFive10 = kFive9 * 5;
+ const uint32_t kFive11 = kFive10 * 5;
+ const uint32_t kFive12 = kFive11 * 5;
+ const uint32_t kFive13 = kFive12 * 5;
+ const uint32_t kFive1_to_12[] =
+ { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
+ kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
+
+ ASSERT(exponent >= 0);
+ if (exponent == 0) return;
+ if (used_digits_ == 0) return;
+
+ // We shift by exponent at the end just before returning.
+ int remaining_exponent = exponent;
+ while (remaining_exponent >= 27) {
+ MultiplyByUInt64(kFive27);
+ remaining_exponent -= 27;
+ }
+ while (remaining_exponent >= 13) {
+ MultiplyByUInt32(kFive13);
+ remaining_exponent -= 13;
+ }
+ if (remaining_exponent > 0) {
+ MultiplyByUInt32(kFive1_to_12[remaining_exponent - 1]);
+ }
+ ShiftLeft(exponent);
+}
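
The identity behind this function is 10^k == 5^k * 2^k: only the powers of 5 need real bignum multiplications (kFive13 fits in 32 bits, kFive27 in 64), while the whole 2^k part collapses into the single ShiftLeft at the end. A quick check of the identity over the uint64 range:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t five = 1, ten = 1;
        for (int k = 1; k <= 19; ++k) {  // 10^19 still fits in a uint64
            five *= 5;
            ten *= 10;
            if (ten != five << k) {
                std::puts("mismatch");
                return 1;
            }
        }
        std::puts("10^k == 5^k << k for k = 1..19");
        return 0;
    }
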
+
+
+void Bignum::Square() {
+ ASSERT(IsClamped());
+ int product_length = 2 * used_digits_;
+ EnsureCapacity(product_length);
+
+ // Comba multiplication: compute each column separately.
+ // Example: r = a2a1a0 * b2b1b0.
+ // r = 1 * a0b0 +
+ // 10 * (a1b0 + a0b1) +
+ // 100 * (a2b0 + a1b1 + a0b2) +
+ // 1000 * (a2b1 + a1b2) +
+ // 10000 * a2b2
+ //
+ // In the worst case we have to accumulate used_digits products of digit*digit.
+ //
+ // Assert that the additional number of bits in a DoubleChunk are enough to
+ // sum up used_digits of Bigit*Bigit.
+ if ((1 << (2 * (kChunkSize - kBigitSize))) <= used_digits_) {
+ UNIMPLEMENTED();
+ }
+ DoubleChunk accumulator = 0;
+ // First shift the digits so we don't overwrite them.
+ int copy_offset = used_digits_;
+ for (int i = 0; i < used_digits_; ++i) {
+ bigits_[copy_offset + i] = bigits_[i];
+ }
+ // We have two loops to avoid some 'if's in the loop.
+ for (int i = 0; i < used_digits_; ++i) {
+ // Process temporary digit i with power i.
+ // The sum of the two indices must be equal to i.
+ int bigit_index1 = i;
+ int bigit_index2 = 0;
+ // Sum all of the sub-products.
+ while (bigit_index1 >= 0) {
+ Chunk chunk1 = bigits_[copy_offset + bigit_index1];
+ Chunk chunk2 = bigits_[copy_offset + bigit_index2];
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ for (int i = used_digits_; i < product_length; ++i) {
+ int bigit_index1 = used_digits_ - 1;
+ int bigit_index2 = i - bigit_index1;
+ // Invariant: sum of both indices is again equal to i.
+ // Inner loop runs 0 times on last iteration, emptying accumulator.
+ while (bigit_index2 < used_digits_) {
+ Chunk chunk1 = bigits_[copy_offset + bigit_index1];
+ Chunk chunk2 = bigits_[copy_offset + bigit_index2];
+ accumulator += static_cast<DoubleChunk>(chunk1) * chunk2;
+ bigit_index1--;
+ bigit_index2++;
+ }
+ // The overwritten bigits_[i] will never be read in further loop iterations,
+ // because bigit_index1 and bigit_index2 are always greater
+ // than i - used_digits_.
+ bigits_[i] = static_cast<Chunk>(accumulator) & kBigitMask;
+ accumulator >>= kBigitSize;
+ }
+ // Since the result was guaranteed to lie inside the number the
+ // accumulator must be 0 now.
+ ASSERT(accumulator == 0);
+
+ // Don't forget to update the used_digits and the exponent.
+ used_digits_ = product_length;
+ exponent_ *= 2;
+ Clamp();
+}
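+
+
+// Worked example (editor's addition, not from the upstream sources): squaring
+// the two-digit number 34 in base 10 with the column scheme above gives
+//   column 0: 4*4        = 16
+//   column 1: 3*4 + 4*3  = 24
+//   column 2: 3*3        =  9
+// and propagating the carries (6 carry 1; 25 -> 5 carry 2; 9 + 2 = 11)
+// yields 1156 = 34^2.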
+
+
+void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
+ ASSERT(base != 0);
+ ASSERT(power_exponent >= 0);
+ if (power_exponent == 0) {
+ AssignUInt16(1);
+ return;
+ }
+ Zero();
+ int shifts = 0;
+ // We expect base to be in range 2-32, and most often to be 10.
+ // It does not make much sense to implement different algorithms for counting
+ // the bits.
+ while ((base & 1) == 0) {
+ base >>= 1;
+ shifts++;
+ }
+ int bit_size = 0;
+ int tmp_base = base;
+ while (tmp_base != 0) {
+ tmp_base >>= 1;
+ bit_size++;
+ }
+ int final_size = bit_size * power_exponent;
+  // 1 extra bigit for the shifting, and 1 because final_size / kBigitSize
+  // rounds down.
+ EnsureCapacity(final_size / kBigitSize + 2);
+
+ // Left to Right exponentiation.
+ int mask = 1;
+ while (power_exponent >= mask) mask <<= 1;
+
+  // The mask is now pointing to the bit above the most significant 1-bit of
+  // power_exponent.
+  // Get rid of the first 1-bit: it is already accounted for by initializing
+  // this_value to base, so move the mask to the bit below it.
+ mask >>= 2;
+ uint64_t this_value = base;
+
+  bool delayed_multiplication = false;
+ const uint64_t max_32bits = 0xFFFFFFFF;
+ while (mask != 0 && this_value <= max_32bits) {
+ this_value = this_value * this_value;
+ // Verify that there is enough space in this_value to perform the
+ // multiplication. The first bit_size bits must be 0.
+ if ((power_exponent & mask) != 0) {
+ uint64_t base_bits_mask =
+ ~((static_cast<uint64_t>(1) << (64 - bit_size)) - 1);
+ bool high_bits_zero = (this_value & base_bits_mask) == 0;
+ if (high_bits_zero) {
+ this_value *= base;
+ } else {
+        delayed_multiplication = true;
+ }
+ }
+ mask >>= 1;
+ }
+ AssignUInt64(this_value);
+  if (delayed_multiplication) {
+ MultiplyByUInt32(base);
+ }
+
+ // Now do the same thing as a bignum.
+ while (mask != 0) {
+ Square();
+ if ((power_exponent & mask) != 0) {
+ MultiplyByUInt32(base);
+ }
+ mask >>= 1;
+ }
+
+ // And finally add the saved shifts.
+ ShiftLeft(shifts * power_exponent);
+}
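+
+
+// Worked example (editor's addition, not from the upstream sources): for
+// power_exponent = 19 = 10011b the leading 1-bit is consumed by setting
+// this_value = base; the remaining bits 0, 0, 1, 1 then drive the loop:
+//   x = x^2            (bit 0)
+//   x = x^2            (bit 0)
+//   x = x^2 * base     (bit 1)
+//   x = x^2 * base     (bit 1)
+// which computes base^19 with four squarings and two multiplications. The
+// code runs this loop in a plain uint64_t first and only falls back to
+// bignum squaring once the value no longer fits into 32 bits.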
+
+
+// Precondition: this / other < 2^16.
+uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
+ ASSERT(IsClamped());
+ ASSERT(other.IsClamped());
+ ASSERT(other.used_digits_ > 0);
+
+  // Easy case: if we have fewer digits than the divisor, the result is 0.
+ // Note: this handles the case where this == 0, too.
+ if (BigitLength() < other.BigitLength()) {
+ return 0;
+ }
+
+ Align(other);
+
+ uint16_t result = 0;
+
+ // Start by removing multiples of 'other' until both numbers have the same
+ // number of digits.
+ while (BigitLength() > other.BigitLength()) {
+    // This naive approach is extremely inefficient if 'this' divided by
+    // 'other' is big. This function is implemented for doubleToString where
+    // the result should be small (less than 10).
+ ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
+ // Remove the multiples of the first digit.
+    // Example: this = 23 and other = 9 -> remove 2 multiples.
+ result += bigits_[used_digits_ - 1];
+ SubtractTimes(other, bigits_[used_digits_ - 1]);
+ }
+
+ ASSERT(BigitLength() == other.BigitLength());
+
+ // Both bignums are at the same length now.
+ // Since other has more than 0 digits we know that the access to
+ // bigits_[used_digits_ - 1] is safe.
+ Chunk this_bigit = bigits_[used_digits_ - 1];
+ Chunk other_bigit = other.bigits_[other.used_digits_ - 1];
+
+ if (other.used_digits_ == 1) {
+ // Shortcut for easy (and common) case.
+ int quotient = this_bigit / other_bigit;
+ bigits_[used_digits_ - 1] = this_bigit - other_bigit * quotient;
+ result += quotient;
+ Clamp();
+ return result;
+ }
+
+ int division_estimate = this_bigit / (other_bigit + 1);
+ result += division_estimate;
+ SubtractTimes(other, division_estimate);
+
+ if (other_bigit * (division_estimate + 1) > this_bigit) {
+ // No need to even try to subtract. Even if other's remaining digits were 0
+ // another subtraction would be too much.
+ return result;
+ }
+
+ while (LessEqual(other, *this)) {
+ SubtractBignum(other);
+ result++;
+ }
+ return result;
+}
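+
+
+// Illustration (editor's sketch, not part of the upstream sources): a
+// hypothetical, unused helper showing the divide-and-reduce contract.
+static void ExampleDivideModulo() {
+  Bignum numerator;
+  Bignum denominator;
+  numerator.AssignUInt16(23);
+  denominator.AssignUInt16(7);
+  uint16_t quotient = numerator.DivideModuloIntBignum(denominator);
+  (void)quotient;  // quotient == 3; numerator now holds the remainder 2.
+}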
+
+
+template<typename S>
+static int SizeInHexChars(S number) {
+ ASSERT(number > 0);
+ int result = 0;
+ while (number != 0) {
+ number >>= 4;
+ result++;
+ }
+ return result;
+}
+
+
+static char HexCharOfValue(int value) {
+  ASSERT(0 <= value && value < 16);
+ if (value < 10) return value + '0';
+ return value - 10 + 'A';
+}
+
+
+bool Bignum::ToHexString(char* buffer, int buffer_size) const {
+ ASSERT(IsClamped());
+  // Each bigit must be printable as a separate hex character.
+ ASSERT(kBigitSize % 4 == 0);
+ const int kHexCharsPerBigit = kBigitSize / 4;
+
+ if (used_digits_ == 0) {
+ if (buffer_size < 2) return false;
+ buffer[0] = '0';
+ buffer[1] = '\0';
+ return true;
+ }
+ // We add 1 for the terminating '\0' character.
+ int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
+ SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
+ if (needed_chars > buffer_size) return false;
+ int string_index = needed_chars - 1;
+ buffer[string_index--] = '\0';
+ for (int i = 0; i < exponent_; ++i) {
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = '0';
+ }
+ }
+ for (int i = 0; i < used_digits_ - 1; ++i) {
+ Chunk current_bigit = bigits_[i];
+ for (int j = 0; j < kHexCharsPerBigit; ++j) {
+ buffer[string_index--] = HexCharOfValue(current_bigit & 0xF);
+ current_bigit >>= 4;
+ }
+ }
+ // And finally the last bigit.
+ Chunk most_significant_bigit = bigits_[used_digits_ - 1];
+ while (most_significant_bigit != 0) {
+ buffer[string_index--] = HexCharOfValue(most_significant_bigit & 0xF);
+ most_significant_bigit >>= 4;
+ }
+ return true;
+}
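+
+
+// Sizing note (editor's addition, not from the upstream sources): with
+// kBigitSize == 28 every bigit prints as at most 7 hex characters, so a
+// buffer of BigitLength() * 7 + 1 bytes always suffices. For example, a
+// Bignum holding 300000 produces "493E0" and needs 6 bytes.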
+
+
+Bignum::Chunk Bignum::BigitAt(int index) const {
+ if (index >= BigitLength()) return 0;
+ if (index < exponent_) return 0;
+ return bigits_[index - exponent_];
+}
+
+
+int Bignum::Compare(const Bignum& a, const Bignum& b) {
+ ASSERT(a.IsClamped());
+ ASSERT(b.IsClamped());
+ int bigit_length_a = a.BigitLength();
+ int bigit_length_b = b.BigitLength();
+ if (bigit_length_a < bigit_length_b) return -1;
+ if (bigit_length_a > bigit_length_b) return +1;
+ for (int i = bigit_length_a - 1; i >= Min(a.exponent_, b.exponent_); --i) {
+ Chunk bigit_a = a.BigitAt(i);
+ Chunk bigit_b = b.BigitAt(i);
+ if (bigit_a < bigit_b) return -1;
+ if (bigit_a > bigit_b) return +1;
+ // Otherwise they are equal up to this digit. Try the next digit.
+ }
+ return 0;
+}
+
+
+int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
+ ASSERT(a.IsClamped());
+ ASSERT(b.IsClamped());
+ ASSERT(c.IsClamped());
+ if (a.BigitLength() < b.BigitLength()) {
+ return PlusCompare(b, a, c);
+ }
+ if (a.BigitLength() + 1 < c.BigitLength()) return -1;
+ if (a.BigitLength() > c.BigitLength()) return +1;
+ // The exponent encodes 0-bigits. So if there are more 0-digits in 'a' than
+ // 'b' has digits, then the bigit-length of 'a'+'b' must be equal to the one
+ // of 'a'.
+ if (a.exponent_ >= b.BigitLength() && a.BigitLength() < c.BigitLength()) {
+ return -1;
+ }
+
+ Chunk borrow = 0;
+ // Starting at min_exponent all digits are == 0. So no need to compare them.
+ int min_exponent = Min(Min(a.exponent_, b.exponent_), c.exponent_);
+ for (int i = c.BigitLength() - 1; i >= min_exponent; --i) {
+ Chunk chunk_a = a.BigitAt(i);
+ Chunk chunk_b = b.BigitAt(i);
+ Chunk chunk_c = c.BigitAt(i);
+ Chunk sum = chunk_a + chunk_b;
+ if (sum > chunk_c + borrow) {
+ return +1;
+ } else {
+ borrow = chunk_c + borrow - sum;
+ if (borrow > 1) return -1;
+ borrow <<= kBigitSize;
+ }
+ }
+ if (borrow == 0) return 0;
+ return -1;
+}
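+
+
+// Worked example (editor's addition, not from the upstream sources): in a
+// base-10 analogy, PlusCompare(47, 8, 55) scans from the most significant
+// digit down:
+//   i = 1: sum = 4 + 0 = 4, chunk_c = 5, borrow = 5 - 4 = 1, shifted to 10.
+//   i = 0: sum = 7 + 8 = 15, chunk_c + borrow = 15, borrow = 15 - 15 = 0.
+// The final borrow is 0, so 47 + 8 == 55.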
+
+
+void Bignum::Clamp() {
+ while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
+ used_digits_--;
+ }
+ if (used_digits_ == 0) {
+ // Zero.
+ exponent_ = 0;
+ }
+}
+
+
+bool Bignum::IsClamped() const {
+ return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
+}
+
+
+void Bignum::Zero() {
+ for (int i = 0; i < used_digits_; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ = 0;
+ exponent_ = 0;
+}
+
+
+void Bignum::Align(const Bignum& other) {
+ if (exponent_ > other.exponent_) {
+ // If "X" represents a "hidden" digit (by the exponent) then we are in the
+ // following case (a == this, b == other):
+ // a: aaaaaaXXXX or a: aaaaaXXX
+ // b: bbbbbbX b: bbbbbbbbXX
+ // We replace some of the hidden digits (X) of a with 0 digits.
+ // a: aaaaaa000X or a: aaaaa0XX
+ int zero_digits = exponent_ - other.exponent_;
+ EnsureCapacity(used_digits_ + zero_digits);
+ for (int i = used_digits_ - 1; i >= 0; --i) {
+ bigits_[i + zero_digits] = bigits_[i];
+ }
+ for (int i = 0; i < zero_digits; ++i) {
+ bigits_[i] = 0;
+ }
+ used_digits_ += zero_digits;
+ exponent_ -= zero_digits;
+ ASSERT(used_digits_ >= 0);
+ ASSERT(exponent_ >= 0);
+ }
+}
+
+
+void Bignum::BigitsShiftLeft(int shift_amount) {
+ ASSERT(shift_amount < kBigitSize);
+ ASSERT(shift_amount >= 0);
+ Chunk carry = 0;
+ for (int i = 0; i < used_digits_; ++i) {
+ Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
+ bigits_[i] = ((bigits_[i] << shift_amount) + carry) & kBigitMask;
+ carry = new_carry;
+ }
+ if (carry != 0) {
+ bigits_[used_digits_] = carry;
+ used_digits_++;
+ }
+}
+
+
+void Bignum::SubtractTimes(const Bignum& other, int factor) {
+ ASSERT(exponent_ <= other.exponent_);
+ if (factor < 3) {
+ for (int i = 0; i < factor; ++i) {
+ SubtractBignum(other);
+ }
+ return;
+ }
+ Chunk borrow = 0;
+ int exponent_diff = other.exponent_ - exponent_;
+ for (int i = 0; i < other.used_digits_; ++i) {
+ DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
+ DoubleChunk remove = borrow + product;
+ Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
+ bigits_[i + exponent_diff] = difference & kBigitMask;
+ borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
+ (remove >> kBigitSize));
+ }
+ for (int i = other.used_digits_ + exponent_diff; i < used_digits_; ++i) {
+ if (borrow == 0) return;
+ Chunk difference = bigits_[i] - borrow;
+ bigits_[i] = difference & kBigitMask;
+ borrow = difference >> (kChunkSize - 1);
+ }
+ Clamp();
+}
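+
+
+// Worked example (editor's addition, not from the upstream sources): in a
+// base-10 analogy, SubtractTimes with this = 100, other = 26, factor = 3:
+//   i = 0: product = 18, remove = 18, digit 0 - 8 -> 2, borrow 1 + 1 = 2.
+//   i = 1: product = 6, remove = 2 + 6 = 8, digit 0 - 8 -> 2, borrow 1.
+// The trailing loop then consumes the borrow (1 - 1 = 0), leaving the digits
+// 0, 2, 2, i.e. 22 = 100 - 3 * 26 after Clamp().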
+
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/bignum.h b/src/3rdparty/double-conversion/bignum.h
new file mode 100644
index 0000000000..5ec3544f57
--- /dev/null
+++ b/src/3rdparty/double-conversion/bignum.h
@@ -0,0 +1,145 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_BIGNUM_H_
+#define DOUBLE_CONVERSION_BIGNUM_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+class Bignum {
+ public:
+ // 3584 = 128 * 28. We can represent 2^3584 > 10^1000 accurately.
+ // This bignum can encode much bigger numbers, since it contains an
+ // exponent.
+ static const int kMaxSignificantBits = 3584;
+
+ Bignum();
+ void AssignUInt16(uint16_t value);
+ void AssignUInt64(uint64_t value);
+ void AssignBignum(const Bignum& other);
+
+ void AssignDecimalString(Vector<const char> value);
+ void AssignHexString(Vector<const char> value);
+
+ void AssignPowerUInt16(uint16_t base, int exponent);
+
+ void AddUInt16(uint16_t operand);
+ void AddUInt64(uint64_t operand);
+ void AddBignum(const Bignum& other);
+ // Precondition: this >= other.
+ void SubtractBignum(const Bignum& other);
+
+ void Square();
+ void ShiftLeft(int shift_amount);
+ void MultiplyByUInt32(uint32_t factor);
+ void MultiplyByUInt64(uint64_t factor);
+ void MultiplyByPowerOfTen(int exponent);
+ void Times10() { return MultiplyByUInt32(10); }
+ // Pseudocode:
+ // int result = this / other;
+ // this = this % other;
+ // In the worst case this function is in O(this/other).
+ uint16_t DivideModuloIntBignum(const Bignum& other);
+
+ bool ToHexString(char* buffer, int buffer_size) const;
+
+ // Returns
+ // -1 if a < b,
+ // 0 if a == b, and
+ // +1 if a > b.
+ static int Compare(const Bignum& a, const Bignum& b);
+ static bool Equal(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) == 0;
+ }
+ static bool LessEqual(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) <= 0;
+ }
+ static bool Less(const Bignum& a, const Bignum& b) {
+ return Compare(a, b) < 0;
+ }
+ // Returns Compare(a + b, c);
+ static int PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c);
+ // Returns a + b == c
+ static bool PlusEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) == 0;
+ }
+ // Returns a + b <= c
+ static bool PlusLessEqual(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) <= 0;
+ }
+ // Returns a + b < c
+ static bool PlusLess(const Bignum& a, const Bignum& b, const Bignum& c) {
+ return PlusCompare(a, b, c) < 0;
+ }
+ private:
+ typedef uint32_t Chunk;
+ typedef uint64_t DoubleChunk;
+
+ static const int kChunkSize = sizeof(Chunk) * 8;
+ static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
+  // With a bigit size of 28 we lose some bits, but a double still fits easily
+ // into two chunks, and more importantly we can use the Comba multiplication.
+ static const int kBigitSize = 28;
+ static const Chunk kBigitMask = (1 << kBigitSize) - 1;
+  // Every instance allocates kBigitCapacity chunks on the stack. Bignums cannot
+ // grow. There are no checks if the stack-allocated space is sufficient.
+ static const int kBigitCapacity = kMaxSignificantBits / kBigitSize;
+
+ void EnsureCapacity(int size) {
+ if (size > kBigitCapacity) {
+ UNREACHABLE();
+ }
+ }
+ void Align(const Bignum& other);
+ void Clamp();
+ bool IsClamped() const;
+ void Zero();
+ // Requires this to have enough capacity (no tests done).
+ // Updates used_digits_ if necessary.
+ // shift_amount must be < kBigitSize.
+ void BigitsShiftLeft(int shift_amount);
+ // BigitLength includes the "hidden" digits encoded in the exponent.
+ int BigitLength() const { return used_digits_ + exponent_; }
+ Chunk BigitAt(int index) const;
+ void SubtractTimes(const Bignum& other, int factor);
+
+ Chunk bigits_buffer_[kBigitCapacity];
+ // A vector backed by bigits_buffer_. This way accesses to the array are
+ // checked for out-of-bounds errors.
+ Vector<Chunk> bigits_;
+ int used_digits_;
+ // The Bignum's value equals value(bigits_) * 2^(exponent_ * kBigitSize).
+ int exponent_;
+
+ DISALLOW_COPY_AND_ASSIGN(Bignum);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_BIGNUM_H_
diff --git a/src/3rdparty/double-conversion/cached-powers.cc b/src/3rdparty/double-conversion/cached-powers.cc
new file mode 100644
index 0000000000..c676429194
--- /dev/null
+++ b/src/3rdparty/double-conversion/cached-powers.cc
@@ -0,0 +1,175 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+#include <limits.h>
+#include <math.h>
+
+#include "utils.h"
+
+#include "cached-powers.h"
+
+namespace double_conversion {
+
+struct CachedPower {
+ uint64_t significand;
+ int16_t binary_exponent;
+ int16_t decimal_exponent;
+};
+
+static const CachedPower kCachedPowers[] = {
+ {UINT64_2PART_C(0xfa8fd5a0, 081c0288), -1220, -348},
+ {UINT64_2PART_C(0xbaaee17f, a23ebf76), -1193, -340},
+ {UINT64_2PART_C(0x8b16fb20, 3055ac76), -1166, -332},
+ {UINT64_2PART_C(0xcf42894a, 5dce35ea), -1140, -324},
+ {UINT64_2PART_C(0x9a6bb0aa, 55653b2d), -1113, -316},
+ {UINT64_2PART_C(0xe61acf03, 3d1a45df), -1087, -308},
+ {UINT64_2PART_C(0xab70fe17, c79ac6ca), -1060, -300},
+ {UINT64_2PART_C(0xff77b1fc, bebcdc4f), -1034, -292},
+ {UINT64_2PART_C(0xbe5691ef, 416bd60c), -1007, -284},
+ {UINT64_2PART_C(0x8dd01fad, 907ffc3c), -980, -276},
+ {UINT64_2PART_C(0xd3515c28, 31559a83), -954, -268},
+ {UINT64_2PART_C(0x9d71ac8f, ada6c9b5), -927, -260},
+ {UINT64_2PART_C(0xea9c2277, 23ee8bcb), -901, -252},
+ {UINT64_2PART_C(0xaecc4991, 4078536d), -874, -244},
+ {UINT64_2PART_C(0x823c1279, 5db6ce57), -847, -236},
+ {UINT64_2PART_C(0xc2109436, 4dfb5637), -821, -228},
+ {UINT64_2PART_C(0x9096ea6f, 3848984f), -794, -220},
+ {UINT64_2PART_C(0xd77485cb, 25823ac7), -768, -212},
+ {UINT64_2PART_C(0xa086cfcd, 97bf97f4), -741, -204},
+ {UINT64_2PART_C(0xef340a98, 172aace5), -715, -196},
+ {UINT64_2PART_C(0xb23867fb, 2a35b28e), -688, -188},
+ {UINT64_2PART_C(0x84c8d4df, d2c63f3b), -661, -180},
+ {UINT64_2PART_C(0xc5dd4427, 1ad3cdba), -635, -172},
+ {UINT64_2PART_C(0x936b9fce, bb25c996), -608, -164},
+ {UINT64_2PART_C(0xdbac6c24, 7d62a584), -582, -156},
+ {UINT64_2PART_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+ {UINT64_2PART_C(0xf3e2f893, dec3f126), -529, -140},
+ {UINT64_2PART_C(0xb5b5ada8, aaff80b8), -502, -132},
+ {UINT64_2PART_C(0x87625f05, 6c7c4a8b), -475, -124},
+ {UINT64_2PART_C(0xc9bcff60, 34c13053), -449, -116},
+ {UINT64_2PART_C(0x964e858c, 91ba2655), -422, -108},
+ {UINT64_2PART_C(0xdff97724, 70297ebd), -396, -100},
+ {UINT64_2PART_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+ {UINT64_2PART_C(0xf8a95fcf, 88747d94), -343, -84},
+ {UINT64_2PART_C(0xb9447093, 8fa89bcf), -316, -76},
+ {UINT64_2PART_C(0x8a08f0f8, bf0f156b), -289, -68},
+ {UINT64_2PART_C(0xcdb02555, 653131b6), -263, -60},
+ {UINT64_2PART_C(0x993fe2c6, d07b7fac), -236, -52},
+ {UINT64_2PART_C(0xe45c10c4, 2a2b3b06), -210, -44},
+ {UINT64_2PART_C(0xaa242499, 697392d3), -183, -36},
+ {UINT64_2PART_C(0xfd87b5f2, 8300ca0e), -157, -28},
+ {UINT64_2PART_C(0xbce50864, 92111aeb), -130, -20},
+ {UINT64_2PART_C(0x8cbccc09, 6f5088cc), -103, -12},
+ {UINT64_2PART_C(0xd1b71758, e219652c), -77, -4},
+ {UINT64_2PART_C(0x9c400000, 00000000), -50, 4},
+ {UINT64_2PART_C(0xe8d4a510, 00000000), -24, 12},
+ {UINT64_2PART_C(0xad78ebc5, ac620000), 3, 20},
+ {UINT64_2PART_C(0x813f3978, f8940984), 30, 28},
+ {UINT64_2PART_C(0xc097ce7b, c90715b3), 56, 36},
+ {UINT64_2PART_C(0x8f7e32ce, 7bea5c70), 83, 44},
+ {UINT64_2PART_C(0xd5d238a4, abe98068), 109, 52},
+ {UINT64_2PART_C(0x9f4f2726, 179a2245), 136, 60},
+ {UINT64_2PART_C(0xed63a231, d4c4fb27), 162, 68},
+ {UINT64_2PART_C(0xb0de6538, 8cc8ada8), 189, 76},
+ {UINT64_2PART_C(0x83c7088e, 1aab65db), 216, 84},
+ {UINT64_2PART_C(0xc45d1df9, 42711d9a), 242, 92},
+ {UINT64_2PART_C(0x924d692c, a61be758), 269, 100},
+ {UINT64_2PART_C(0xda01ee64, 1a708dea), 295, 108},
+ {UINT64_2PART_C(0xa26da399, 9aef774a), 322, 116},
+ {UINT64_2PART_C(0xf209787b, b47d6b85), 348, 124},
+ {UINT64_2PART_C(0xb454e4a1, 79dd1877), 375, 132},
+ {UINT64_2PART_C(0x865b8692, 5b9bc5c2), 402, 140},
+ {UINT64_2PART_C(0xc83553c5, c8965d3d), 428, 148},
+ {UINT64_2PART_C(0x952ab45c, fa97a0b3), 455, 156},
+ {UINT64_2PART_C(0xde469fbd, 99a05fe3), 481, 164},
+ {UINT64_2PART_C(0xa59bc234, db398c25), 508, 172},
+ {UINT64_2PART_C(0xf6c69a72, a3989f5c), 534, 180},
+ {UINT64_2PART_C(0xb7dcbf53, 54e9bece), 561, 188},
+ {UINT64_2PART_C(0x88fcf317, f22241e2), 588, 196},
+ {UINT64_2PART_C(0xcc20ce9b, d35c78a5), 614, 204},
+ {UINT64_2PART_C(0x98165af3, 7b2153df), 641, 212},
+ {UINT64_2PART_C(0xe2a0b5dc, 971f303a), 667, 220},
+ {UINT64_2PART_C(0xa8d9d153, 5ce3b396), 694, 228},
+ {UINT64_2PART_C(0xfb9b7cd9, a4a7443c), 720, 236},
+ {UINT64_2PART_C(0xbb764c4c, a7a44410), 747, 244},
+ {UINT64_2PART_C(0x8bab8eef, b6409c1a), 774, 252},
+ {UINT64_2PART_C(0xd01fef10, a657842c), 800, 260},
+ {UINT64_2PART_C(0x9b10a4e5, e9913129), 827, 268},
+ {UINT64_2PART_C(0xe7109bfb, a19c0c9d), 853, 276},
+ {UINT64_2PART_C(0xac2820d9, 623bf429), 880, 284},
+ {UINT64_2PART_C(0x80444b5e, 7aa7cf85), 907, 292},
+ {UINT64_2PART_C(0xbf21e440, 03acdd2d), 933, 300},
+ {UINT64_2PART_C(0x8e679c2f, 5e44ff8f), 960, 308},
+ {UINT64_2PART_C(0xd433179d, 9c8cb841), 986, 316},
+ {UINT64_2PART_C(0x9e19db92, b4e31ba9), 1013, 324},
+ {UINT64_2PART_C(0xeb96bf6e, badf77d9), 1039, 332},
+ {UINT64_2PART_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+};
+
+static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
+static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
+static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
+// Difference between the decimal exponents in the table above.
+const int PowersOfTenCache::kDecimalExponentDistance = 8;
+const int PowersOfTenCache::kMinDecimalExponent = -348;
+const int PowersOfTenCache::kMaxDecimalExponent = 340;
+
+void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ int min_exponent,
+ int max_exponent,
+ DiyFp* power,
+ int* decimal_exponent) {
+ int kQ = DiyFp::kSignificandSize;
+ double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
+  int index = (kCachedPowersOffset + static_cast<int>(k) - 1) /
+      kDecimalExponentDistance + 1;
+ ASSERT(0 <= index && index < kCachedPowersLength);
+ CachedPower cached_power = kCachedPowers[index];
+ ASSERT(min_exponent <= cached_power.binary_exponent);
+ ASSERT(cached_power.binary_exponent <= max_exponent);
+ *decimal_exponent = cached_power.decimal_exponent;
+ *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
+}
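+
+
+// Worked example (editor's addition, not from the upstream sources): for
+// min_exponent = -60 and q = 64,
+//   k = ceil((-60 + 64 - 1) * 0.30103) = ceil(0.903) = 1,
+// so index = (348 + 1 - 1) / 8 + 1 = 44, which selects the entry
+// {UINT64_2PART_C(0x9c400000, 00000000), -50, 4}: 10^4, whose binary
+// exponent -50 lies in the requested range.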
+
+
+void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
+ DiyFp* power,
+ int* found_exponent) {
+ ASSERT(kMinDecimalExponent <= requested_exponent);
+ ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
+ int index =
+ (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
+ CachedPower cached_power = kCachedPowers[index];
+ *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
+ *found_exponent = cached_power.decimal_exponent;
+ ASSERT(*found_exponent <= requested_exponent);
+ ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/cached-powers.h b/src/3rdparty/double-conversion/cached-powers.h
new file mode 100644
index 0000000000..61a50614cf
--- /dev/null
+++ b/src/3rdparty/double-conversion/cached-powers.h
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_CACHED_POWERS_H_
+#define DOUBLE_CONVERSION_CACHED_POWERS_H_
+
+#include "diy-fp.h"
+
+namespace double_conversion {
+
+class PowersOfTenCache {
+ public:
+
+ // Not all powers of ten are cached. The decimal exponent of two neighboring
+ // cached numbers will differ by kDecimalExponentDistance.
+ static const int kDecimalExponentDistance;
+
+ static const int kMinDecimalExponent;
+ static const int kMaxDecimalExponent;
+
+ // Returns a cached power-of-ten with a binary exponent in the range
+ // [min_exponent; max_exponent] (boundaries included).
+ static void GetCachedPowerForBinaryExponentRange(int min_exponent,
+ int max_exponent,
+ DiyFp* power,
+ int* decimal_exponent);
+
+  // Returns a cached power of ten x ~= 10^k such that
+  // k <= requested_exponent < k + kDecimalExponentDistance.
+ // The given decimal_exponent must satisfy
+ // kMinDecimalExponent <= requested_exponent, and
+ // requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance.
+ static void GetCachedPowerForDecimalExponent(int requested_exponent,
+ DiyFp* power,
+ int* found_exponent);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_
diff --git a/src/3rdparty/double-conversion/diy-fp.cc b/src/3rdparty/double-conversion/diy-fp.cc
new file mode 100644
index 0000000000..ddd1891b16
--- /dev/null
+++ b/src/3rdparty/double-conversion/diy-fp.cc
@@ -0,0 +1,57 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "diy-fp.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+void DiyFp::Multiply(const DiyFp& other) {
+ // Simply "emulates" a 128 bit multiplication.
+ // However: the resulting number only contains 64 bits. The least
+ // significant 64 bits are only used for rounding the most significant 64
+ // bits.
+ const uint64_t kM32 = 0xFFFFFFFFU;
+ uint64_t a = f_ >> 32;
+ uint64_t b = f_ & kM32;
+ uint64_t c = other.f_ >> 32;
+ uint64_t d = other.f_ & kM32;
+ uint64_t ac = a * c;
+ uint64_t bc = b * c;
+ uint64_t ad = a * d;
+ uint64_t bd = b * d;
+ uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
+ // By adding 1U << 31 to tmp we round the final result.
+  // Halfway cases will be rounded up.
+ tmp += 1U << 31;
+ uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ e_ += other.e_ + 64;
+ f_ = result_f;
+}
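+
+
+// Worked equation (editor's addition, not from the upstream sources): with
+// f_ = a * 2^32 + b and other.f_ = c * 2^32 + d, the exact product is
+//   f_ * other.f_ = ac * 2^64 + (ad + bc) * 2^32 + bd.
+// The code keeps only the upper 64 bits: ac plus the high halves of ad and
+// bc, plus the carry out of tmp (the middle 32-bit columns, with 2^31 added
+// so that the discarded low half rounds to nearest). Dropping 64 low bits is
+// what makes the exponent grow by other.e_ + 64.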
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/diy-fp.h b/src/3rdparty/double-conversion/diy-fp.h
new file mode 100644
index 0000000000..9dcf8fbdba
--- /dev/null
+++ b/src/3rdparty/double-conversion/diy-fp.h
@@ -0,0 +1,118 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DIY_FP_H_
+#define DOUBLE_CONVERSION_DIY_FP_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// This "Do It Yourself Floating Point" class implements a floating-point number
+// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
+// have the most significant bit of the significand set.
+// Multiplication and Subtraction do not normalize their results.
+// DiyFp are not designed to contain special doubles (NaN and Infinity).
+class DiyFp {
+ public:
+ static const int kSignificandSize = 64;
+
+ DiyFp() : f_(0), e_(0) {}
+ DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
+
+ // this = this - other.
+ // The exponents of both numbers must be the same and the significand of this
+ // must be bigger than the significand of other.
+ // The result will not be normalized.
+ void Subtract(const DiyFp& other) {
+ ASSERT(e_ == other.e_);
+ ASSERT(f_ >= other.f_);
+ f_ -= other.f_;
+ }
+
+ // Returns a - b.
+  // The exponents of both numbers must be the same and a must be bigger
+  // than b. The result will not be normalized.
+ static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Subtract(b);
+ return result;
+ }
+
+
+ // this = this * other.
+ void Multiply(const DiyFp& other);
+
+  // Returns a * b.
+ static DiyFp Times(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Multiply(b);
+ return result;
+ }
+
+ void Normalize() {
+ ASSERT(f_ != 0);
+ uint64_t f = f_;
+ int e = e_;
+
+ // This method is mainly called for normalizing boundaries. In general
+ // boundaries need to be shifted by 10 bits. We thus optimize for this case.
+ const uint64_t k10MSBits = UINT64_2PART_C(0xFFC00000, 00000000);
+ while ((f & k10MSBits) == 0) {
+ f <<= 10;
+ e -= 10;
+ }
+ while ((f & kUint64MSB) == 0) {
+ f <<= 1;
+ e--;
+ }
+ f_ = f;
+ e_ = e;
+ }
+
+ static DiyFp Normalize(const DiyFp& a) {
+ DiyFp result = a;
+ result.Normalize();
+ return result;
+ }
+
+ uint64_t f() const { return f_; }
+ int e() const { return e_; }
+
+ void set_f(uint64_t new_value) { f_ = new_value; }
+ void set_e(int new_value) { e_ = new_value; }
+
+ private:
+ static const uint64_t kUint64MSB = UINT64_2PART_C(0x80000000, 00000000);
+
+ uint64_t f_;
+ int e_;
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DIY_FP_H_
diff --git a/src/3rdparty/double-conversion/double-conversion.cc b/src/3rdparty/double-conversion/double-conversion.cc
new file mode 100644
index 0000000000..a79fe92d22
--- /dev/null
+++ b/src/3rdparty/double-conversion/double-conversion.cc
@@ -0,0 +1,889 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <limits.h>
+#include <math.h>
+
+#include "double-conversion.h"
+
+#include "bignum-dtoa.h"
+#include "fast-dtoa.h"
+#include "fixed-dtoa.h"
+#include "ieee.h"
+#include "strtod.h"
+#include "utils.h"
+
+namespace double_conversion {
+
+const DoubleToStringConverter& DoubleToStringConverter::EcmaScriptConverter() {
+ int flags = UNIQUE_ZERO | EMIT_POSITIVE_EXPONENT_SIGN;
+ static DoubleToStringConverter converter(flags,
+ "Infinity",
+ "NaN",
+ 'e',
+ -6, 21,
+ 6, 0);
+ return converter;
+}
+
+
+bool DoubleToStringConverter::HandleSpecialValues(
+ double value,
+ StringBuilder* result_builder) const {
+ Double double_inspect(value);
+ if (double_inspect.IsInfinite()) {
+ if (infinity_symbol_ == NULL) return false;
+ if (value < 0) {
+ result_builder->AddCharacter('-');
+ }
+ result_builder->AddString(infinity_symbol_);
+ return true;
+ }
+ if (double_inspect.IsNan()) {
+ if (nan_symbol_ == NULL) return false;
+ result_builder->AddString(nan_symbol_);
+ return true;
+ }
+ return false;
+}
+
+
+void DoubleToStringConverter::CreateExponentialRepresentation(
+ const char* decimal_digits,
+ int length,
+ int exponent,
+ StringBuilder* result_builder) const {
+ ASSERT(length != 0);
+ result_builder->AddCharacter(decimal_digits[0]);
+ if (length != 1) {
+ result_builder->AddCharacter('.');
+ result_builder->AddSubstring(&decimal_digits[1], length-1);
+ }
+ result_builder->AddCharacter(exponent_character_);
+ if (exponent < 0) {
+ result_builder->AddCharacter('-');
+ exponent = -exponent;
+ } else {
+ if ((flags_ & EMIT_POSITIVE_EXPONENT_SIGN) != 0) {
+ result_builder->AddCharacter('+');
+ }
+ }
+ if (exponent == 0) {
+ result_builder->AddCharacter('0');
+ return;
+ }
+ ASSERT(exponent < 1e4);
+ const int kMaxExponentLength = 5;
+ char buffer[kMaxExponentLength + 1];
+ buffer[kMaxExponentLength] = '\0';
+ int first_char_pos = kMaxExponentLength;
+ while (exponent > 0) {
+ buffer[--first_char_pos] = '0' + (exponent % 10);
+ exponent /= 10;
+ }
+ result_builder->AddSubstring(&buffer[first_char_pos],
+ kMaxExponentLength - first_char_pos);
+}
+
+
+void DoubleToStringConverter::CreateDecimalRepresentation(
+ const char* decimal_digits,
+ int length,
+ int decimal_point,
+ int digits_after_point,
+ StringBuilder* result_builder) const {
+ // Create a representation that is padded with zeros if needed.
+ if (decimal_point <= 0) {
+ // "0.00000decimal_rep".
+ result_builder->AddCharacter('0');
+ if (digits_after_point > 0) {
+ result_builder->AddCharacter('.');
+ result_builder->AddPadding('0', -decimal_point);
+ ASSERT(length <= digits_after_point - (-decimal_point));
+ result_builder->AddSubstring(decimal_digits, length);
+ int remaining_digits = digits_after_point - (-decimal_point) - length;
+ result_builder->AddPadding('0', remaining_digits);
+ }
+ } else if (decimal_point >= length) {
+ // "decimal_rep0000.00000" or "decimal_rep.0000"
+ result_builder->AddSubstring(decimal_digits, length);
+ result_builder->AddPadding('0', decimal_point - length);
+ if (digits_after_point > 0) {
+ result_builder->AddCharacter('.');
+ result_builder->AddPadding('0', digits_after_point);
+ }
+ } else {
+ // "decima.l_rep000"
+ ASSERT(digits_after_point > 0);
+ result_builder->AddSubstring(decimal_digits, decimal_point);
+ result_builder->AddCharacter('.');
+ ASSERT(length - decimal_point <= digits_after_point);
+ result_builder->AddSubstring(&decimal_digits[decimal_point],
+ length - decimal_point);
+ int remaining_digits = digits_after_point - (length - decimal_point);
+ result_builder->AddPadding('0', remaining_digits);
+ }
+ if (digits_after_point == 0) {
+ if ((flags_ & EMIT_TRAILING_DECIMAL_POINT) != 0) {
+ result_builder->AddCharacter('.');
+ }
+ if ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) {
+ result_builder->AddCharacter('0');
+ }
+ }
+}
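+
+
+// Illustration (editor's addition, not from the upstream sources): the three
+// padding cases above, shown with the digits "123":
+//   decimal_point = -1, digits_after_point = 5  ->  "0.01230"
+//   decimal_point =  5, digits_after_point = 2  ->  "12300.00"
+//   decimal_point =  2, digits_after_point = 4  ->  "12.3000"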
+
+
+bool DoubleToStringConverter::ToShortestIeeeNumber(
+ double value,
+ StringBuilder* result_builder,
+ DoubleToStringConverter::DtoaMode mode) const {
+ ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE);
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ int decimal_point;
+ bool sign;
+ const int kDecimalRepCapacity = kBase10MaximalLength + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ DoubleToAscii(value, mode, 0, decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+
+ bool unique_zero = (flags_ & UNIQUE_ZERO) != 0;
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ int exponent = decimal_point - 1;
+ if ((decimal_in_shortest_low_ <= exponent) &&
+ (exponent < decimal_in_shortest_high_)) {
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length,
+ decimal_point,
+ Max(0, decimal_rep_length - decimal_point),
+ result_builder);
+ } else {
+ CreateExponentialRepresentation(decimal_rep, decimal_rep_length, exponent,
+ result_builder);
+ }
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const {
+ ASSERT(kMaxFixedDigitsBeforePoint == 60);
+ const double kFirstNonFixed = 1e60;
+
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (requested_digits > kMaxFixedDigitsAfterPoint) return false;
+ if (value >= kFirstNonFixed || value <= -kFirstNonFixed) return false;
+
+ // Find a sufficiently precise decimal representation of n.
+ int decimal_point;
+ bool sign;
+ // Add space for the '\0' byte.
+ const int kDecimalRepCapacity =
+ kMaxFixedDigitsBeforePoint + kMaxFixedDigitsAfterPoint + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+ DoubleToAscii(value, FIXED, requested_digits,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
+ requested_digits, result_builder);
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToExponential(
+ double value,
+ int requested_digits,
+ StringBuilder* result_builder) const {
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (requested_digits < -1) return false;
+ if (requested_digits > kMaxExponentialDigits) return false;
+
+ int decimal_point;
+ bool sign;
+ // Add space for digit before the decimal point and the '\0' character.
+ const int kDecimalRepCapacity = kMaxExponentialDigits + 2;
+ ASSERT(kDecimalRepCapacity > kBase10MaximalLength);
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ if (requested_digits == -1) {
+ DoubleToAscii(value, SHORTEST, 0,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ } else {
+ DoubleToAscii(value, PRECISION, requested_digits + 1,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ ASSERT(decimal_rep_length <= requested_digits + 1);
+
+ for (int i = decimal_rep_length; i < requested_digits + 1; ++i) {
+ decimal_rep[i] = '0';
+ }
+ decimal_rep_length = requested_digits + 1;
+ }
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+ int exponent = decimal_point - 1;
+ CreateExponentialRepresentation(decimal_rep,
+ decimal_rep_length,
+ exponent,
+ result_builder);
+ return true;
+}
+
+
+bool DoubleToStringConverter::ToPrecision(double value,
+ int precision,
+ StringBuilder* result_builder) const {
+ if (Double(value).IsSpecial()) {
+ return HandleSpecialValues(value, result_builder);
+ }
+
+ if (precision < kMinPrecisionDigits || precision > kMaxPrecisionDigits) {
+ return false;
+ }
+
+ // Find a sufficiently precise decimal representation of n.
+ int decimal_point;
+ bool sign;
+ // Add one for the terminating null character.
+ const int kDecimalRepCapacity = kMaxPrecisionDigits + 1;
+ char decimal_rep[kDecimalRepCapacity];
+ int decimal_rep_length;
+
+ DoubleToAscii(value, PRECISION, precision,
+ decimal_rep, kDecimalRepCapacity,
+ &sign, &decimal_rep_length, &decimal_point);
+ ASSERT(decimal_rep_length <= precision);
+
+ bool unique_zero = ((flags_ & UNIQUE_ZERO) != 0);
+ if (sign && (value != 0.0 || !unique_zero)) {
+ result_builder->AddCharacter('-');
+ }
+
+  // The exponent if we print the number as x.xxeyyy. That is, with the
+  // decimal point after the first digit.
+ int exponent = decimal_point - 1;
+
+ int extra_zero = ((flags_ & EMIT_TRAILING_ZERO_AFTER_POINT) != 0) ? 1 : 0;
+ if ((-decimal_point + 1 > max_leading_padding_zeroes_in_precision_mode_) ||
+ (decimal_point - precision + extra_zero >
+ max_trailing_padding_zeroes_in_precision_mode_)) {
+ // Fill buffer to contain 'precision' digits.
+    // Usually the buffer is already at the correct length, but 'DoubleToAscii'
+    // is allowed to return fewer characters.
+ for (int i = decimal_rep_length; i < precision; ++i) {
+ decimal_rep[i] = '0';
+ }
+
+ CreateExponentialRepresentation(decimal_rep,
+ precision,
+ exponent,
+ result_builder);
+ } else {
+ CreateDecimalRepresentation(decimal_rep, decimal_rep_length, decimal_point,
+ Max(0, precision - decimal_point),
+ result_builder);
+ }
+ return true;
+}
+
+
+static BignumDtoaMode DtoaToBignumDtoaMode(
+ DoubleToStringConverter::DtoaMode dtoa_mode) {
+ switch (dtoa_mode) {
+ case DoubleToStringConverter::SHORTEST: return BIGNUM_DTOA_SHORTEST;
+ case DoubleToStringConverter::SHORTEST_SINGLE:
+ return BIGNUM_DTOA_SHORTEST_SINGLE;
+ case DoubleToStringConverter::FIXED: return BIGNUM_DTOA_FIXED;
+ case DoubleToStringConverter::PRECISION: return BIGNUM_DTOA_PRECISION;
+ default:
+ UNREACHABLE();
+ return BIGNUM_DTOA_SHORTEST; // To silence compiler.
+ }
+}
+
+
+void DoubleToStringConverter::DoubleToAscii(double v,
+ DtoaMode mode,
+ int requested_digits,
+ char* buffer,
+ int buffer_length,
+ bool* sign,
+ int* length,
+ int* point) {
+ Vector<char> vector(buffer, buffer_length);
+ ASSERT(!Double(v).IsSpecial());
+ ASSERT(mode == SHORTEST || mode == SHORTEST_SINGLE || requested_digits >= 0);
+
+ if (Double(v).Sign() < 0) {
+ *sign = true;
+ v = -v;
+ } else {
+ *sign = false;
+ }
+
+ if (mode == PRECISION && requested_digits == 0) {
+ vector[0] = '\0';
+ *length = 0;
+ return;
+ }
+
+ if (v == 0) {
+ vector[0] = '0';
+ vector[1] = '\0';
+ *length = 1;
+ *point = 1;
+ return;
+ }
+
+ bool fast_worked;
+ switch (mode) {
+ case SHORTEST:
+ fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST, 0, vector, length, point);
+ break;
+ case SHORTEST_SINGLE:
+ fast_worked = FastDtoa(v, FAST_DTOA_SHORTEST_SINGLE, 0,
+ vector, length, point);
+ break;
+ case FIXED:
+ fast_worked = FastFixedDtoa(v, requested_digits, vector, length, point);
+ break;
+ case PRECISION:
+ fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
+ vector, length, point);
+ break;
+ default:
+ UNREACHABLE();
+ fast_worked = false;
+ }
+ if (fast_worked) return;
+
+ // If the fast dtoa didn't succeed use the slower bignum version.
+ BignumDtoaMode bignum_mode = DtoaToBignumDtoaMode(mode);
+ BignumDtoa(v, bignum_mode, requested_digits, vector, length, point);
+ vector[*length] = '\0';
+}
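+
+
+// Illustration (editor's sketch, not part of the upstream sources): a
+// hypothetical, unused helper showing the output convention of DoubleToAscii,
+// where the produced digits represent 0.<digits> * 10^point.
+static void ExampleDoubleToAscii() {
+  char digits[DoubleToStringConverter::kBase10MaximalLength + 1];
+  bool sign;
+  int length;
+  int point;
+  DoubleToStringConverter::DoubleToAscii(
+      0.1, DoubleToStringConverter::SHORTEST, 0,
+      digits, sizeof(digits), &sign, &length, &point);
+  // digits == "1", length == 1, point == 0: the value is 0.1 * 10^0.
+  (void)sign; (void)length; (void)point;
+}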
+
+
+// Consumes the given substring from the iterator.
+// Returns false if the substring does not match.
+static bool ConsumeSubString(const char** current,
+ const char* end,
+ const char* substring) {
+ ASSERT(**current == *substring);
+ for (substring++; *substring != '\0'; substring++) {
+ ++*current;
+ if (*current == end || **current != *substring) return false;
+ }
+ ++*current;
+ return true;
+}
+
+
+// Maximum number of significant digits in decimal representation.
+// The longest possible double in decimal representation is
+// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
+// (768 digits). If we parse a number whose first digits are equal to the
+// mean of two adjacent doubles (which could have up to 769 digits), the
+// result must be rounded to the bigger one unless the tail consists of
+// zeros, so we don't need to preserve all of the digits.
+const int kMaxSignificantDigits = 772;
+
+
+// Returns true if a non-space character is found and false if the end is
+// reached.
+static inline bool AdvanceToNonspace(const char** current, const char* end) {
+ while (*current != end) {
+ if (**current != ' ') return true;
+ ++*current;
+ }
+ return false;
+}
+
+
+static bool isDigit(int x, int radix) {
+ return (x >= '0' && x <= '9' && x < '0' + radix)
+ || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
+ || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+
+static double SignedZero(bool sign) {
+ return sign ? -0.0 : 0.0;
+}
+
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+template <int radix_log_2>
+static double RadixStringToIeee(const char* current,
+ const char* end,
+ bool sign,
+ bool allow_trailing_junk,
+ double junk_string_value,
+ bool read_as_double,
+ const char** trailing_pointer) {
+ ASSERT(current != end);
+
+ const int kDoubleSize = Double::kSignificandSize;
+ const int kSingleSize = Single::kSignificandSize;
+  const int kSignificandSize = read_as_double ? kDoubleSize : kSingleSize;
+
+ // Skip leading 0s.
+ while (*current == '0') {
+ ++current;
+ if (current == end) {
+ *trailing_pointer = end;
+ return SignedZero(sign);
+ }
+ }
+
+ int64_t number = 0;
+ int exponent = 0;
+ const int radix = (1 << radix_log_2);
+
+ do {
+ int digit;
+ if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
+ digit = static_cast<char>(*current) - '0';
+ } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
+ digit = static_cast<char>(*current) - 'a' + 10;
+ } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
+ digit = static_cast<char>(*current) - 'A' + 10;
+ } else {
+ if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
+ break;
+ } else {
+ return junk_string_value;
+ }
+ }
+
+ number = number * radix + digit;
+ int overflow = static_cast<int>(number >> kSignificandSize);
+ if (overflow != 0) {
+ // Overflow occurred. Need to determine which direction to round the
+ // result.
+ int overflow_bits_count = 1;
+ while (overflow > 1) {
+ overflow_bits_count++;
+ overflow >>= 1;
+ }
+
+ int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+ int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+ number >>= overflow_bits_count;
+ exponent = overflow_bits_count;
+
+ bool zero_tail = true;
+ while (true) {
+ ++current;
+ if (current == end || !isDigit(*current, radix)) break;
+ zero_tail = zero_tail && *current == '0';
+ exponent += radix_log_2;
+ }
+
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value;
+ }
+
+ int middle_value = (1 << (overflow_bits_count - 1));
+ if (dropped_bits > middle_value) {
+ number++; // Rounding up.
+ } else if (dropped_bits == middle_value) {
+        // Round to even for consistency with decimals: the half-way case
+        // rounds up if the significant part is odd and down otherwise.
+ if ((number & 1) != 0 || !zero_tail) {
+ number++; // Rounding up.
+ }
+ }
+
+ // Rounding up may cause overflow.
+ if ((number & ((int64_t)1 << kSignificandSize)) != 0) {
+ exponent++;
+ number >>= 1;
+ }
+ break;
+ }
+ ++current;
+ } while (current != end);
+
+ ASSERT(number < ((int64_t)1 << kSignificandSize));
+ ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+ *trailing_pointer = current;
+
+ if (exponent == 0) {
+ if (sign) {
+ if (number == 0) return -0.0;
+ number = -number;
+ }
+ return static_cast<double>(number);
+ }
+
+ ASSERT(number != 0);
+ return Double(DiyFp(number, exponent)).value();
+}
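+
+
+// Worked example (editor's addition, not from the upstream sources): parsing
+// "1FFFFFFFFFFFFF9" with radix 16 (value 2^57 - 7) overflows the 53-bit
+// significand by 4 bits. dropped_bits = 0x9 exceeds middle_value = 0x8, so
+// the significand is rounded up; the round-up overflows in turn, leaving
+// significand 2^52 with exponent 5, i.e. the correctly rounded double 2^57.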
+
+
+double StringToDoubleConverter::StringToIeee(
+ const char* input,
+ int length,
+ int* processed_characters_count,
+ bool read_as_double) {
+ const char* current = input;
+ const char* end = input + length;
+
+ *processed_characters_count = 0;
+
+ const bool allow_trailing_junk = (flags_ & ALLOW_TRAILING_JUNK) != 0;
+ const bool allow_leading_spaces = (flags_ & ALLOW_LEADING_SPACES) != 0;
+ const bool allow_trailing_spaces = (flags_ & ALLOW_TRAILING_SPACES) != 0;
+ const bool allow_spaces_after_sign = (flags_ & ALLOW_SPACES_AFTER_SIGN) != 0;
+
+ // To make sure that iterator dereferencing is valid the following
+ // convention is used:
+ // 1. Each '++current' statement is followed by check for equality to 'end'.
+ // 2. If AdvanceToNonspace returned false then current == end.
+ // 3. If 'current' becomes equal to 'end' the function returns or goes to
+ // 'parsing_done'.
+ // 4. 'current' is not dereferenced after the 'parsing_done' label.
+ // 5. Code before 'parsing_done' may rely on 'current != end'.
+ if (current == end) return empty_string_value_;
+
+ if (allow_leading_spaces || allow_trailing_spaces) {
+ if (!AdvanceToNonspace(&current, end)) {
+ *processed_characters_count = current - input;
+ return empty_string_value_;
+ }
+ if (!allow_leading_spaces && (input != current)) {
+ // No leading spaces allowed, but AdvanceToNonspace moved forward.
+ return junk_string_value_;
+ }
+ }
+
+  // The longest form of a simplified number is: "-<significant digits>.1eXXX\0".
+ const int kBufferSize = kMaxSignificantDigits + 10;
+ char buffer[kBufferSize]; // NOLINT: size is known at compile time.
+ int buffer_pos = 0;
+
+ // Exponent will be adjusted if insignificant digits of the integer part
+ // or insignificant leading zeros of the fractional part are dropped.
+ int exponent = 0;
+ int significant_digits = 0;
+ int insignificant_digits = 0;
+ bool nonzero_digit_dropped = false;
+
+ bool sign = false;
+
+ if (*current == '+' || *current == '-') {
+ sign = (*current == '-');
+ ++current;
+ const char* next_non_space = current;
+ // Skip following spaces (if allowed).
+ if (!AdvanceToNonspace(&next_non_space, end)) return junk_string_value_;
+ if (!allow_spaces_after_sign && (current != next_non_space)) {
+ return junk_string_value_;
+ }
+ current = next_non_space;
+ }
+
+ if (infinity_symbol_ != NULL) {
+ if (*current == infinity_symbol_[0]) {
+ if (!ConsumeSubString(&current, end, infinity_symbol_)) {
+ return junk_string_value_;
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+
+ ASSERT(buffer_pos == 0);
+ *processed_characters_count = current - input;
+ return sign ? -Double::Infinity() : Double::Infinity();
+ }
+ }
+
+ if (nan_symbol_ != NULL) {
+ if (*current == nan_symbol_[0]) {
+ if (!ConsumeSubString(&current, end, nan_symbol_)) {
+ return junk_string_value_;
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+
+ ASSERT(buffer_pos == 0);
+ *processed_characters_count = current - input;
+ return sign ? -Double::NaN() : Double::NaN();
+ }
+ }
+
+ bool leading_zero = false;
+ if (*current == '0') {
+ ++current;
+ if (current == end) {
+ *processed_characters_count = current - input;
+ return SignedZero(sign);
+ }
+
+ leading_zero = true;
+
+    // It could be a hexadecimal value.
+ if ((flags_ & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
+ ++current;
+ if (current == end || !isDigit(*current, 16)) {
+ return junk_string_value_; // "0x".
+ }
+
+ const char* tail_pointer = NULL;
+ double result = RadixStringToIeee<4>(current,
+ end,
+ sign,
+ allow_trailing_junk,
+ junk_string_value_,
+ read_as_double,
+ &tail_pointer);
+ if (tail_pointer != NULL) {
+ if (allow_trailing_spaces) AdvanceToNonspace(&tail_pointer, end);
+ *processed_characters_count = tail_pointer - input;
+ }
+ return result;
+ }
+
+ // Ignore leading zeros in the integer part.
+ while (*current == '0') {
+ ++current;
+ if (current == end) {
+ *processed_characters_count = current - input;
+ return SignedZero(sign);
+ }
+ }
+ }
+
+ bool octal = leading_zero && (flags_ & ALLOW_OCTALS) != 0;
+
+ // Copy significant digits of the integer part (if any) to the buffer.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ // Will later check if it's an octal in the buffer.
+ } else {
+ insignificant_digits++; // Move the digit into the exponential part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ octal = octal && *current < '8';
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+
+ if (significant_digits == 0) {
+ octal = false;
+ }
+
+ if (*current == '.') {
+ if (octal && !allow_trailing_junk) return junk_string_value_;
+ if (octal) goto parsing_done;
+
+ ++current;
+ if (current == end) {
+ if (significant_digits == 0 && !leading_zero) {
+ return junk_string_value_;
+ } else {
+ goto parsing_done;
+ }
+ }
+
+ if (significant_digits == 0) {
+ // octal = false;
+ // Integer part consists of 0 or is absent. Significant digits start after
+ // leading zeros (if any).
+ while (*current == '0') {
+ ++current;
+ if (current == end) {
+ *processed_characters_count = current - input;
+ return SignedZero(sign);
+ }
+ exponent--; // Move this 0 into the exponent.
+ }
+ }
+
+ // There is a fractional part.
+ // We don't emit a '.', but adjust the exponent instead.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ exponent--;
+ } else {
+ // Ignore insignificant digits in the fractional part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+ }
+
+ if (!leading_zero && exponent == 0 && significant_digits == 0) {
+    // If leading_zero is true then the string contains zeros.
+ // If exponent < 0 then string was [+-]\.0*...
+ // If significant_digits != 0 the string is not equal to 0.
+ // Otherwise there are no digits in the string.
+ return junk_string_value_;
+ }
+
+ // Parse exponential part.
+ if (*current == 'e' || *current == 'E') {
+ if (octal && !allow_trailing_junk) return junk_string_value_;
+ if (octal) goto parsing_done;
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+    char sign = '+';  // Sign of the exponent; shadows the mantissa's 'sign'.
+ if (*current == '+' || *current == '-') {
+ sign = static_cast<char>(*current);
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+ }
+
+ if (current == end || *current < '0' || *current > '9') {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return junk_string_value_;
+ }
+ }
+
+ const int max_exponent = INT_MAX / 2;
+ ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+ int num = 0;
+ do {
+      // Check overflow; clamp to max_exponent instead of wrapping around.
+ int digit = *current - '0';
+ if (num >= max_exponent / 10
+ && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+ num = max_exponent;
+ } else {
+ num = num * 10 + digit;
+ }
+ ++current;
+ } while (current != end && *current >= '0' && *current <= '9');
+
+ exponent += (sign == '-' ? -num : num);
+ }
+
+ if (!(allow_trailing_spaces || allow_trailing_junk) && (current != end)) {
+ return junk_string_value_;
+ }
+ if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+ return junk_string_value_;
+ }
+ if (allow_trailing_spaces) {
+ AdvanceToNonspace(&current, end);
+ }
+
+ parsing_done:
+ exponent += insignificant_digits;
+
+ if (octal) {
+ double result;
+ const char* tail_pointer = NULL;
+ result = RadixStringToIeee<3>(buffer,
+ buffer + buffer_pos,
+ sign,
+ allow_trailing_junk,
+ junk_string_value_,
+ read_as_double,
+ &tail_pointer);
+ ASSERT(tail_pointer != NULL);
+ *processed_characters_count = current - input;
+ return result;
+ }
+
+ if (nonzero_digit_dropped) {
+ buffer[buffer_pos++] = '1';
+ exponent--;
+ }
+
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+
+ double converted;
+ if (read_as_double) {
+ converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
+ } else {
+ converted = Strtof(Vector<const char>(buffer, buffer_pos), exponent);
+ }
+ *processed_characters_count = current - input;
+  return sign ? -converted : converted;
+}
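+
+// Worked example (illustrative, not part of the upstream sources): parsing
+// "123.456" copies the digits "123456" into the buffer and decrements
+// 'exponent' once per fractional digit, so Strtod receives
+// buffer == "123456" with exponent == -3, i.e. 123456 * 10^-3 == 123.456.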
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/double-conversion.h b/src/3rdparty/double-conversion/double-conversion.h
new file mode 100644
index 0000000000..f98edae75a
--- /dev/null
+++ b/src/3rdparty/double-conversion/double-conversion.h
@@ -0,0 +1,536 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+#define DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+class DoubleToStringConverter {
+ public:
+ // When calling ToFixed with a double > 10^kMaxFixedDigitsBeforePoint
+ // or a requested_digits parameter > kMaxFixedDigitsAfterPoint then the
+ // function returns false.
+ static const int kMaxFixedDigitsBeforePoint = 60;
+ static const int kMaxFixedDigitsAfterPoint = 60;
+
+ // When calling ToExponential with a requested_digits
+ // parameter > kMaxExponentialDigits then the function returns false.
+ static const int kMaxExponentialDigits = 120;
+
+ // When calling ToPrecision with a requested_digits
+ // parameter < kMinPrecisionDigits or requested_digits > kMaxPrecisionDigits
+ // then the function returns false.
+ static const int kMinPrecisionDigits = 1;
+ static const int kMaxPrecisionDigits = 120;
+
+ enum Flags {
+ NO_FLAGS = 0,
+ EMIT_POSITIVE_EXPONENT_SIGN = 1,
+ EMIT_TRAILING_DECIMAL_POINT = 2,
+ EMIT_TRAILING_ZERO_AFTER_POINT = 4,
+ UNIQUE_ZERO = 8
+ };
+
+ // Flags should be a bit-or combination of the possible Flags-enum.
+ // - NO_FLAGS: no special flags.
+ // - EMIT_POSITIVE_EXPONENT_SIGN: when the number is converted into exponent
+ // form, emits a '+' for positive exponents. Example: 1.2e+2.
+ // - EMIT_TRAILING_DECIMAL_POINT: when the input number is an integer and is
+ // converted into decimal format then a trailing decimal point is appended.
+ // Example: 2345.0 is converted to "2345.".
+ // - EMIT_TRAILING_ZERO_AFTER_POINT: in addition to a trailing decimal point
+ // emits a trailing '0'-character. This flag requires the
+  //    EMIT_TRAILING_DECIMAL_POINT flag.
+ // Example: 2345.0 is converted to "2345.0".
+ // - UNIQUE_ZERO: "-0.0" is converted to "0.0".
+ //
+ // Infinity symbol and nan_symbol provide the string representation for these
+ // special values. If the string is NULL and the special value is encountered
+ // then the conversion functions return false.
+ //
+ // The exponent_character is used in exponential representations. It is
+ // usually 'e' or 'E'.
+ //
+ // When converting to the shortest representation the converter will
+ // represent input numbers in decimal format if they are in the interval
+ // [10^decimal_in_shortest_low; 10^decimal_in_shortest_high[
+ // (lower boundary included, greater boundary excluded).
+ // Example: with decimal_in_shortest_low = -6 and
+ // decimal_in_shortest_high = 21:
+ // ToShortest(0.000001) -> "0.000001"
+ // ToShortest(0.0000001) -> "1e-7"
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
+ //
+ // When converting to precision mode the converter may add
+ // max_leading_padding_zeroes before returning the number in exponential
+ // format.
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
+  // Similarly the converter may add up to
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
+ // returning an exponential representation. A zero added by the
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
+ // ToPrecision(230.0, 2) -> "230"
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
+ DoubleToStringConverter(int flags,
+ const char* infinity_symbol,
+ const char* nan_symbol,
+ char exponent_character,
+ int decimal_in_shortest_low,
+ int decimal_in_shortest_high,
+ int max_leading_padding_zeroes_in_precision_mode,
+ int max_trailing_padding_zeroes_in_precision_mode)
+ : flags_(flags),
+ infinity_symbol_(infinity_symbol),
+ nan_symbol_(nan_symbol),
+ exponent_character_(exponent_character),
+ decimal_in_shortest_low_(decimal_in_shortest_low),
+ decimal_in_shortest_high_(decimal_in_shortest_high),
+ max_leading_padding_zeroes_in_precision_mode_(
+ max_leading_padding_zeroes_in_precision_mode),
+ max_trailing_padding_zeroes_in_precision_mode_(
+ max_trailing_padding_zeroes_in_precision_mode) {
+ // When 'trailing zero after the point' is set, then 'trailing point'
+ // must be set too.
+ ASSERT(((flags & EMIT_TRAILING_DECIMAL_POINT) != 0) ||
+ !((flags & EMIT_TRAILING_ZERO_AFTER_POINT) != 0));
+ }
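+
+  // Usage sketch (illustrative only, not part of the upstream sources): a
+  // converter that always prints integers as "2345.0" could be built as
+  //
+  //   DoubleToStringConverter converter(
+  //       DoubleToStringConverter::EMIT_TRAILING_DECIMAL_POINT |
+  //           DoubleToStringConverter::EMIT_TRAILING_ZERO_AFTER_POINT,
+  //       "Infinity", "NaN", 'e', -6, 21, 6, 1);
+  //
+  // Note that EMIT_TRAILING_ZERO_AFTER_POINT is paired with
+  // EMIT_TRAILING_DECIMAL_POINT, as the ASSERT above requires.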
+
+ // Returns a converter following the EcmaScript specification.
+ static const DoubleToStringConverter& EcmaScriptConverter();
+
+ // Computes the shortest string of digits that correctly represent the input
+ // number. Depending on decimal_in_shortest_low and decimal_in_shortest_high
+ // (see constructor) it then either returns a decimal representation, or an
+ // exponential representation.
+ // Example with decimal_in_shortest_low = -6,
+ // decimal_in_shortest_high = 21,
+ // EMIT_POSITIVE_EXPONENT_SIGN activated, and
+  //              EMIT_TRAILING_DECIMAL_POINT deactivated:
+ // ToShortest(0.000001) -> "0.000001"
+ // ToShortest(0.0000001) -> "1e-7"
+ // ToShortest(111111111111111111111.0) -> "111111111111111110000"
+ // ToShortest(100000000000000000000.0) -> "100000000000000000000"
+ // ToShortest(1111111111111111111111.0) -> "1.1111111111111111e+21"
+ //
+ // Note: the conversion may round the output if the returned string
+ // is accurate enough to uniquely identify the input-number.
+ // For example the most precise representation of the double 9e59 equals
+ // "899999999999999918767229449717619953810131273674690656206848", but
+ // the converter will return the shorter (but still correct) "9e59".
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except when the input value is special and no infinity_symbol or
+ // nan_symbol has been given to the constructor.
+ bool ToShortest(double value, StringBuilder* result_builder) const {
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST);
+ }
+
+ // Same as ToShortest, but for single-precision floats.
+ bool ToShortestSingle(float value, StringBuilder* result_builder) const {
+ return ToShortestIeeeNumber(value, result_builder, SHORTEST_SINGLE);
+ }
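+
+  // Usage sketch (illustrative only, not part of the upstream sources;
+  // assumes the StringBuilder helper declared in utils.h):
+  //
+  //   char buf[128];
+  //   StringBuilder builder(buf, sizeof(buf));
+  //   const DoubleToStringConverter& conv =
+  //       DoubleToStringConverter::EcmaScriptConverter();
+  //   if (conv.ToShortest(0.1, &builder)) {
+  //     // builder.Finalize() now yields "0.1".
+  //   }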
+
+
+ // Computes a decimal representation with a fixed number of digits after the
+ // decimal point. The last emitted digit is rounded.
+ //
+ // Examples:
+ // ToFixed(3.12, 1) -> "3.1"
+ // ToFixed(3.1415, 3) -> "3.142"
+ // ToFixed(1234.56789, 4) -> "1234.5679"
+ // ToFixed(1.23, 5) -> "1.23000"
+ // ToFixed(0.1, 4) -> "0.1000"
+ // ToFixed(1e30, 2) -> "1000000000000000019884624838656.00"
+ // ToFixed(0.1, 30) -> "0.100000000000000005551115123126"
+ // ToFixed(0.1, 17) -> "0.10000000000000001"
+ //
+ // If requested_digits equals 0, then the tail of the result depends on
+ // the EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT.
+ // Examples, for requested_digits == 0,
+ // let EMIT_TRAILING_DECIMAL_POINT and EMIT_TRAILING_ZERO_AFTER_POINT be
+ // - false and false: then 123.45 -> 123
+ // 0.678 -> 1
+ // - true and false: then 123.45 -> 123.
+ // 0.678 -> 1.
+ // - true and true: then 123.45 -> 123.0
+ // 0.678 -> 1.0
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+ // - 'value' > 10^kMaxFixedDigitsBeforePoint, or
+ // - 'requested_digits' > kMaxFixedDigitsAfterPoint.
+ // The last two conditions imply that the result will never contain more than
+ // 1 + kMaxFixedDigitsBeforePoint + 1 + kMaxFixedDigitsAfterPoint characters
+ // (one additional character for the sign, and one for the decimal point).
+ bool ToFixed(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
+ // Computes a representation in exponential format with requested_digits
+ // after the decimal point. The last emitted digit is rounded.
+ // If requested_digits equals -1, then the shortest exponential representation
+ // is computed.
+ //
+ // Examples with EMIT_POSITIVE_EXPONENT_SIGN deactivated, and
+ // exponent_character set to 'e'.
+ // ToExponential(3.12, 1) -> "3.1e0"
+ // ToExponential(5.0, 3) -> "5.000e0"
+ // ToExponential(0.001, 2) -> "1.00e-3"
+ // ToExponential(3.1415, -1) -> "3.1415e0"
+ // ToExponential(3.1415, 4) -> "3.1415e0"
+ // ToExponential(3.1415, 3) -> "3.142e0"
+ // ToExponential(123456789000000, 3) -> "1.235e14"
+ // ToExponential(1000000000000000019884624838656.0, -1) -> "1e30"
+ // ToExponential(1000000000000000019884624838656.0, 32) ->
+ // "1.00000000000000001988462483865600e30"
+ // ToExponential(1234, 0) -> "1e3"
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+ // - 'requested_digits' > kMaxExponentialDigits.
+ // The last condition implies that the result will never contain more than
+ // kMaxExponentialDigits + 8 characters (the sign, the digit before the
+ // decimal point, the decimal point, the exponent character, the
+ // exponent's sign, and at most 3 exponent digits).
+ bool ToExponential(double value,
+ int requested_digits,
+ StringBuilder* result_builder) const;
+
+ // Computes 'precision' leading digits of the given 'value' and returns them
+ // either in exponential or decimal format, depending on
+ // max_{leading|trailing}_padding_zeroes_in_precision_mode (given to the
+ // constructor).
+ // The last computed digit is rounded.
+ //
+ // Example with max_leading_padding_zeroes_in_precision_mode = 6.
+ // ToPrecision(0.0000012345, 2) -> "0.0000012"
+ // ToPrecision(0.00000012345, 2) -> "1.2e-7"
+  // Similarly the converter may add up to
+ // max_trailing_padding_zeroes_in_precision_mode in precision mode to avoid
+ // returning an exponential representation. A zero added by the
+ // EMIT_TRAILING_ZERO_AFTER_POINT flag is counted for this limit.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 1:
+ // ToPrecision(230.0, 2) -> "230"
+ // ToPrecision(230.0, 2) -> "230." with EMIT_TRAILING_DECIMAL_POINT.
+ // ToPrecision(230.0, 2) -> "2.3e2" with EMIT_TRAILING_ZERO_AFTER_POINT.
+ // Examples for max_trailing_padding_zeroes_in_precision_mode = 3, and no
+ // EMIT_TRAILING_ZERO_AFTER_POINT:
+ // ToPrecision(123450.0, 6) -> "123450"
+ // ToPrecision(123450.0, 5) -> "123450"
+ // ToPrecision(123450.0, 4) -> "123500"
+ // ToPrecision(123450.0, 3) -> "123000"
+ // ToPrecision(123450.0, 2) -> "1.2e5"
+ //
+ // Returns true if the conversion succeeds. The conversion always succeeds
+ // except for the following cases:
+ // - the input value is special and no infinity_symbol or nan_symbol has
+ // been provided to the constructor,
+  //   - precision < kMinPrecisionDigits
+ // - precision > kMaxPrecisionDigits
+ // The last condition implies that the result will never contain more than
+ // kMaxPrecisionDigits + 7 characters (the sign, the decimal point, the
+ // exponent character, the exponent's sign, and at most 3 exponent digits).
+ bool ToPrecision(double value,
+ int precision,
+ StringBuilder* result_builder) const;
+
+ enum DtoaMode {
+ // Produce the shortest correct representation.
+ // For example the output of 0.299999999999999988897 is (the less accurate
+ // but correct) 0.3.
+ SHORTEST,
+ // Same as SHORTEST, but for single-precision floats.
+ SHORTEST_SINGLE,
+ // Produce a fixed number of digits after the decimal point.
+ // For instance fixed(0.1, 4) becomes 0.1000
+ // If the input number is big, the output will be big.
+ FIXED,
+ // Fixed number of digits (independent of the decimal point).
+ PRECISION
+ };
+
+ // The maximal number of digits that are needed to emit a double in base 10.
+ // A higher precision can be achieved by using more digits, but the shortest
+ // accurate representation of any double will never use more digits than
+ // kBase10MaximalLength.
+ // Note that DoubleToAscii null-terminates its input. So the given buffer
+ // should be at least kBase10MaximalLength + 1 characters long.
+ static const int kBase10MaximalLength = 17;
+
+ // Converts the given double 'v' to ascii. 'v' must not be NaN, +Infinity, or
+ // -Infinity. In SHORTEST_SINGLE-mode this restriction also applies to 'v'
+  // after it has been cast to a single-precision float. That is, in this
+ // mode static_cast<float>(v) must not be NaN, +Infinity or -Infinity.
+ //
+ // The result should be interpreted as buffer * 10^(point-length).
+ //
+ // The output depends on the given mode:
+ // - SHORTEST: produce the least amount of digits for which the internal
+ // identity requirement is still satisfied. If the digits are printed
+ // (together with the correct exponent) then reading this number will give
+ // 'v' again. The buffer will choose the representation that is closest to
+ // 'v'. If there are two at the same distance, than the one farther away
+ // from 0 is chosen (halfway cases - ending with 5 - are rounded up).
+ // In this mode the 'requested_digits' parameter is ignored.
+ // - SHORTEST_SINGLE: same as SHORTEST but with single-precision.
+ // - FIXED: produces digits necessary to print a given number with
+ // 'requested_digits' digits after the decimal point. The produced digits
+ // might be too short in which case the caller has to fill the remainder
+ // with '0's.
+ // Example: toFixed(0.001, 5) is allowed to return buffer="1", point=-2.
+ // Halfway cases are rounded towards +/-Infinity (away from 0). The call
+ // toFixed(0.15, 2) thus returns buffer="2", point=0.
+ // The returned buffer may contain digits that would be truncated from the
+ // shortest representation of the input.
+ // - PRECISION: produces 'requested_digits' where the first digit is not '0'.
+ // Even though the length of produced digits usually equals
+ // 'requested_digits', the function is allowed to return fewer digits, in
+ // which case the caller has to fill the missing digits with '0's.
+ // Halfway cases are again rounded away from 0.
+ // DoubleToAscii expects the given buffer to be big enough to hold all
+ // digits and a terminating null-character. In SHORTEST-mode it expects a
+ // buffer of at least kBase10MaximalLength + 1. In all other modes the
+ // requested_digits parameter and the padding-zeroes limit the size of the
+ // output. Don't forget the decimal point, the exponent character and the
+ // terminating null-character when computing the maximal output size.
+ // The given length is only used in debug mode to ensure the buffer is big
+ // enough.
+ static void DoubleToAscii(double v,
+ DtoaMode mode,
+ int requested_digits,
+ char* buffer,
+ int buffer_length,
+ bool* sign,
+ int* length,
+ int* point);
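+
+  // Worked example (illustrative, not part of the upstream sources): for
+  // v == 12.345 in SHORTEST mode the outputs are buffer == "12345",
+  // length == 5 and point == 2. Reading the result as
+  // buffer * 10^(point - length) gives 12345 * 10^(2 - 5) == 12.345.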
+
+ private:
+ // Implementation for ToShortest and ToShortestSingle.
+ bool ToShortestIeeeNumber(double value,
+ StringBuilder* result_builder,
+ DtoaMode mode) const;
+
+ // If the value is a special value (NaN or Infinity) constructs the
+ // corresponding string using the configured infinity/nan-symbol.
+ // If either of them is NULL or the value is not special then the
+ // function returns false.
+ bool HandleSpecialValues(double value, StringBuilder* result_builder) const;
+ // Constructs an exponential representation (i.e. 1.234e56).
+ // The given exponent assumes a decimal point after the first decimal digit.
+ void CreateExponentialRepresentation(const char* decimal_digits,
+ int length,
+ int exponent,
+ StringBuilder* result_builder) const;
+  // Creates a decimal representation (i.e. 1234.5678).
+ void CreateDecimalRepresentation(const char* decimal_digits,
+ int length,
+ int decimal_point,
+ int digits_after_point,
+ StringBuilder* result_builder) const;
+
+ const int flags_;
+ const char* const infinity_symbol_;
+ const char* const nan_symbol_;
+ const char exponent_character_;
+ const int decimal_in_shortest_low_;
+ const int decimal_in_shortest_high_;
+ const int max_leading_padding_zeroes_in_precision_mode_;
+ const int max_trailing_padding_zeroes_in_precision_mode_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DoubleToStringConverter);
+};
+
+
+class StringToDoubleConverter {
+ public:
+ // Enumeration for allowing octals and ignoring junk when converting
+ // strings to numbers.
+ enum Flags {
+ NO_FLAGS = 0,
+ ALLOW_HEX = 1,
+ ALLOW_OCTALS = 2,
+ ALLOW_TRAILING_JUNK = 4,
+ ALLOW_LEADING_SPACES = 8,
+ ALLOW_TRAILING_SPACES = 16,
+ ALLOW_SPACES_AFTER_SIGN = 32
+ };
+
+ // Flags should be a bit-or combination of the possible Flags-enum.
+ // - NO_FLAGS: no special flags.
+ // - ALLOW_HEX: recognizes the prefix "0x". Hex numbers may only be integers.
+ // Ex: StringToDouble("0x1234") -> 4660.0
+ // In StringToDouble("0x1234.56") the characters ".56" are trailing
+ // junk. The result of the call is hence dependent on
+ // the ALLOW_TRAILING_JUNK flag and/or the junk value.
+ // With this flag "0x" is a junk-string. Even with ALLOW_TRAILING_JUNK,
+ // the string will not be parsed as "0" followed by junk.
+ //
+ // - ALLOW_OCTALS: recognizes the prefix "0" for octals:
+ // If a sequence of octal digits starts with '0', then the number is
+ // read as octal integer. Octal numbers may only be integers.
+ // Ex: StringToDouble("01234") -> 668.0
+ // StringToDouble("012349") -> 12349.0 // Not a sequence of octal
+ // // digits.
+ // In StringToDouble("01234.56") the characters ".56" are trailing
+ // junk. The result of the call is hence dependent on
+ // the ALLOW_TRAILING_JUNK flag and/or the junk value.
+ // In StringToDouble("01234e56") the characters "e56" are trailing
+ // junk, too.
+ // - ALLOW_TRAILING_JUNK: ignore trailing characters that are not part of
+ // a double literal.
+ // - ALLOW_LEADING_SPACES: skip over leading spaces.
+ // - ALLOW_TRAILING_SPACES: ignore trailing spaces.
+ // - ALLOW_SPACES_AFTER_SIGN: ignore spaces after the sign.
+ // Ex: StringToDouble("- 123.2") -> -123.2.
+ // StringToDouble("+ 123.2") -> 123.2
+ //
+ // empty_string_value is returned when an empty string is given as input.
+ // If ALLOW_LEADING_SPACES or ALLOW_TRAILING_SPACES are set, then a string
+ // containing only spaces is converted to the 'empty_string_value', too.
+ //
+ // junk_string_value is returned when
+ // a) ALLOW_TRAILING_JUNK is not set, and a junk character (a character not
+ // part of a double-literal) is found.
+ // b) ALLOW_TRAILING_JUNK is set, but the string does not start with a
+ // double literal.
+ //
+ // infinity_symbol and nan_symbol are strings that are used to detect
+ // inputs that represent infinity and NaN. They can be null, in which case
+ // they are ignored.
+ // The conversion routine first reads any possible signs. Then it compares the
+ // following character of the input-string with the first character of
+  // the infinity- and nan-symbols. If either matches, the function assumes that
+  // a match has been found, and expects the following input characters to match
+ // the remaining characters of the special-value symbol.
+ // This means that the following restrictions apply to special-value symbols:
+  //  - they must not start with signs ('+' or '-'),
+  //  - they must not have the same first character,
+ // - they must not start with digits.
+ //
+ // Examples:
+ // flags = ALLOW_HEX | ALLOW_TRAILING_JUNK,
+ // empty_string_value = 0.0,
+ // junk_string_value = NaN,
+ // infinity_symbol = "infinity",
+ // nan_symbol = "nan":
+ // StringToDouble("0x1234") -> 4660.0.
+ // StringToDouble("0x1234K") -> 4660.0.
+ // StringToDouble("") -> 0.0 // empty_string_value.
+ // StringToDouble(" ") -> NaN // junk_string_value.
+ // StringToDouble(" 1") -> NaN // junk_string_value.
+ // StringToDouble("0x") -> NaN // junk_string_value.
+ // StringToDouble("-123.45") -> -123.45.
+ // StringToDouble("--123.45") -> NaN // junk_string_value.
+ // StringToDouble("123e45") -> 123e45.
+ // StringToDouble("123E45") -> 123e45.
+ // StringToDouble("123e+45") -> 123e45.
+ // StringToDouble("123E-45") -> 123e-45.
+ // StringToDouble("123e") -> 123.0 // trailing junk ignored.
+ // StringToDouble("123e-") -> 123.0 // trailing junk ignored.
+ // StringToDouble("+NaN") -> NaN // NaN string literal.
+ // StringToDouble("-infinity") -> -inf. // infinity literal.
+ // StringToDouble("Infinity") -> NaN // junk_string_value.
+ //
+  //  flags = ALLOW_OCTALS | ALLOW_LEADING_SPACES,
+ // empty_string_value = 0.0,
+ // junk_string_value = NaN,
+ // infinity_symbol = NULL,
+ // nan_symbol = NULL:
+ // StringToDouble("0x1234") -> NaN // junk_string_value.
+ // StringToDouble("01234") -> 668.0.
+ // StringToDouble("") -> 0.0 // empty_string_value.
+ // StringToDouble(" ") -> 0.0 // empty_string_value.
+ // StringToDouble(" 1") -> 1.0
+ // StringToDouble("0x") -> NaN // junk_string_value.
+ // StringToDouble("0123e45") -> NaN // junk_string_value.
+ // StringToDouble("01239E45") -> 1239e45.
+ // StringToDouble("-infinity") -> NaN // junk_string_value.
+ // StringToDouble("NaN") -> NaN // junk_string_value.
+ StringToDoubleConverter(int flags,
+ double empty_string_value,
+ double junk_string_value,
+ const char* infinity_symbol,
+ const char* nan_symbol)
+ : flags_(flags),
+ empty_string_value_(empty_string_value),
+ junk_string_value_(junk_string_value),
+ infinity_symbol_(infinity_symbol),
+ nan_symbol_(nan_symbol) {
+ }
+
+ // Performs the conversion.
+ // The output parameter 'processed_characters_count' is set to the number
+ // of characters that have been processed to read the number.
+  // Spaces that are processed with ALLOW_{LEADING|TRAILING}_SPACES are included
+ // in the 'processed_characters_count'. Trailing junk is never included.
+ double StringToDouble(const char* buffer,
+ int length,
+ int* processed_characters_count) {
+ return StringToIeee(buffer, length, processed_characters_count, true);
+ }
+
+ // Same as StringToDouble but reads a float.
+ // Note that this is not equivalent to static_cast<float>(StringToDouble(...))
+ // due to potential double-rounding.
+ float StringToFloat(const char* buffer,
+ int length,
+ int* processed_characters_count) {
+ return static_cast<float>(StringToIeee(buffer, length,
+ processed_characters_count, false));
+ }
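+
+  // Usage sketch (illustrative only, not part of the upstream sources; the
+  // quiet-NaN junk value is just one possible choice):
+  //
+  //   StringToDoubleConverter conv(
+  //       StringToDoubleConverter::ALLOW_LEADING_SPACES,
+  //       0.0 /* empty_string_value */,
+  //       std::numeric_limits<double>::quiet_NaN() /* junk_string_value */,
+  //       NULL, NULL);
+  //   int processed;
+  //   double d = conv.StringToDouble("  42.5", 6, &processed);
+  //   // d == 42.5 and processed == 6 (the leading spaces are counted).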
+
+ private:
+ const int flags_;
+ const double empty_string_value_;
+ const double junk_string_value_;
+ const char* const infinity_symbol_;
+ const char* const nan_symbol_;
+
+ double StringToIeee(const char* buffer,
+ int length,
+ int* processed_characters_count,
+ bool read_as_double);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
diff --git a/src/3rdparty/double-conversion/double-conversion.pri b/src/3rdparty/double-conversion/double-conversion.pri
new file mode 100644
index 0000000000..4ad5f9f7a7
--- /dev/null
+++ b/src/3rdparty/double-conversion/double-conversion.pri
@@ -0,0 +1,4 @@
+INCLUDEPATH += $$PWD
+VPATH += $$PWD
+SOURCES += $$PWD/*.cc
+HEADERS += $$PWD/*.h
diff --git a/src/3rdparty/double-conversion/fast-dtoa.cc b/src/3rdparty/double-conversion/fast-dtoa.cc
new file mode 100644
index 0000000000..1a0f823509
--- /dev/null
+++ b/src/3rdparty/double-conversion/fast-dtoa.cc
@@ -0,0 +1,664 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "fast-dtoa.h"
+
+#include "cached-powers.h"
+#include "diy-fp.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// The minimal and maximal target exponent define the range of w's binary
+// exponent, where 'w' is the result of multiplying the input by a cached power
+// of ten.
+//
+// A different range might be chosen on a different platform, to optimize digit
+// generation, but a smaller range requires more powers of ten to be cached.
+static const int kMinimalTargetExponent = -60;
+static const int kMaximalTargetExponent = -32;
+
+
+// Adjusts the last digit of the generated number, and screens out generated
+// solutions that may be inaccurate. A solution may be inaccurate if it is
+// outside the safe interval, or if we cannot prove that it is closer to the
+// input than a neighboring representation of the same length.
+//
+// Input: * buffer containing the digits of too_high / 10^kappa
+// * the buffer's length
+// * distance_too_high_w == (too_high - w).f() * unit
+// * unsafe_interval == (too_high - too_low).f() * unit
+// * rest = (too_high - buffer * 10^kappa).f() * unit
+// * ten_kappa = 10^kappa * unit
+// * unit = the common multiplier
+// Output: returns true if the buffer is guaranteed to contain the closest
+// representable number to the input.
+// Modifies the generated digits in the buffer to approach (round towards) w.
+static bool RoundWeed(Vector<char> buffer,
+ int length,
+ uint64_t distance_too_high_w,
+ uint64_t unsafe_interval,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit) {
+ uint64_t small_distance = distance_too_high_w - unit;
+ uint64_t big_distance = distance_too_high_w + unit;
+ // Let w_low = too_high - big_distance, and
+ // w_high = too_high - small_distance.
+ // Note: w_low < w < w_high
+ //
+ // The real w (* unit) must lie somewhere inside the interval
+ // ]w_low; w_high[ (often written as "(w_low; w_high)")
+
+ // Basically the buffer currently contains a number in the unsafe interval
+ // ]too_low; too_high[ with too_low < w < too_high
+ //
+ // too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ // ^v 1 unit ^ ^ ^ ^
+ // boundary_high --------------------- . . . .
+ // ^v 1 unit . . . .
+ // - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . .
+ // . . ^ . .
+ // . big_distance . . .
+ // . . . . rest
+ // small_distance . . . .
+ // v . . . .
+ // w_high - - - - - - - - - - - - - - - - - - . . . .
+ // ^v 1 unit . . . .
+ // w ---------------------------------------- . . . .
+ // ^v 1 unit v . . .
+ // w_low - - - - - - - - - - - - - - - - - - - - - . . .
+ // . . v
+ // buffer --------------------------------------------------+-------+--------
+ // . .
+ // safe_interval .
+ // v .
+ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - .
+ // ^v 1 unit .
+ // boundary_low ------------------------- unsafe_interval
+ // ^v 1 unit v
+ // too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ //
+ //
+ // Note that the value of buffer could lie anywhere inside the range too_low
+ // to too_high.
+ //
+ // boundary_low, boundary_high and w are approximations of the real boundaries
+ // and v (the input number). They are guaranteed to be precise up to one unit.
+ // In fact the error is guaranteed to be strictly less than one unit.
+ //
+ // Anything that lies outside the unsafe interval is guaranteed not to round
+ // to v when read again.
+ // Anything that lies inside the safe interval is guaranteed to round to v
+ // when read again.
+ // If the number inside the buffer lies inside the unsafe interval but not
+ // inside the safe interval then we simply do not know and bail out (returning
+ // false).
+ //
+ // Similarly we have to take into account the imprecision of 'w' when finding
+ // the closest representation of 'w'. If we have two potential
+ // representations, and one is closer to both w_low and w_high, then we know
+ // it is closer to the actual value v.
+ //
+ // By generating the digits of too_high we got the largest (closest to
+ // too_high) buffer that is still in the unsafe interval. In the case where
+ // w_high < buffer < too_high we try to decrement the buffer.
+ // This way the buffer approaches (rounds towards) w.
+ // There are 3 conditions that stop the decrementation process:
+ // 1) the buffer is already below w_high
+ // 2) decrementing the buffer would make it leave the unsafe interval
+ // 3) decrementing the buffer would yield a number below w_high and farther
+ // away than the current number. In other words:
+ // (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
+ // Instead of using the buffer directly we use its distance to too_high.
+ // Conceptually rest ~= too_high - buffer
+ // We need to do the following tests in this order to avoid over- and
+ // underflows.
+ ASSERT(rest <= unsafe_interval);
+ while (rest < small_distance && // Negated condition 1
+ unsafe_interval - rest >= ten_kappa && // Negated condition 2
+ (rest + ten_kappa < small_distance || // buffer{-1} > w_high
+ small_distance - rest >= rest + ten_kappa - small_distance)) {
+ buffer[length - 1]--;
+ rest += ten_kappa;
+ }
+
+ // We have approached w+ as much as possible. We now test if approaching w-
+ // would require changing the buffer. If yes, then we have two possible
+ // representations close to w, but we cannot decide which one is closer.
+ if (rest < big_distance &&
+ unsafe_interval - rest >= ten_kappa &&
+ (rest + ten_kappa < big_distance ||
+ big_distance - rest > rest + ten_kappa - big_distance)) {
+ return false;
+ }
+
+ // Weeding test.
+ // The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
+ // Since too_low = too_high - unsafe_interval this is equivalent to
+ // [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
+ // Conceptually we have: rest ~= too_high - buffer
+ return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
+}
+
+
+// Rounds the buffer upwards if the result is closer to v by possibly adding
+// 1 to the buffer. If the precision of the calculation is not sufficient to
+// round correctly, return false.
+// The rounding might shift the whole buffer in which case the kappa is
+// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
+//
+// If 2*rest > ten_kappa then the buffer needs to be rounded up.
+// rest can have an error of +/- 1 unit. This function accounts for the
+// imprecision and returns false, if the rounding direction cannot be
+// unambiguously determined.
+//
+// Precondition: rest < ten_kappa.
+static bool RoundWeedCounted(Vector<char> buffer,
+ int length,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit,
+ int* kappa) {
+ ASSERT(rest < ten_kappa);
+ // The following tests are done in a specific order to avoid overflows. They
+ // will work correctly with any uint64 values of rest < ten_kappa and unit.
+ //
+ // If the unit is too big, then we don't know which way to round. For example
+ // a unit of 50 means that the real number lies within rest +/- 50. If
+ // 10^kappa == 40 then there is no way to tell which way to round.
+ if (unit >= ten_kappa) return false;
+ // Even if unit is just half the size of 10^kappa we are already completely
+ // lost. (And after the previous test we know that the expression will not
+ // over/underflow.)
+ if (ten_kappa - unit <= unit) return false;
+ // If 2 * (rest + unit) <= 10^kappa we can safely round down.
+ if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
+ return true;
+ }
+ // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
+ if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
+ // Increment the last digit recursively until we find a non '9' digit.
+ buffer[length - 1]++;
+ for (int i = length - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) break;
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+    // If the first digit is now '0' + 10 we had a buffer with all '9's. With the
+ // exception of the first digit all digits are now '0'. Simply switch the
+ // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
+ // the power (the kappa) is increased.
+ if (buffer[0] == '0' + 10) {
+ buffer[0] = '1';
+ (*kappa) += 1;
+ }
+ return true;
+ }
+ return false;
+}
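+
+// Worked example (illustrative): with ten_kappa == 1000 and unit == 3,
+// rest == 600 gives 2 * (rest - unit) == 1194 >= 1000, so the digits are
+// rounded up. rest == 400 gives 2 * (rest + unit) == 806 <= 1000, so the
+// function safely rounds down (returns true without incrementing). For
+// rest == 501 neither bound holds once the +/- unit error is accounted for,
+// and the function returns false.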
+
+// Returns the biggest power of ten that is less than or equal to the given
+// number. We furthermore receive the maximum number of bits 'number' has.
+//
+// Returns power == 10^(exponent_plus_one-1) such that
+// power <= number < power * 10.
+// If number_bits == 0 then 0^(0-1) is returned.
+// The number of bits must be <= 32.
+// Precondition: number < (1 << (number_bits + 1)).
+
+// Inspired by the method for finding an integer log base 10 from here:
+// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+static unsigned int const kSmallPowersOfTen[] =
+ {0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000,
+ 1000000000};
+
+static void BiggestPowerTen(uint32_t number,
+ int number_bits,
+ uint32_t* power,
+ int* exponent_plus_one) {
+ ASSERT(number < (1u << (number_bits + 1)));
+ // 1233/4096 is approximately 1/lg(10).
+ int exponent_plus_one_guess = ((number_bits + 1) * 1233 >> 12);
+ // We increment to skip over the first entry in the kPowersOf10 table.
+ // Note: kPowersOf10[i] == 10^(i-1).
+ exponent_plus_one_guess++;
+ // We don't have any guarantees that 2^number_bits <= number.
+ // TODO(floitsch): can we change the 'while' into an 'if'? We definitely see
+ // number < (2^number_bits - 1), but I haven't encountered
+ // number < (2^number_bits - 2) yet.
+ while (number < kSmallPowersOfTen[exponent_plus_one_guess]) {
+ exponent_plus_one_guess--;
+ }
+ *power = kSmallPowersOfTen[exponent_plus_one_guess];
+ *exponent_plus_one = exponent_plus_one_guess;
+}
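+
+// Worked example (illustrative): for number == 1234 with number_bits == 11,
+// the guess is ((11 + 1) * 1233 >> 12) + 1 == 4, and kSmallPowersOfTen[4]
+// == 1000 <= 1234, so no correction is needed: *power == 1000 and
+// *exponent_plus_one == 4 (indeed 10^3 <= 1234 < 10^4).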
+
+// Generates the digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+// Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * low, w and high are correct up to 1 ulp (unit in the last place). That
+// is, their error must be less than a unit of their last digits.
+// * low.e() == w.e() == high.e()
+// * low < w < high, and taking into account their error: low~ <= high~
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but len contains the number of digits.
+// * buffer contains the shortest possible decimal digit-sequence
+// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
+// correct values of low and high (without their error).
+// * if more than one decimal representation gives the minimal number of
+// decimal digits then the one closest to W (where W is the correct value
+// of w) is chosen.
+// Remark: this procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This happens rarely (~0.5%).
+//
+// Say, for the sake of example, that
+// w.e() == -48, and w.f() == 0x1234567890abcdef
+// w's value can be computed by w.f() * 2^w.e()
+// We can obtain w's integral digits by simply shifting w.f() by -w.e().
+// -> w's integral part is 0x1234
+// w's fractional part is therefore 0x567890abcdef.
+// Printing w's integral part is easy (simply print 0x1234 in decimal).
+// In order to print its fraction we repeatedly multiply the fraction by 10 and
+// get each digit. For example, the first digit after the point would be computed by
+// (0x567890abcdef * 10) >> 48. -> 3
+// The whole thing becomes slightly more complicated because we want to stop
+// once we have enough digits. That is, once the digits inside the buffer
+// represent 'w' we can stop. Everything inside the interval low - high
+// represents w. However we have to pay attention to low, high and w's
+// imprecision.
+static bool DigitGen(DiyFp low,
+ DiyFp w,
+ DiyFp high,
+ Vector<char> buffer,
+ int* length,
+ int* kappa) {
+ ASSERT(low.e() == w.e() && w.e() == high.e());
+ ASSERT(low.f() + 1 <= high.f() - 1);
+ ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+ // low, w and high are imprecise, but by less than one ulp (unit in the last
+ // place).
+ // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
+ // the new numbers are outside of the interval we want the final
+ // representation to lie in.
+ // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
+ // numbers that are certain to lie in the interval. We will use this fact
+ // later on.
+ // We will now start by generating the digits within the uncertain
+ // interval. Later we will weed out representations that lie outside the safe
+ // interval and thus _might_ lie outside the correct interval.
+ uint64_t unit = 1;
+ DiyFp too_low = DiyFp(low.f() - unit, low.e());
+ DiyFp too_high = DiyFp(high.f() + unit, high.e());
+ // too_low and too_high are guaranteed to lie outside the interval we want the
+ // generated number in.
+ DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
+ // We now cut the input number into two parts: the integral digits and the
+ // fractionals. We will not write any decimal separator though, but adapt
+ // kappa instead.
+ // Reminder: we are currently computing the digits (stored inside the buffer)
+ // such that: too_low < buffer * 10^kappa < too_high
+ // We use too_high for the digit_generation and stop as soon as possible.
+ // If we stop early we effectively round down.
+ DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+ // Division by one is a shift.
+ uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
+ // Modulo by one is an and.
+ uint64_t fractionals = too_high.f() & (one.f() - 1);
+ uint32_t divisor;
+ int divisor_exponent_plus_one;
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+ &divisor, &divisor_exponent_plus_one);
+ *kappa = divisor_exponent_plus_one;
+ *length = 0;
+ // Loop invariant: buffer = too_high / 10^kappa (integer division)
+ // The invariant holds for the first iteration: kappa has been initialized
+ // with the divisor exponent + 1. And the divisor is the biggest power of ten
+ // that is smaller than integrals.
+ while (*kappa > 0) {
+ int digit = integrals / divisor;
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ integrals %= divisor;
+ (*kappa)--;
+ // Note that kappa now equals the exponent of the divisor and that the
+ // invariant thus holds again.
+ uint64_t rest =
+ (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+ // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
+ // Reminder: unsafe_interval.e() == one.e()
+ if (rest < unsafe_interval.f()) {
+ // Rounding down (by not emitting the remaining digits) yields a number
+ // that lies within the unsafe interval.
+ return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
+ unsafe_interval.f(), rest,
+ static_cast<uint64_t>(divisor) << -one.e(), unit);
+ }
+ divisor /= 10;
+ }
+
+ // The integrals have been generated. We are at the point of the decimal
+ // separator. In the following loop we simply multiply the remaining digits by
+ // 10 and divide by one. We just need to pay attention to multiply associated
+ // data (like the interval or 'unit'), too.
+ // Note that the multiplication by 10 does not overflow, because w.e >= -60
+ // and thus one.e >= -60.
+ ASSERT(one.e() >= -60);
+ ASSERT(fractionals < one.f());
+ ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+ while (true) {
+ fractionals *= 10;
+ unit *= 10;
+ unsafe_interval.set_f(unsafe_interval.f() * 10);
+ // Integer division by one.
+ int digit = static_cast<int>(fractionals >> -one.e());
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ fractionals &= one.f() - 1; // Modulo by one.
+ (*kappa)--;
+ if (fractionals < unsafe_interval.f()) {
+ return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
+ unsafe_interval.f(), fractionals, one.f(), unit);
+ }
+ }
+}
+
+
+
+// Generates (at most) requested_digits digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by kMinimalTargetExponent and
+// kMaximalTargetExponent.
+// Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * w is correct up to 1 ulp (unit in the last place). That
+// is, its error must be strictly less than a unit of its last digit.
+// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
+//
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but length contains the number of
+// digits.
+// * the representation in buffer is the most precise representation of
+// requested_digits digits.
+// * buffer contains at most requested_digits digits of w. If there are less
+// than requested_digits digits then some trailing '0's have been removed.
+// * kappa is such that
+// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
+//
+// Remark: This procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This happens rarely, but the failure-rate
+// increases with higher requested_digits.
+static bool DigitGenCounted(DiyFp w,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* kappa) {
+ ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+ ASSERT(kMinimalTargetExponent >= -60);
+ ASSERT(kMaximalTargetExponent <= -32);
+ // w is assumed to have an error less than 1 unit. Whenever w is scaled we
+ // also scale its error.
+ uint64_t w_error = 1;
+ // We cut the input number into two parts: the integral digits and the
+ // fractional digits. We don't emit any decimal separator, but adapt kappa
+ // instead. Example: instead of writing "1.2" we put "12" into the buffer and
+ // increase kappa by 1.
+ DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+ // Division by one is a shift.
+ uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
+ // Modulo by one is an and.
+ uint64_t fractionals = w.f() & (one.f() - 1);
+ uint32_t divisor;
+ int divisor_exponent_plus_one;
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+ &divisor, &divisor_exponent_plus_one);
+ *kappa = divisor_exponent_plus_one;
+ *length = 0;
+
+ // Loop invariant: buffer = w / 10^kappa (integer division)
+ // The invariant holds for the first iteration: kappa has been initialized
+ // with the divisor exponent + 1. And the divisor is the biggest power of ten
+ // that is smaller than 'integrals'.
+ while (*kappa > 0) {
+ int digit = integrals / divisor;
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ requested_digits--;
+ integrals %= divisor;
+ (*kappa)--;
+ // Note that kappa now equals the exponent of the divisor and that the
+ // invariant thus holds again.
+ if (requested_digits == 0) break;
+ divisor /= 10;
+ }
+
+ if (requested_digits == 0) {
+ uint64_t rest =
+ (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+ return RoundWeedCounted(buffer, *length, rest,
+ static_cast<uint64_t>(divisor) << -one.e(), w_error,
+ kappa);
+ }
+
+ // The integrals have been generated. We are at the point of the decimal
+ // separator. In the following loop we simply multiply the remaining digits by
+ // 10 and divide by one. We just need to pay attention to multiply associated
+ // data (the 'unit'), too.
+ // Note that the multiplication by 10 does not overflow, because w.e >= -60
+ // and thus one.e >= -60.
+ ASSERT(one.e() >= -60);
+ ASSERT(fractionals < one.f());
+ ASSERT(UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+ while (requested_digits > 0 && fractionals > w_error) {
+ fractionals *= 10;
+ w_error *= 10;
+ // Integer division by one.
+ int digit = static_cast<int>(fractionals >> -one.e());
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ requested_digits--;
+ fractionals &= one.f() - 1; // Modulo by one.
+ (*kappa)--;
+ }
+ if (requested_digits != 0) return false;
+ return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
+ kappa);
+}
+
+
+// Provides a decimal representation of v.
+// Returns true if it succeeds, otherwise the result cannot be trusted.
+// There will be *length digits inside the buffer (not null-terminated).
+// If the function returns true then
+// v == (double) (buffer * 10^decimal_exponent).
+// The digits in the buffer are the shortest representation possible: no
+// 0.09999999999999999 instead of 0.1. The shorter representation will be
+// chosen even if the longer one would be closer to v.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the closest will be
+// computed.
+static bool Grisu3(double v,
+ FastDtoaMode mode,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ // boundary_minus and boundary_plus are the boundaries between v and its
+ // closest floating-point neighbors. Any number strictly between
+  // boundary_minus and boundary_plus will round to v when converted to a double.
+ // Grisu3 will never output representations that lie exactly on a boundary.
+ DiyFp boundary_minus, boundary_plus;
+ if (mode == FAST_DTOA_SHORTEST) {
+ Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+ } else {
+ ASSERT(mode == FAST_DTOA_SHORTEST_SINGLE);
+ float single_v = static_cast<float>(v);
+ Single(single_v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+ }
+ ASSERT(boundary_plus.e() == w.e());
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ int ten_mk_minimal_binary_exponent =
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ int ten_mk_maximal_binary_exponent =
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ ten_mk_minimal_binary_exponent,
+ ten_mk_maximal_binary_exponent,
+ &ten_mk, &mk);
+ ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+ // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+ // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+ // off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+ ASSERT(scaled_w.e() ==
+ boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
+ // In theory it would be possible to avoid some recomputations by computing
+ // the difference between w and boundary_minus/plus (a power of 2) and to
+ // compute scaled_boundary_minus/plus by subtracting/adding from
+ // scaled_w. However the code becomes much less readable and the speed
+  // enhancements are not terrific.
+ DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
+ DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
+
+ // DigitGen will generate the digits of scaled_w. Therefore we have
+ // v == (double) (scaled_w * 10^-mk).
+ // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
+  // integer then it will be updated. For instance if scaled_w == 1.23 then
+  // the buffer will be filled with "123" and the decimal_exponent will be
+ // decreased by 2.
+ int kappa;
+ bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+
+// The "counted" version of grisu3 (see above) only generates requested_digits
+// number of digits. This version does not generate the shortest representation,
+// and with enough requested digits 0.1 will at some point print as 0.9999999...
+// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
+// therefore the rounding strategy for halfway cases is irrelevant.
+static bool Grisu3Counted(double v,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ int ten_mk_minimal_binary_exponent =
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ int ten_mk_maximal_binary_exponent =
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
+ ten_mk_minimal_binary_exponent,
+ ten_mk_maximal_binary_exponent,
+ &ten_mk, &mk);
+ ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+ DiyFp::kSignificandSize));
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+ // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+ // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+ // off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+
+ // We now have (double) (scaled_w * 10^-mk).
+ // DigitGenCounted will generate the first requested_digits digits of
+ // scaled_w and return them together with a kappa such that
+ // scaled_w ~= buffer * 10^kappa. (It will not always be exactly the same
+ // since DigitGenCounted only produces a limited number of digits.)
+ int kappa;
+ bool result = DigitGenCounted(scaled_w, requested_digits,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+
+bool FastDtoa(double v,
+ FastDtoaMode mode,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point) {
+ ASSERT(v > 0);
+ ASSERT(!Double(v).IsSpecial());
+
+ bool result = false;
+ int decimal_exponent = 0;
+ switch (mode) {
+ case FAST_DTOA_SHORTEST:
+ case FAST_DTOA_SHORTEST_SINGLE:
+ result = Grisu3(v, mode, buffer, length, &decimal_exponent);
+ break;
+ case FAST_DTOA_PRECISION:
+ result = Grisu3Counted(v, requested_digits,
+ buffer, length, &decimal_exponent);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (result) {
+ *decimal_point = *length + decimal_exponent;
+ buffer[*length] = '\0';
+ }
+ return result;
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/fast-dtoa.h b/src/3rdparty/double-conversion/fast-dtoa.h
new file mode 100644
index 0000000000..5f1e8eee5e
--- /dev/null
+++ b/src/3rdparty/double-conversion/fast-dtoa.h
@@ -0,0 +1,88 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_FAST_DTOA_H_
+#define DOUBLE_CONVERSION_FAST_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+enum FastDtoaMode {
+ // Computes the shortest representation of the given input. The returned
+ // result will be the most accurate number of this length. Longer
+ // representations might be more accurate.
+ FAST_DTOA_SHORTEST,
+ // Same as FAST_DTOA_SHORTEST but for single-precision floats.
+ FAST_DTOA_SHORTEST_SINGLE,
+ // Computes a representation where the precision (number of digits) is
+ // given as input. The precision is independent of the decimal point.
+ FAST_DTOA_PRECISION
+};
+
+// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
+// include the terminating '\0' character.
+static const int kFastDtoaMaximalLength = 17;
+// Same for single-precision numbers.
+static const int kFastDtoaMaximalSingleLength = 9;
+
+// Provides a decimal representation of v.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+// * v must be a strictly positive finite double.
+//
+// Returns true if it succeeds, otherwise the result can not be trusted.
+// There will be *length digits inside the buffer followed by a null terminator.
+// If the function returns true and mode equals
+// - FAST_DTOA_SHORTEST, then
+// the parameter requested_digits is ignored.
+// The result satisfies
+// v == (double) (buffer * 10^(point - length)).
+// The digits in the buffer are the shortest representation possible. E.g.
+// if 0.099999999999 and 0.1 represent the same double then "1" is returned
+// with point = 0.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the buffer will contain
+// the one closest to v.
+// - FAST_DTOA_PRECISION, then
+// the buffer contains requested_digits digits.
+// the difference v - (buffer * 10^(point-length)) is closest to zero for
+// all possible representations of requested_digits digits.
+// If there are two values that are equally close, then FastDtoa returns
+// false.
+// For both modes the buffer must be large enough to hold the result.
+bool FastDtoa(double d,
+ FastDtoaMode mode,
+ int requested_digits,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point);
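+
+// A minimal usage sketch (illustrative only; the local buffer size and
+// variable names are this comment's assumptions, not part of the API):
+//
+//   char digits[kFastDtoaMaximalLength + 1];
+//   Vector<char> buffer(digits, kFastDtoaMaximalLength + 1);
+//   int length, point;
+//   if (FastDtoa(0.25, FAST_DTOA_SHORTEST, 0, buffer, &length, &point)) {
+//     // buffer holds "25", length == 2, point == 0: 0.25 == 25 * 10^(0-2).
+//   }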
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_FAST_DTOA_H_
diff --git a/src/3rdparty/double-conversion/fixed-dtoa.cc b/src/3rdparty/double-conversion/fixed-dtoa.cc
new file mode 100644
index 0000000000..d56b1449b2
--- /dev/null
+++ b/src/3rdparty/double-conversion/fixed-dtoa.cc
@@ -0,0 +1,402 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <math.h>
+
+#include "fixed-dtoa.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// Represents a 128bit type. This class should be replaced by a native type on
+// platforms that support 128bit integers.
+class UInt128 {
+ public:
+ UInt128() : high_bits_(0), low_bits_(0) { }
+ UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
+
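+ // Multiplies *this in place by the given 32-bit multiplicand, treating the
+ // 128-bit value as four 32-bit limbs (schoolbook multiplication). The final
+ // carry must be zero, i.e. the product must still fit into 128 bits.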
+ void Multiply(uint32_t multiplicand) {
+ uint64_t accumulator;
+
+ accumulator = (low_bits_ & kMask32) * multiplicand;
+ uint32_t part = static_cast<uint32_t>(accumulator & kMask32);
+ accumulator >>= 32;
+ accumulator = accumulator + (low_bits_ >> 32) * multiplicand;
+ low_bits_ = (accumulator << 32) + part;
+ accumulator >>= 32;
+ accumulator = accumulator + (high_bits_ & kMask32) * multiplicand;
+ part = static_cast<uint32_t>(accumulator & kMask32);
+ accumulator >>= 32;
+ accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
+ high_bits_ = (accumulator << 32) + part;
+ ASSERT((accumulator >> 32) == 0);
+ }
+
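+ // Shifts *this right by shift_amount bits; a negative shift_amount shifts
+ // left. The shift amount must be in the range [-64, 64].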
+ void Shift(int shift_amount) {
+ ASSERT(-64 <= shift_amount && shift_amount <= 64);
+ if (shift_amount == 0) {
+ return;
+ } else if (shift_amount == -64) {
+ high_bits_ = low_bits_;
+ low_bits_ = 0;
+ } else if (shift_amount == 64) {
+ low_bits_ = high_bits_;
+ high_bits_ = 0;
+ } else if (shift_amount <= 0) {
+ high_bits_ <<= -shift_amount;
+ high_bits_ += low_bits_ >> (64 + shift_amount);
+ low_bits_ <<= -shift_amount;
+ } else {
+ low_bits_ >>= shift_amount;
+ low_bits_ += high_bits_ << (64 - shift_amount);
+ high_bits_ >>= shift_amount;
+ }
+ }
+
+ // Modifies *this to *this MOD (2^power).
+ // Returns *this DIV (2^power).
+ int DivModPowerOf2(int power) {
+ if (power >= 64) {
+ int result = static_cast<int>(high_bits_ >> (power - 64));
+ high_bits_ -= static_cast<uint64_t>(result) << (power - 64);
+ return result;
+ } else {
+ uint64_t part_low = low_bits_ >> power;
+ uint64_t part_high = high_bits_ << (64 - power);
+ int result = static_cast<int>(part_low + part_high);
+ high_bits_ = 0;
+ low_bits_ -= part_low << power;
+ return result;
+ }
+ }
+
+ bool IsZero() const {
+ return high_bits_ == 0 && low_bits_ == 0;
+ }
+
+ int BitAt(int position) {
+ if (position >= 64) {
+ return static_cast<int>(high_bits_ >> (position - 64)) & 1;
+ } else {
+ return static_cast<int>(low_bits_ >> position) & 1;
+ }
+ }
+
+ private:
+ static const uint64_t kMask32 = 0xFFFFFFFF;
+ // Value == (high_bits_ << 64) + low_bits_
+ uint64_t high_bits_;
+ uint64_t low_bits_;
+};
+
+
+static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
+
+
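+// Appends 'number' to the buffer as exactly requested_length decimal digits,
+// padding with leading zeros where necessary. For example (illustrative):
+// number == 42 with requested_length == 4 appends "0042".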
+static void FillDigits32FixedLength(uint32_t number, int requested_length,
+ Vector<char> buffer, int* length) {
+ for (int i = requested_length - 1; i >= 0; --i) {
+ buffer[(*length) + i] = '0' + number % 10;
+ number /= 10;
+ }
+ *length += requested_length;
+}
+
+
+static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
+ int number_length = 0;
+ // We fill the digits in reverse order and exchange them afterwards.
+ while (number != 0) {
+ int digit = number % 10;
+ number /= 10;
+ buffer[(*length) + number_length] = '0' + digit;
+ number_length++;
+ }
+ // Exchange the digits.
+ int i = *length;
+ int j = *length + number_length - 1;
+ while (i < j) {
+ char tmp = buffer[i];
+ buffer[i] = buffer[j];
+ buffer[j] = tmp;
+ i++;
+ j--;
+ }
+ *length += number_length;
+}
+
+
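+// Worked illustration of the split below: number == 12345678901234567 yields
+// part0 == 123, part1 == 4567890 and part2 == 1234567, printed as
+// "123" + "4567890" + "1234567", i.e. the original 17 digits.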
+static void FillDigits64FixedLength(uint64_t number, int requested_length,
+ Vector<char> buffer, int* length) {
+ const uint32_t kTen7 = 10000000;
+ // For efficiency cut the number into 3 uint32_t parts, and print those.
+ uint32_t part2 = static_cast<uint32_t>(number % kTen7);
+ number /= kTen7;
+ uint32_t part1 = static_cast<uint32_t>(number % kTen7);
+ uint32_t part0 = static_cast<uint32_t>(number / kTen7);
+
+ FillDigits32FixedLength(part0, 3, buffer, length);
+ FillDigits32FixedLength(part1, 7, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+}
+
+
+static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
+ const uint32_t kTen7 = 10000000;
+ // For efficiency cut the number into 3 uint32_t parts, and print those.
+ uint32_t part2 = static_cast<uint32_t>(number % kTen7);
+ number /= kTen7;
+ uint32_t part1 = static_cast<uint32_t>(number % kTen7);
+ uint32_t part0 = static_cast<uint32_t>(number / kTen7);
+
+ if (part0 != 0) {
+ FillDigits32(part0, buffer, length);
+ FillDigits32FixedLength(part1, 7, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+ } else if (part1 != 0) {
+ FillDigits32(part1, buffer, length);
+ FillDigits32FixedLength(part2, 7, buffer, length);
+ } else {
+ FillDigits32(part2, buffer, length);
+ }
+}
+
+
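+// Adds one unit in the last place of the buffer and propagates the carry
+// towards the front. For example (illustrative): "1999" becomes "2000", and
+// "99" becomes "10" with the decimal point moved one position to the right.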
+static void RoundUp(Vector<char> buffer, int* length, int* decimal_point) {
+ // An empty buffer represents 0.
+ if (*length == 0) {
+ buffer[0] = '1';
+ *decimal_point = 1;
+ *length = 1;
+ return;
+ }
+ // Increment the last digit and propagate the carry until we either hit a
+ // digit that is not '9' or reach the first digit.
+ buffer[(*length) - 1]++;
+ for (int i = (*length) - 1; i > 0; --i) {
+ if (buffer[i] != '0' + 10) {
+ return;
+ }
+ buffer[i] = '0';
+ buffer[i - 1]++;
+ }
+ // If the first digit is now '0' + 10, we would need to set it to '0' and add
+ // a '1' in front. However we reach the first digit only if all following
+ // digits had been '9' before rounding up. Now all trailing digits are '0' and
+ // we simply switch the first digit to '1' and update the decimal-point
+ // (indicating that the point is now one digit to the right).
+ if (buffer[0] == '0' + 10) {
+ buffer[0] = '1';
+ (*decimal_point)++;
+ }
+}
+
+
+// The given fractionals number represents a fixed-point number with binary
+// point at bit (-exponent).
+// Preconditions:
+// -128 <= exponent <= 0.
+// 0 <= fractionals * 2^exponent < 1
+// The buffer holds the result.
+// The function will round its result. During the rounding-process digits not
+// generated by this function might be updated, and the decimal-point variable
+// might be updated. If this function generates the digits 99 and the buffer
+// already contained "199" (thus yielding a buffer of "19999") then a
+// rounding-up will change the contents of the buffer to "20000".
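+// Worked illustration of the multiply-by-5 trick used below: fractionals == 1
+// with exponent == -2 encodes 0.25. Starting at point == 2 the loop computes
+// 1*5 == 5, point == 1, digit == 5 >> 1 == 2 (remainder 1), then 1*5 == 5,
+// point == 0, digit == 5 >> 0 == 5 (remainder 0), producing the digits "25".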
+static void FillFractionals(uint64_t fractionals, int exponent,
+ int fractional_count, Vector<char> buffer,
+ int* length, int* decimal_point) {
+ ASSERT(-128 <= exponent && exponent <= 0);
+ // 'fractionals' is a fixed-point number, with binary point at bit
+ // (-exponent). Inside the function the non-converted remainder of fractionals
+ // is a fixed-point number, with binary point at bit 'point'.
+ if (-exponent <= 64) {
+ // One 64 bit number is sufficient.
+ ASSERT(fractionals >> 56 == 0);
+ int point = -exponent;
+ for (int i = 0; i < fractional_count; ++i) {
+ if (fractionals == 0) break;
+ // Instead of multiplying by 10 we multiply by 5 and adjust the point
+ // location. This way the fractionals variable will not overflow.
+ // Invariant at the beginning of the loop: fractionals < 2^point.
+ // Initially we have: point <= 64 and fractionals < 2^56
+ // After each iteration the point is decremented by one.
+ // Note that 5^3 = 125 < 128 = 2^7.
+ // Therefore three iterations of this loop will not overflow fractionals
+ // (even without the subtraction at the end of the loop body). At this
+ // time point will satisfy point <= 61 and therefore fractionals < 2^point
+ // and any further multiplication of fractionals by 5 will not overflow.
+ fractionals *= 5;
+ point--;
+ int digit = static_cast<int>(fractionals >> point);
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ fractionals -= static_cast<uint64_t>(digit) << point;
+ }
+ // If the first bit after the point is set we have to round up.
+ if (((fractionals >> (point - 1)) & 1) == 1) {
+ RoundUp(buffer, length, decimal_point);
+ }
+ } else { // We need 128 bits.
+ ASSERT(64 < -exponent && -exponent <= 128);
+ UInt128 fractionals128 = UInt128(fractionals, 0);
+ fractionals128.Shift(-exponent - 64);
+ int point = 128;
+ for (int i = 0; i < fractional_count; ++i) {
+ if (fractionals128.IsZero()) break;
+ // As before: instead of multiplying by 10 we multiply by 5 and adjust the
+ // point location.
+ // This multiplication will not overflow for the same reasons as before.
+ fractionals128.Multiply(5);
+ point--;
+ int digit = fractionals128.DivModPowerOf2(point);
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ }
+ if (fractionals128.BitAt(point - 1) == 1) {
+ RoundUp(buffer, length, decimal_point);
+ }
+ }
+}
+
+
+// Removes leading and trailing zeros.
+// If leading zeros are removed then the decimal point position is adjusted.
+static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
+ while (*length > 0 && buffer[(*length) - 1] == '0') {
+ (*length)--;
+ }
+ int first_non_zero = 0;
+ while (first_non_zero < *length && buffer[first_non_zero] == '0') {
+ first_non_zero++;
+ }
+ if (first_non_zero != 0) {
+ for (int i = first_non_zero; i < *length; ++i) {
+ buffer[i - first_non_zero] = buffer[i];
+ }
+ *length -= first_non_zero;
+ *decimal_point -= first_non_zero;
+ }
+}
+
+
+bool FastFixedDtoa(double v,
+ int fractional_count,
+ Vector<char> buffer,
+ int* length,
+ int* decimal_point) {
+ const uint32_t kMaxUInt32 = 0xFFFFFFFF;
+ uint64_t significand = Double(v).Significand();
+ int exponent = Double(v).Exponent();
+ // v = significand * 2^exponent (with significand a 53bit integer).
+ // If the exponent is larger than 20 (i.e. we may have a 73bit number) then we
+ // don't know how to compute the representation. 2^73 ~= 9.5*10^21.
+ // If necessary this limit could probably be increased, but we don't need
+ // more.
+ if (exponent > 20) return false;
+ if (fractional_count > 20) return false;
+ *length = 0;
+ // At most kDoubleSignificandSize bits of the significand are non-zero.
+ // Given a 64 bit integer we have 11 0s followed by 53 potentially non-zero
+ // bits: 0..11*..0xxx..53*..xx
+ if (exponent + kDoubleSignificandSize > 64) {
+ // The exponent must be > 11.
+ //
+ // We know that v = significand * 2^exponent.
+ // And the exponent > 11.
+ // We simplify the task by dividing v by 10^17.
+ // The quotient delivers the first digits, and the remainder fits into a 64
+ // bit number.
+ // Dividing by 10^17 is equivalent to dividing by 5^17*2^17.
+ const uint64_t kFive17 = UINT64_2PART_C(0xB1, A2BC2EC5); // 5^17
+ uint64_t divisor = kFive17;
+ int divisor_power = 17;
+ uint64_t dividend = significand;
+ uint32_t quotient;
+ uint64_t remainder;
+ // Let v = f * 2^e with f == significand and e == exponent.
+ // Then need q (quotient) and r (remainder) as follows:
+ // v = q * 10^17 + r
+ // f * 2^e = q * 10^17 + r
+ // f * 2^e = q * 5^17 * 2^17 + r
+ // If e > 17 then
+ // f * 2^(e-17) = q * 5^17 + r/2^17
+ // else
+ // f = q * 5^17 * 2^(17-e) + r/2^e
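+ // For example (illustrative): with e == 20 the dividend is shifted left by
+ // 3 bits before the division; with e == 5 the divisor is shifted left by
+ // 12 bits instead.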
+ if (exponent > divisor_power) {
+ // We only allow exponents of up to 20 and therefore (e - 17) <= 3
+ dividend <<= exponent - divisor_power;
+ quotient = static_cast<uint32_t>(dividend / divisor);
+ remainder = (dividend % divisor) << divisor_power;
+ } else {
+ divisor <<= divisor_power - exponent;
+ quotient = static_cast<uint32_t>(dividend / divisor);
+ remainder = (dividend % divisor) << exponent;
+ }
+ FillDigits32(quotient, buffer, length);
+ FillDigits64FixedLength(remainder, divisor_power, buffer, length);
+ *decimal_point = *length;
+ } else if (exponent >= 0) {
+ // 0 <= exponent <= 11
+ significand <<= exponent;
+ FillDigits64(significand, buffer, length);
+ *decimal_point = *length;
+ } else if (exponent > -kDoubleSignificandSize) {
+ // We have to cut the number.
+ uint64_t integrals = significand >> -exponent;
+ uint64_t fractionals = significand - (integrals << -exponent);
+ if (integrals > kMaxUInt32) {
+ FillDigits64(integrals, buffer, length);
+ } else {
+ FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
+ }
+ *decimal_point = *length;
+ FillFractionals(fractionals, exponent, fractional_count,
+ buffer, length, decimal_point);
+ } else if (exponent < -128) {
+ // This configuration (with at most 20 digits) means that all digits must be
+ // 0.
+ ASSERT(fractional_count <= 20);
+ buffer[0] = '\0';
+ *length = 0;
+ *decimal_point = -fractional_count;
+ } else {
+ *decimal_point = 0;
+ FillFractionals(significand, exponent, fractional_count,
+ buffer, length, decimal_point);
+ }
+ TrimZeros(buffer, length, decimal_point);
+ buffer[*length] = '\0';
+ if ((*length) == 0) {
+ // The string is empty and the decimal_point thus has no importance. Mimic
+ // Gay's dtoa and set it to -fractional_count.
+ *decimal_point = -fractional_count;
+ }
+ return true;
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/fixed-dtoa.h b/src/3rdparty/double-conversion/fixed-dtoa.h
new file mode 100644
index 0000000000..3bdd08e21f
--- /dev/null
+++ b/src/3rdparty/double-conversion/fixed-dtoa.h
@@ -0,0 +1,56 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_FIXED_DTOA_H_
+#define DOUBLE_CONVERSION_FIXED_DTOA_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// Produces digits necessary to print a given number with
+// 'fractional_count' digits after the decimal point.
+// The buffer must be big enough to hold the result plus one terminating null
+// character.
+//
+// The produced digits might be too short in which case the caller has to fill
+// the gaps with '0's.
+// Example: FastFixedDtoa(0.001, 5, ...) is allowed to return buffer = "1", and
+// decimal_point = -2.
+// Halfway cases are rounded towards +/-Infinity (away from 0). The call
+// FastFixedDtoa(0.15, 2, ...) thus returns buffer = "2", decimal_point = 0.
+// The returned buffer may contain digits that would be truncated from the
+// shortest representation of the input.
+//
+// This method only works for some parameters. If it can't handle the input it
+// returns false. The output is null-terminated when the function succeeds.
+bool FastFixedDtoa(double v, int fractional_count,
+ Vector<char> buffer, int* length, int* decimal_point);
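+
+// A minimal usage sketch (illustrative only; the buffer size and variable
+// names are this comment's assumptions):
+//
+//   char buf[32];
+//   int length, point;
+//   if (FastFixedDtoa(1.5, 2, Vector<char>(buf, 32), &length, &point)) {
+//     // buf holds "15", length == 2, point == 1: 1.5 == 15 * 10^(1-2).
+//   }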
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_
diff --git a/src/3rdparty/double-conversion/ieee.h b/src/3rdparty/double-conversion/ieee.h
new file mode 100644
index 0000000000..839dc47d45
--- /dev/null
+++ b/src/3rdparty/double-conversion/ieee.h
@@ -0,0 +1,398 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_DOUBLE_H_
+#define DOUBLE_CONVERSION_DOUBLE_H_
+
+#include "diy-fp.h"
+
+namespace double_conversion {
+
+// We assume that doubles and uint64_t have the same endianness.
+static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+static uint32_t float_to_uint32(float f) { return BitCast<uint32_t>(f); }
+static float uint32_to_float(uint32_t d32) { return BitCast<float>(d32); }
+
+// Helper functions for doubles.
+class Double {
+ public:
+ static const uint64_t kSignMask = UINT64_2PART_C(0x80000000, 00000000);
+ static const uint64_t kExponentMask = UINT64_2PART_C(0x7FF00000, 00000000);
+ static const uint64_t kSignificandMask = UINT64_2PART_C(0x000FFFFF, FFFFFFFF);
+ static const uint64_t kHiddenBit = UINT64_2PART_C(0x00100000, 00000000);
+ static const int kPhysicalSignificandSize = 52; // Excludes the hidden bit.
+ static const int kSignificandSize = 53;
+
+ Double() : d64_(0) {}
+ explicit Double(double d) : d64_(double_to_uint64(d)) {}
+ explicit Double(uint64_t d64) : d64_(d64) {}
+ explicit Double(DiyFp diy_fp)
+ : d64_(DiyFpToUint64(diy_fp)) {}
+
+ // The value encoded by this Double must be greater than or equal to +0.0.
+ // It must not be special (infinity, or NaN).
+ DiyFp AsDiyFp() const {
+ ASSERT(Sign() > 0);
+ ASSERT(!IsSpecial());
+ return DiyFp(Significand(), Exponent());
+ }
+
+ // The value encoded by this Double must be strictly greater than 0.
+ DiyFp AsNormalizedDiyFp() const {
+ ASSERT(value() > 0.0);
+ uint64_t f = Significand();
+ int e = Exponent();
+
+ // The current double could be a denormal.
+ while ((f & kHiddenBit) == 0) {
+ f <<= 1;
+ e--;
+ }
+ // Do the final shifts in one go.
+ f <<= DiyFp::kSignificandSize - kSignificandSize;
+ e -= DiyFp::kSignificandSize - kSignificandSize;
+ return DiyFp(f, e);
+ }
+
+ // Returns the double's bits as uint64.
+ uint64_t AsUint64() const {
+ return d64_;
+ }
+
+ // Returns the next greater double. Returns +infinity on input +infinity.
+ double NextDouble() const {
+ if (d64_ == kInfinity) return Double(kInfinity).value();
+ if (Sign() < 0 && Significand() == 0) {
+ // -0.0
+ return 0.0;
+ }
+ if (Sign() < 0) {
+ return Double(d64_ - 1).value();
+ } else {
+ return Double(d64_ + 1).value();
+ }
+ }
+
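+ // Returns the next smaller double. Returns -infinity on input -infinity.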
+ double PreviousDouble() const {
+ if (d64_ == (kInfinity | kSignMask)) return -Double::Infinity();
+ if (Sign() < 0) {
+ return Double(d64_ + 1).value();
+ } else {
+ if (Significand() == 0) return -0.0;
+ return Double(d64_ - 1).value();
+ }
+ }
+
+ int Exponent() const {
+ if (IsDenormal()) return kDenormalExponent;
+
+ uint64_t d64 = AsUint64();
+ int biased_e =
+ static_cast<int>((d64 & kExponentMask) >> kPhysicalSignificandSize);
+ return biased_e - kExponentBias;
+ }
+
+ uint64_t Significand() const {
+ uint64_t d64 = AsUint64();
+ uint64_t significand = d64 & kSignificandMask;
+ if (!IsDenormal()) {
+ return significand + kHiddenBit;
+ } else {
+ return significand;
+ }
+ }
+
+ // Returns true if the double is a denormal.
+ bool IsDenormal() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == 0;
+ }
+
+ // We consider denormals not to be special.
+ // Hence only Infinity and NaN are special.
+ bool IsSpecial() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == kExponentMask;
+ }
+
+ bool IsNan() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) != 0);
+ }
+
+ bool IsInfinite() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) == 0);
+ }
+
+ int Sign() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kSignMask) == 0? 1: -1;
+ }
+
+ // Precondition: the value encoded by this Double must be greater than or
+ // equal to +0.0.
+ DiyFp UpperBoundary() const {
+ ASSERT(Sign() > 0);
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+ }
+
+ // Computes the two boundaries of this.
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+ // exponent as m_plus.
+ // Precondition: the value encoded by this Double must be greater than 0.
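+ // Illustration: m_minus and m_plus are the midpoints between this double and
+ // its two neighbors; any real number strictly between them rounds back to
+ // this double.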
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ ASSERT(value() > 0.0);
+ DiyFp v = this->AsDiyFp();
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+ DiyFp m_minus;
+ if (LowerBoundaryIsCloser()) {
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+ } else {
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+ }
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+ m_minus.set_e(m_plus.e());
+ *out_m_plus = m_plus;
+ *out_m_minus = m_minus;
+ }
+
+ bool LowerBoundaryIsCloser() const {
+ // If the significand is of the form f == 2^p-1 then the lower boundary is
+ // closer.
+ // Think of v = 1000e10 and v- = 9999e9.
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+ // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is
+ // at the same distance as its successor.
+ // Note: denormals have the same exponent as the smallest normals.
+ bool physical_significand_is_zero = ((AsUint64() & kSignificandMask) == 0);
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
+ }
+
+ double value() const { return uint64_to_double(d64_); }
+
+ // Returns the significand size for a given order of magnitude.
+ // If v = f*2^e with 2^(p-1) <= f < 2^p then p+e is v's order of magnitude.
+ // This function returns the number of significant binary digits v will have
+ // once it's encoded into a double. In almost all cases this is equal to
+ // kSignificandSize. The only exceptions are denormals. They start with
+ // leading zeroes and their effective significand-size is hence smaller.
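+ // For doubles (illustrative): kDenormalExponent == -1074, so any order of
+ // magnitude >= -1021 yields the full 53 bits, while e.g. order == -1060
+ // yields only -1060 - (-1074) == 14 significant bits.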
+ static int SignificandSizeForOrderOfMagnitude(int order) {
+ if (order >= (kDenormalExponent + kSignificandSize)) {
+ return kSignificandSize;
+ }
+ if (order <= kDenormalExponent) return 0;
+ return order - kDenormalExponent;
+ }
+
+ static double Infinity() {
+ return Double(kInfinity).value();
+ }
+
+ static double NaN() {
+ return Double(kNaN).value();
+ }
+
+ private:
+ static const int kExponentBias = 0x3FF + kPhysicalSignificandSize;
+ static const int kDenormalExponent = -kExponentBias + 1;
+ static const int kMaxExponent = 0x7FF - kExponentBias;
+ static const uint64_t kInfinity = UINT64_2PART_C(0x7FF00000, 00000000);
+ static const uint64_t kNaN = UINT64_2PART_C(0x7FF80000, 00000000);
+
+ const uint64_t d64_;
+
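+ // Encodes diy_fp as an IEEE-754 double bit pattern: shifts the significand
+ // into the 53-bit range (dropping excess low-order bits), then handles
+ // overflow to infinity, underflow to zero, and denormals.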
+ static uint64_t DiyFpToUint64(DiyFp diy_fp) {
+ uint64_t significand = diy_fp.f();
+ int exponent = diy_fp.e();
+ while (significand > kHiddenBit + kSignificandMask) {
+ significand >>= 1;
+ exponent++;
+ }
+ if (exponent >= kMaxExponent) {
+ return kInfinity;
+ }
+ if (exponent < kDenormalExponent) {
+ return 0;
+ }
+ while (exponent > kDenormalExponent && (significand & kHiddenBit) == 0) {
+ significand <<= 1;
+ exponent--;
+ }
+ uint64_t biased_exponent;
+ if (exponent == kDenormalExponent && (significand & kHiddenBit) == 0) {
+ biased_exponent = 0;
+ } else {
+ biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
+ }
+ return (significand & kSignificandMask) |
+ (biased_exponent << kPhysicalSignificandSize);
+ }
+};
+
+class Single {
+ public:
+ static const uint32_t kSignMask = 0x80000000;
+ static const uint32_t kExponentMask = 0x7F800000;
+ static const uint32_t kSignificandMask = 0x007FFFFF;
+ static const uint32_t kHiddenBit = 0x00800000;
+ static const int kPhysicalSignificandSize = 23; // Excludes the hidden bit.
+ static const int kSignificandSize = 24;
+
+ Single() : d32_(0) {}
+ explicit Single(float f) : d32_(float_to_uint32(f)) {}
+ explicit Single(uint32_t d32) : d32_(d32) {}
+
+ // The value encoded by this Single must be greater than or equal to +0.0.
+ // It must not be special (infinity, or NaN).
+ DiyFp AsDiyFp() const {
+ ASSERT(Sign() > 0);
+ ASSERT(!IsSpecial());
+ return DiyFp(Significand(), Exponent());
+ }
+
+ // Returns the single's bits as uint32.
+ uint32_t AsUint32() const {
+ return d32_;
+ }
+
+ int Exponent() const {
+ if (IsDenormal()) return kDenormalExponent;
+
+ uint32_t d32 = AsUint32();
+ int biased_e =
+ static_cast<int>((d32 & kExponentMask) >> kPhysicalSignificandSize);
+ return biased_e - kExponentBias;
+ }
+
+ uint32_t Significand() const {
+ uint32_t d32 = AsUint32();
+ uint32_t significand = d32 & kSignificandMask;
+ if (!IsDenormal()) {
+ return significand + kHiddenBit;
+ } else {
+ return significand;
+ }
+ }
+
+ // Returns true if the single is a denormal.
+ bool IsDenormal() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kExponentMask) == 0;
+ }
+
+ // We consider denormals not to be special.
+ // Hence only Infinity and NaN are special.
+ bool IsSpecial() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kExponentMask) == kExponentMask;
+ }
+
+ bool IsNan() const {
+ uint32_t d32 = AsUint32();
+ return ((d32 & kExponentMask) == kExponentMask) &&
+ ((d32 & kSignificandMask) != 0);
+ }
+
+ bool IsInfinite() const {
+ uint32_t d32 = AsUint32();
+ return ((d32 & kExponentMask) == kExponentMask) &&
+ ((d32 & kSignificandMask) == 0);
+ }
+
+ int Sign() const {
+ uint32_t d32 = AsUint32();
+ return (d32 & kSignMask) == 0? 1: -1;
+ }
+
+ // Computes the two boundaries of this.
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+ // exponent as m_plus.
+ // Precondition: the value encoded by this Single must be greater than 0.
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ ASSERT(value() > 0.0);
+ DiyFp v = this->AsDiyFp();
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+ DiyFp m_minus;
+ if (LowerBoundaryIsCloser()) {
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+ } else {
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+ }
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+ m_minus.set_e(m_plus.e());
+ *out_m_plus = m_plus;
+ *out_m_minus = m_minus;
+ }
+
+ // Precondition: the value encoded by this Single must be greater than or
+ // equal to +0.0.
+ DiyFp UpperBoundary() const {
+ ASSERT(Sign() > 0);
+ return DiyFp(Significand() * 2 + 1, Exponent() - 1);
+ }
+
+ bool LowerBoundaryIsCloser() const {
+ // If the significand is of the form f == 2^p-1 then the lower boundary is
+ // closer.
+ // Think of v = 1000e10 and v- = 9999e9.
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+ // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is
+ // at the same distance as its successor.
+ // Note: denormals have the same exponent as the smallest normals.
+ bool physical_significand_is_zero = ((AsUint32() & kSignificandMask) == 0);
+ return physical_significand_is_zero && (Exponent() != kDenormalExponent);
+ }
+
+ float value() const { return uint32_to_float(d32_); }
+
+ static float Infinity() {
+ return Single(kInfinity).value();
+ }
+
+ static float NaN() {
+ return Single(kNaN).value();
+ }
+
+ private:
+ static const int kExponentBias = 0x7F + kPhysicalSignificandSize;
+ static const int kDenormalExponent = -kExponentBias + 1;
+ static const int kMaxExponent = 0xFF - kExponentBias;
+ static const uint32_t kInfinity = 0x7F800000;
+ static const uint32_t kNaN = 0x7FC00000;
+
+ const uint32_t d32_;
+};
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_DOUBLE_H_
diff --git a/src/3rdparty/double-conversion/strtod.cc b/src/3rdparty/double-conversion/strtod.cc
new file mode 100644
index 0000000000..9758989f71
--- /dev/null
+++ b/src/3rdparty/double-conversion/strtod.cc
@@ -0,0 +1,554 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+#include <limits.h>
+
+#include "strtod.h"
+#include "bignum.h"
+#include "cached-powers.h"
+#include "ieee.h"
+
+namespace double_conversion {
+
+// 2^53 = 9007199254740992.
+// Any integer with at most 15 decimal digits will hence fit into a double
+// (which has a 53bit significand) without loss of precision.
+static const int kMaxExactDoubleIntegerDecimalDigits = 15;
+// 2^64 = 18446744073709551616 > 10^19
+static const int kMaxUint64DecimalDigits = 19;
+
+// Max double: 1.7976931348623157 x 10^308
+// Min non-zero double: 4.9406564584124654 x 10^-324
+// Any x >= 10^309 is interpreted as +infinity.
+// Any x <= 10^-324 is interpreted as 0.
+// Note that 2.5e-324 (despite being smaller than the min non-zero double) will
+// be read as non-zero (equal to the min non-zero double).
+static const int kMaxDecimalPower = 309;
+static const int kMinDecimalPower = -324;
+
+// 2^64 = 18446744073709551616
+static const uint64_t kMaxUint64 = UINT64_2PART_C(0xFFFFFFFF, FFFFFFFF);
+
+
+static const double exact_powers_of_ten[] = {
+ 1.0, // 10^0
+ 10.0,
+ 100.0,
+ 1000.0,
+ 10000.0,
+ 100000.0,
+ 1000000.0,
+ 10000000.0,
+ 100000000.0,
+ 1000000000.0,
+ 10000000000.0, // 10^10
+ 100000000000.0,
+ 1000000000000.0,
+ 10000000000000.0,
+ 100000000000000.0,
+ 1000000000000000.0,
+ 10000000000000000.0,
+ 100000000000000000.0,
+ 1000000000000000000.0,
+ 10000000000000000000.0,
+ 100000000000000000000.0, // 10^20
+ 1000000000000000000000.0,
+ // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
+ 10000000000000000000000.0
+};
+static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
+
+// Maximum number of significant digits in the decimal representation.
+// In fact the value is 772 (see conversions.cc), but to give us some margin
+// we round up to 780.
+static const int kMaxSignificantDecimalDigits = 780;
+
+static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
+ for (int i = 0; i < buffer.length(); i++) {
+ if (buffer[i] != '0') {
+ return buffer.SubVector(i, buffer.length());
+ }
+ }
+ return Vector<const char>(buffer.start(), 0);
+}
+
+
+static Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
+ for (int i = buffer.length() - 1; i >= 0; --i) {
+ if (buffer[i] != '0') {
+ return buffer.SubVector(0, i + 1);
+ }
+ }
+ return Vector<const char>(buffer.start(), 0);
+}
+
+
+static void CutToMaxSignificantDigits(Vector<const char> buffer,
+ int exponent,
+ char* significant_buffer,
+ int* significant_exponent) {
+ for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
+ significant_buffer[i] = buffer[i];
+ }
+ // The input buffer has been trimmed. Therefore the last digit must be
+ // different from '0'.
+ ASSERT(buffer[buffer.length() - 1] != '0');
+ // Set the last digit to be non-zero. This is sufficient to guarantee
+ // correct rounding.
+ significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
+ *significant_exponent =
+ exponent + (buffer.length() - kMaxSignificantDecimalDigits);
+}
+
+
+// Trims the buffer and cuts it to at most kMaxSignificantDecimalDigits.
+// If possible the input-buffer is reused, but if the buffer needs to be
+// modified (due to cutting), then the input needs to be copied into the
+// buffer_copy_space.
+static void TrimAndCut(Vector<const char> buffer, int exponent,
+ char* buffer_copy_space, int space_size,
+ Vector<const char>* trimmed, int* updated_exponent) {
+ Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
+ Vector<const char> right_trimmed = TrimTrailingZeros(left_trimmed);
+ exponent += left_trimmed.length() - right_trimmed.length();
+ if (right_trimmed.length() > kMaxSignificantDecimalDigits) {
+ ASSERT(space_size >= kMaxSignificantDecimalDigits);
+ CutToMaxSignificantDigits(right_trimmed, exponent,
+ buffer_copy_space, updated_exponent);
+ *trimmed = Vector<const char>(buffer_copy_space,
+ kMaxSignificantDecimalDigits);
+ } else {
+ *trimmed = right_trimmed;
+ *updated_exponent = exponent;
+ }
+}
+
+
+// Reads digits from the buffer and converts them to a uint64.
+// Reads in as many digits as fit into a uint64.
+// When the string starts with "1844674407370955161" no further digit is read.
+// Since 2^64 = 18446744073709551616 it would still be possible to read another
+// digit if it were less than or equal to 6, but this would complicate the code.
+static uint64_t ReadUint64(Vector<const char> buffer,
+ int* number_of_read_digits) {
+ uint64_t result = 0;
+ int i = 0;
+ while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
+ int digit = buffer[i++] - '0';
+ ASSERT(0 <= digit && digit <= 9);
+ result = 10 * result + digit;
+ }
+ *number_of_read_digits = i;
+ return result;
+}
+
+
+// Reads a DiyFp from the buffer.
+// The returned DiyFp is not necessarily normalized.
+// If remaining_decimals is zero then the returned DiyFp is accurate.
+// Otherwise it has been rounded and has error of at most 1/2 ulp.
+static void ReadDiyFp(Vector<const char> buffer,
+ DiyFp* result,
+ int* remaining_decimals) {
+ int read_digits;
+ uint64_t significand = ReadUint64(buffer, &read_digits);
+ if (buffer.length() == read_digits) {
+ *result = DiyFp(significand, 0);
+ *remaining_decimals = 0;
+ } else {
+ // Round the significand.
+ if (buffer[read_digits] >= '5') {
+ significand++;
+ }
+ // Compute the binary exponent.
+ int exponent = 0;
+ *result = DiyFp(significand, exponent);
+ *remaining_decimals = buffer.length() - read_digits;
+ }
+}
+
+
+static bool DoubleStrtod(Vector<const char> trimmed,
+ int exponent,
+ double* result) {
+#if !defined(DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS)
+ // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
+ // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
+ // result is not accurate.
+ // We know that Windows32 uses 64 bits and is therefore accurate.
+ // Note that the ARM simulator is compiled for 32bits. It therefore exhibits
+ // the same problem.
+ return false;
+#endif
+ if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
+ int read_digits;
+ // The trimmed input fits into a double.
+ // If the 10^exponent (resp. 10^-exponent) fits into a double too then we
+ // can compute the result-double simply by multiplying (resp. dividing) the
+ // two numbers.
+ // This is possible because IEEE guarantees that floating-point operations
+ // return the best possible approximation.
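+ // For instance (illustrative): trimmed == "123" with exponent == -2 is
+ // computed as the single correctly rounded division 123.0 / 100.0; with
+ // exponent == 2 it is the exact product 123.0 * 100.0 == 12300.0.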
+ if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
+ // 10^-exponent fits into a double.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ ASSERT(read_digits == trimmed.length());
+ *result /= exact_powers_of_ten[-exponent];
+ return true;
+ }
+ if (0 <= exponent && exponent < kExactPowersOfTenSize) {
+ // 10^exponent fits into a double.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ ASSERT(read_digits == trimmed.length());
+ *result *= exact_powers_of_ten[exponent];
+ return true;
+ }
+ int remaining_digits =
+ kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
+ if ((0 <= exponent) &&
+ (exponent - remaining_digits < kExactPowersOfTenSize)) {
+ // The trimmed string was short and we can multiply it with
+ // 10^remaining_digits. As a result the remaining exponent now fits
+ // into a double too.
+ *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
+ ASSERT(read_digits == trimmed.length());
+ *result *= exact_powers_of_ten[remaining_digits];
+ *result *= exact_powers_of_ten[exponent - remaining_digits];
+ return true;
+ }
+ }
+ return false;
+}
+
+
+// Returns 10^exponent as an exact DiyFp.
+// The given exponent must be in the range [1; kDecimalExponentDistance[.
+static DiyFp AdjustmentPowerOfTen(int exponent) {
+ ASSERT(0 < exponent);
+ ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance);
+ // Simply hardcode the remaining powers for the given decimal exponent
+ // distance.
+ ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8);
+ switch (exponent) {
+ case 1: return DiyFp(UINT64_2PART_C(0xa0000000, 00000000), -60);
+ case 2: return DiyFp(UINT64_2PART_C(0xc8000000, 00000000), -57);
+ case 3: return DiyFp(UINT64_2PART_C(0xfa000000, 00000000), -54);
+ case 4: return DiyFp(UINT64_2PART_C(0x9c400000, 00000000), -50);
+ case 5: return DiyFp(UINT64_2PART_C(0xc3500000, 00000000), -47);
+ case 6: return DiyFp(UINT64_2PART_C(0xf4240000, 00000000), -44);
+ case 7: return DiyFp(UINT64_2PART_C(0x98968000, 00000000), -40);
+ default:
+ UNREACHABLE();
+ return DiyFp(0, 0);
+ }
+}
+
+
+// If the function returns true then the result is the correct double.
+// Otherwise it is either the correct double or the double that is just below
+// the correct double.
+static bool DiyFpStrtod(Vector<const char> buffer,
+ int exponent,
+ double* result) {
+ DiyFp input;
+ int remaining_decimals;
+ ReadDiyFp(buffer, &input, &remaining_decimals);
+ // Since we may have dropped some digits the input is not accurate.
+ // If remaining_decimals is different from 0 then the error is at most
+ // 0.5 ulp (unit in the last place).
+ // We don't want to deal with fractions and therefore keep a common
+ // denominator.
+ const int kDenominatorLog = 3;
+ const int kDenominator = 1 << kDenominatorLog;
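+ // I.e. all error terms below are counted in units of 1/8 ulp; an error of
+ // 0.5 ulp corresponds to kDenominator / 2 == 4.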
+ // Move the remaining decimals into the exponent.
+ exponent += remaining_decimals;
+ int error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
+
+ int old_e = input.e();
+ input.Normalize();
+ error <<= old_e - input.e();
+
+ ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
+ if (exponent < PowersOfTenCache::kMinDecimalExponent) {
+ *result = 0.0;
+ return true;
+ }
+ DiyFp cached_power;
+ int cached_decimal_exponent;
+ PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
+ &cached_power,
+ &cached_decimal_exponent);
+
+ if (cached_decimal_exponent != exponent) {
+ int adjustment_exponent = exponent - cached_decimal_exponent;
+ DiyFp adjustment_power = AdjustmentPowerOfTen(adjustment_exponent);
+ input.Multiply(adjustment_power);
+ if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
+ // The product of input with the adjustment power fits into a 64 bit
+ // integer.
+ ASSERT(DiyFp::kSignificandSize == 64);
+ } else {
+ // The adjustment power is exact. There is hence only an error of 0.5.
+ error += kDenominator / 2;
+ }
+ }
+
+ input.Multiply(cached_power);
+ // The error introduced by the multiplication a*b equals
+ // error_a + error_b + error_a*error_b/2^64 + 0.5
+ // Substituting a with 'input' and b with 'cached_power' we have
+ // error_b = 0.5 (all cached powers have an error of less than 0.5 ulp),
+ // error_ab = 0 or 1/kDenominator (an upper bound for error_a*error_b/2^64).
+ int error_b = kDenominator / 2;
+ int error_ab = (error == 0 ? 0 : 1); // We round up to 1.
+ int fixed_error = kDenominator / 2;
+ error += error_b + error_ab + fixed_error;
+
+ old_e = input.e();
+ input.Normalize();
+ error <<= old_e - input.e();
+
+ // See if the double's significand changes if we add/subtract the error.
+ int order_of_magnitude = DiyFp::kSignificandSize + input.e();
+ int effective_significand_size =
+ Double::SignificandSizeForOrderOfMagnitude(order_of_magnitude);
+ int precision_digits_count =
+ DiyFp::kSignificandSize - effective_significand_size;
+ if (precision_digits_count + kDenominatorLog >= DiyFp::kSignificandSize) {
+ // This can only happen for very small denormals. In this case the
+ // half-way multiplied by the denominator exceeds the range of a uint64.
+ // Simply shift everything to the right.
+ int shift_amount = (precision_digits_count + kDenominatorLog) -
+ DiyFp::kSignificandSize + 1;
+ input.set_f(input.f() >> shift_amount);
+ input.set_e(input.e() + shift_amount);
+ // We add 1 for the lost precision of error, and kDenominator for
+ // the lost precision of input.f().
+ error = (error >> shift_amount) + 1 + kDenominator;
+ precision_digits_count -= shift_amount;
+ }
+ // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
+ ASSERT(DiyFp::kSignificandSize == 64);
+ ASSERT(precision_digits_count < 64);
+ uint64_t one64 = 1;
+ uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
+ uint64_t precision_bits = input.f() & precision_bits_mask;
+ uint64_t half_way = one64 << (precision_digits_count - 1);
+ precision_bits *= kDenominator;
+ half_way *= kDenominator;
+ DiyFp rounded_input(input.f() >> precision_digits_count,
+ input.e() + precision_digits_count);
+ if (precision_bits >= half_way + error) {
+ rounded_input.set_f(rounded_input.f() + 1);
+ }
+ // If the last bits are too close to the half-way case then we are too
+ // inaccurate and round down. In this case we return false so that we can
+ // fall back to a more precise algorithm.
+
+ *result = Double(rounded_input).value();
+ if (half_way - error < precision_bits && precision_bits < half_way + error) {
+ // Too imprecise. The caller will have to fall back to a slower version.
+ // However the returned number is guaranteed to be either the correct
+ // double, or the next-lower double.
+ return false;
+ } else {
+ return true;
+ }
+}
+
+
+// Returns
+// - -1 if buffer*10^exponent < diy_fp.
+// - 0 if buffer*10^exponent == diy_fp.
+// - +1 if buffer*10^exponent > diy_fp.
+// Preconditions:
+// buffer.length() + exponent <= kMaxDecimalPower + 1
+// buffer.length() + exponent > kMinDecimalPower
+// buffer.length() <= kMaxSignificantDecimalDigits
+static int CompareBufferWithDiyFp(Vector<const char> buffer,
+ int exponent,
+ DiyFp diy_fp) {
+ ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
+ ASSERT(buffer.length() + exponent > kMinDecimalPower);
+ ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
+ // Make sure that the Bignum will be able to hold all our numbers.
+ // Our Bignum implementation has a separate field for exponents. Shifts will
+ // consume at most one bigit (< 64 bits).
+ // log2(10) == 3.3219...
+ ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
+ Bignum buffer_bignum;
+ Bignum diy_fp_bignum;
+ buffer_bignum.AssignDecimalString(buffer);
+ diy_fp_bignum.AssignUInt64(diy_fp.f());
+ if (exponent >= 0) {
+ buffer_bignum.MultiplyByPowerOfTen(exponent);
+ } else {
+ diy_fp_bignum.MultiplyByPowerOfTen(-exponent);
+ }
+ if (diy_fp.e() > 0) {
+ diy_fp_bignum.ShiftLeft(diy_fp.e());
+ } else {
+ buffer_bignum.ShiftLeft(-diy_fp.e());
+ }
+ return Bignum::Compare(buffer_bignum, diy_fp_bignum);
+}
+
+
+// Returns true if the guess is the correct double.
+// If it returns false, guess is either the correct double or the next-lower
+// double.
+static bool ComputeGuess(Vector<const char> trimmed, int exponent,
+ double* guess) {
+ if (trimmed.length() == 0) {
+ *guess = 0.0;
+ return true;
+ }
+ if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) {
+ *guess = Double::Infinity();
+ return true;
+ }
+ if (exponent + trimmed.length() <= kMinDecimalPower) {
+ *guess = 0.0;
+ return true;
+ }
+
+ if (DoubleStrtod(trimmed, exponent, guess) ||
+ DiyFpStrtod(trimmed, exponent, guess)) {
+ return true;
+ }
+ if (*guess == Double::Infinity()) {
+ return true;
+ }
+ return false;
+}
+
+double Strtod(Vector<const char> buffer, int exponent) {
+ char copy_buffer[kMaxSignificantDecimalDigits];
+ Vector<const char> trimmed;
+ int updated_exponent;
+ TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
+ &trimmed, &updated_exponent);
+ exponent = updated_exponent;
+
+ double guess;
+ bool is_correct = ComputeGuess(trimmed, exponent, &guess);
+ if (is_correct) return guess;
+
+ DiyFp upper_boundary = Double(guess).UpperBoundary();
+ int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
+ if (comparison < 0) {
+ return guess;
+ } else if (comparison > 0) {
+ return Double(guess).NextDouble();
+ } else if ((Double(guess).Significand() & 1) == 0) {
+ // Round towards even.
+ return guess;
+ } else {
+ return Double(guess).NextDouble();
+ }
+}
+
+float Strtof(Vector<const char> buffer, int exponent) {
+ char copy_buffer[kMaxSignificantDecimalDigits];
+ Vector<const char> trimmed;
+ int updated_exponent;
+ TrimAndCut(buffer, exponent, copy_buffer, kMaxSignificantDecimalDigits,
+ &trimmed, &updated_exponent);
+ exponent = updated_exponent;
+
+ double double_guess;
+ bool is_correct = ComputeGuess(trimmed, exponent, &double_guess);
+
+ float float_guess = static_cast<float>(double_guess);
+ if (float_guess == double_guess) {
+ // This shortcut triggers for integer values.
+ return float_guess;
+ }
+
+ // We must catch double-rounding. Say the double has been rounded up, and is
+ // now a boundary of a float, and rounds up again. This is why we have to
+ // look at the previous double too.
+ // Example (in decimal numbers):
+ // input: 12349
+ // high-precision (4 digits): 1235
+ // low-precision (3 digits):
+ // when read from input: 123
+ // when rounded from high precision: 124.
+ // To do this we simply look at the neighbors of the correct result and see
+ // if they would round to the same float. If the guess is not correct we have
+ // to look at four values (since two different doubles could be the correct
+ // double).
+
+ double double_next = Double(double_guess).NextDouble();
+ double double_previous = Double(double_guess).PreviousDouble();
+
+ float f1 = static_cast<float>(double_previous);
+ float f2 = float_guess;
+ float f3 = static_cast<float>(double_next);
+ float f4;
+ if (is_correct) {
+ f4 = f3;
+ } else {
+ double double_next2 = Double(double_next).NextDouble();
+ f4 = static_cast<float>(double_next2);
+ }
+ ASSERT(f1 <= f2 && f2 <= f3 && f3 <= f4);
+
+ // If the guess doesn't lie near a single-precision boundary we can simply
+ // return its float-value.
+ if (f1 == f4) {
+ return float_guess;
+ }
+
+ ASSERT((f1 != f2 && f2 == f3 && f3 == f4) ||
+ (f1 == f2 && f2 != f3 && f3 == f4) ||
+ (f1 == f2 && f2 == f3 && f3 != f4));
+
+ // guess and next are the two possible candidates (in the same way that
+ // double_guess was the lower candidate for a double-precision guess).
+ float guess = f1;
+ float next = f4;
+ DiyFp upper_boundary;
+ if (guess == 0.0f) {
+ float min_float = 1e-45f;
+ upper_boundary = Double(static_cast<double>(min_float) / 2).AsDiyFp();
+ } else {
+ upper_boundary = Single(guess).UpperBoundary();
+ }
+ int comparison = CompareBufferWithDiyFp(trimmed, exponent, upper_boundary);
+ if (comparison < 0) {
+ return guess;
+ } else if (comparison > 0) {
+ return next;
+ } else if ((Single(guess).Significand() & 1) == 0) {
+ // Round towards even.
+ return guess;
+ } else {
+ return next;
+ }
+}
+
+} // namespace double_conversion
diff --git a/src/3rdparty/double-conversion/strtod.h b/src/3rdparty/double-conversion/strtod.h
new file mode 100644
index 0000000000..ed0293b8f5
--- /dev/null
+++ b/src/3rdparty/double-conversion/strtod.h
@@ -0,0 +1,45 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_STRTOD_H_
+#define DOUBLE_CONVERSION_STRTOD_H_
+
+#include "utils.h"
+
+namespace double_conversion {
+
+// The buffer must only contain digits in the range [0-9]. It must not
+// contain a dot or a sign. It must not start with '0', and must not be empty.
+double Strtod(Vector<const char> buffer, int exponent);
+
+// The buffer must only contain digits in the range [0-9]. It must not
+// contain a dot or a sign. It must not start with '0', and must not be empty.
+float Strtof(Vector<const char> buffer, int exponent);
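+
+// Editorial usage sketch (not part of the original header): the exponent is
+// applied after the digits, so parsing "12.5" means passing "125" with -1:
+//
+//   const char digits[] = "125";
+//   double d = Strtod(Vector<const char>(digits, 3), -1);  // 125 * 10^-1 = 12.5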
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_STRTOD_H_
diff --git a/src/3rdparty/double-conversion/utils.h b/src/3rdparty/double-conversion/utils.h
new file mode 100644
index 0000000000..767094b8b7
--- /dev/null
+++ b/src/3rdparty/double-conversion/utils.h
@@ -0,0 +1,313 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef DOUBLE_CONVERSION_UTILS_H_
+#define DOUBLE_CONVERSION_UTILS_H_
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <assert.h>
+#ifndef ASSERT
+#define ASSERT(condition) (assert(condition))
+#endif
+#ifndef UNIMPLEMENTED
+#define UNIMPLEMENTED() (abort())
+#endif
+#ifndef UNREACHABLE
+#define UNREACHABLE() (abort())
+#endif
+
+// Double-operation detection based on target architecture.
+// Linux uses an 80-bit wide floating-point stack on x86. This induces double
+// rounding, which in turn leads to wrong results.
+// An easy way to test whether the floating-point operations are correct is to
+// evaluate 89255.0/1e22. If the floating-point stack is 64 bits wide then
+// the result is equal to 89255e-22.
+// The best way to test this is to create a division function and to compare
+// the output of the division with the expected result. (Inlining must be
+// disabled.)
+// On Linux/x86: 89255e-22 != Div_double(89255.0/1e22)
+#if defined(_M_X64) || defined(__x86_64__) || \
+ defined(__ARMEL__) || defined(__avr32__) || \
+ defined(__hppa__) || defined(__ia64__) || \
+ defined(__mips__) || defined(__powerpc__) || \
+ defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
+ defined(__SH4__) || defined(__alpha__) || \
+ defined(_MIPS_ARCH_MIPS32R2)
+#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
+#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
+#if defined(_WIN32)
+// Windows uses a 64-bit wide floating-point stack.
+#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
+#else
+#undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
+#endif // _WIN32
+#else
+#error Target architecture was not detected as supported by Double-Conversion.
+#endif
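+
+// Editorial sketch (not part of the original source): the test described in
+// the comment above boils down to a division helper that must not be inlined:
+//
+//   static double Div(double a, double b) { return a / b; }
+//   // With an 80-bit x87 stack, Div(89255.0, 1e22) != 89255e-22.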
+
+
+#if defined(_WIN32) && !defined(__MINGW32__)
+
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t; // NOLINT
+typedef unsigned short uint16_t; // NOLINT
+typedef int int32_t;
+typedef unsigned int uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+// intptr_t and friends are defined in crtdefs.h through stdio.h.
+
+#else
+
+#include <stdint.h>
+
+#endif
+
+// The following macro works on both 32 and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456
+// write UINT64_2PART_C(0x12345678,90123456);
+#define UINT64_2PART_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
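+
+// For example (editorial sketch, not part of the original source):
+//
+//   uint64_t exponent_mask = UINT64_2PART_C(0x7FF00000, 00000000);
+//   // == 0x7FF0000000000000, with no 64-bit literal suffix needed.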
+
+
+// The expression ARRAY_SIZE(a) is a compile-time constant of type
+// size_t which represents the number of elements of the given
+// array. You should only use ARRAY_SIZE on statically allocated
+// arrays.
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+#endif
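+
+// For example (editorial sketch, not part of the original source):
+//
+//   int digits[8];
+//   size_t n = ARRAY_SIZE(digits);  // n == 8, computed at compile time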
+
+// A macro to disallow the evil copy constructor and operator= functions.
+// This should be used in the private: declarations for a class.
+#ifndef DISALLOW_COPY_AND_ASSIGN
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+#endif
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#ifndef DISALLOW_IMPLICIT_CONSTRUCTORS
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName(); \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+#endif
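+
+// For example (editorial sketch, not part of the original source), a
+// hypothetical static-only helper class would use it as follows:
+//
+//   class CachedPowers {
+//    public:
+//     static int GetCachedPower(int exponent);
+//    private:
+//     DISALLOW_IMPLICIT_CONSTRUCTORS(CachedPowers);
+//   };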
+
+namespace double_conversion {
+
+static const int kCharSize = sizeof(char);
+
+// Returns the maximum of the two parameters.
+template <typename T>
+static T Max(T a, T b) {
+ return a < b ? b : a;
+}
+
+
+// Returns the minimum of the two parameters.
+template <typename T>
+static T Min(T a, T b) {
+ return a < b ? a : b;
+}
+
+
+inline int StrLength(const char* string) {
+ size_t length = strlen(string);
+ ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
+ return static_cast<int>(length);
+}
+
+// This is a simplified version of V8's Vector class.
+template <typename T>
+class Vector {
+ public:
+ Vector() : start_(NULL), length_(0) {}
+ Vector(T* data, int length) : start_(data), length_(length) {
+ ASSERT(length == 0 || (length > 0 && data != NULL));
+ }
+
+ // Returns a vector using the same backing storage as this one,
+ // spanning from and including 'from', to but not including 'to'.
+ Vector<T> SubVector(int from, int to) {
+ ASSERT(to <= length_);
+ ASSERT(from < to);
+ ASSERT(0 <= from);
+ return Vector<T>(start() + from, to - from);
+ }
+
+ // Returns the length of the vector.
+ int length() const { return length_; }
+
+ // Returns whether or not the vector is empty.
+ bool is_empty() const { return length_ == 0; }
+
+ // Returns the pointer to the start of the data in the vector.
+ T* start() const { return start_; }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](int index) const {
+ ASSERT(0 <= index && index < length_);
+ return start_[index];
+ }
+
+ T& first() { return start_[0]; }
+
+ T& last() { return start_[length_ - 1]; }
+
+ private:
+ T* start_;
+ int length_;
+};
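+
+// Usage sketch (editorial, not part of the original source):
+//
+//   const char digits[] = "12345";
+//   Vector<const char> all(digits, 5);
+//   Vector<const char> middle = all.SubVector(1, 4);  // spans "234"
+//   ASSERT(middle.length() == 3 && middle[0] == '2');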
+
+
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that check the
+// buffer bounds on all operations in debug mode.
+class StringBuilder {
+ public:
+ StringBuilder(char* buffer, int size)
+ : buffer_(buffer, size), position_(0) { }
+
+ ~StringBuilder() { if (!is_finalized()) Finalize(); }
+
+ int size() const { return buffer_.length(); }
+
+ // Get the current position in the builder.
+ int position() const {
+ ASSERT(!is_finalized());
+ return position_;
+ }
+
+ // Reset the position.
+ void Reset() { position_ = 0; }
+
+ // Add a single character to the builder. It is not allowed to add
+ // 0-characters; use the Finalize() method to terminate the string
+ // instead.
+ void AddCharacter(char c) {
+ ASSERT(c != '\0');
+ ASSERT(!is_finalized() && position_ < buffer_.length());
+ buffer_[position_++] = c;
+ }
+
+ // Add an entire string to the builder. Uses strlen() internally to
+ // compute the length of the input string.
+ void AddString(const char* s) {
+ AddSubstring(s, StrLength(s));
+ }
+
+ // Add the first 'n' characters of the given string 's' to the
+ // builder. The input string must have enough characters.
+ void AddSubstring(const char* s, int n) {
+ ASSERT(!is_finalized() && position_ + n < buffer_.length());
+ ASSERT(static_cast<size_t>(n) <= strlen(s));
+ memmove(&buffer_[position_], s, n * kCharSize);
+ position_ += n;
+ }
+
+
+ // Add character padding to the builder. If count is non-positive,
+ // nothing is added to the builder.
+ void AddPadding(char c, int count) {
+ for (int i = 0; i < count; i++) {
+ AddCharacter(c);
+ }
+ }
+
+ // Finalize the string by 0-terminating it and returning the buffer.
+ char* Finalize() {
+ ASSERT(!is_finalized() && position_ < buffer_.length());
+ buffer_[position_] = '\0';
+ // Make sure nobody managed to add a 0-character to the
+ // buffer while building the string.
+ ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
+ position_ = -1;
+ ASSERT(is_finalized());
+ return buffer_.start();
+ }
+
+ private:
+ Vector<char> buffer_;
+ int position_;
+
+ bool is_finalized() const { return position_ < 0; }
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+};
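+
+// Usage sketch (editorial, not part of the original source):
+//
+//   char buffer[16];
+//   StringBuilder builder(buffer, sizeof(buffer));
+//   builder.AddString("3");
+//   builder.AddCharacter('.');
+//   builder.AddPadding('3', 4);
+//   const char* result = builder.Finalize();  // "3.3333", 0-terminated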
+
+// The type-based aliasing rule allows the compiler to assume that pointers of
+// different types (for some definition of different) never alias each other.
+// Thus the following code does not work:
+//
+// float f = foo();
+// int fbits = *(int*)(&f);
+//
+// The compiler 'knows' that the int pointer can't refer to f since the types
+// don't match, so the compiler may cache f in a register, leaving random data
+// in fbits. Using C++ style casts makes no difference, however a pointer to
+// char data is assumed to alias any other pointer. This is the 'memcpy
+// exception'.
+//
+// BitCast uses the memcpy exception to move the bits from a variable of one
+// type to a variable of another type. Of course the end result is likely to
+// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
+// will completely optimize BitCast away.
+//
+// There is an additional use for BitCast.
+// Recent gccs will warn when they see casts that may result in breakage due to
+// the type-based aliasing rule. If you have checked that there is no breakage
+// you can use BitCast to cast one pointer type to another. This confuses gcc
+// enough that it can no longer see that you have cast one pointer type to
+// another thus avoiding the warning.
+template <class Dest, class Source>
+inline Dest BitCast(const Source& source) {
+ // Compile time assertion: sizeof(Dest) == sizeof(Source)
+ // A compile error here means your Dest and Source have different sizes.
+ typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
+
+ Dest dest;
+ memmove(&dest, &source, sizeof(dest));
+ return dest;
+}
+
+template <class Dest, class Source>
+inline Dest BitCast(Source* source) {
+ return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+}
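+
+// Usage sketch (editorial, not part of the original source): reading the raw
+// IEEE-754 bits of a double without violating strict aliasing:
+//
+//   double d = 1.0;
+//   uint64_t bits = BitCast<uint64_t>(d);  // 0x3FF0000000000000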
+
+} // namespace double_conversion
+
+#endif // DOUBLE_CONVERSION_UTILS_H_
diff --git a/src/3rdparty/masm/WeakRandom.h b/src/3rdparty/masm/WeakRandom.h
new file mode 100644
index 0000000000..325d1f6ac6
--- /dev/null
+++ b/src/3rdparty/masm/WeakRandom.h
@@ -0,0 +1,52 @@
+/****************************************************************************
+**
+** Copyright (C) 2012 Digia Plc and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/legal
+**
+** This file is part of the V4VM module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and Digia. For licensing terms and
+** conditions see http://qt.digia.com/licensing. For further information
+** use the contact form at http://qt.digia.com/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Digia gives you certain additional
+** rights. These rights are described in the Digia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3.0 as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU General Public License version 3.0 requirements will be
+** met: http://www.gnu.org/copyleft/gpl.html.
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef MASM_WEAKRANDOM_H
+#define MASM_WEAKRANDOM_H
+
+#include <stdint.h>
+
+struct WeakRandom {
+ WeakRandom(int) {}
+ uint32_t getUint32() { return 0; }
+};
+
+#endif // MASM_WEAKRANDOM_H
diff --git a/src/3rdparty/masm/assembler/ARMAssembler.cpp b/src/3rdparty/masm/assembler/ARMAssembler.cpp
new file mode 100644
index 0000000000..6912d1ea39
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMAssembler.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "ARMAssembler.h"
+
+namespace JSC {
+
+// Patching helpers
+
+void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+{
+ ARMWord *ldr = reinterpret_cast<ARMWord*>(loadAddr);
+ ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
+ ARMWord index = (*ldr & 0xfff) >> 1;
+
+ ASSERT(diff >= 1);
+ if (diff >= 2 || index > 0) {
+ diff = (diff + index - 2) * sizeof(ARMWord);
+ ASSERT(diff <= 0xfff);
+ *ldr = (*ldr & ~0xfff) | diff;
+ } else
+ *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord);
+}
+
+// Handle immediates
+
+ARMWord ARMAssembler::getOp2(ARMWord imm)
+{
+ int rol;
+
+ if (imm <= 0xff)
+ return Op2Immediate | imm;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol = 8;
+ }
+ else {
+ imm = (imm << 24) | (imm >> 8);
+ rol = 0;
+ }
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ return Op2Immediate | (imm >> 24) | (rol << 8);
+
+ return InvalidImmediate;
+}
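+
+// For example (editorial sketch, not part of the original source):
+//
+//   getOp2(0xff)       == Op2Immediate | 0xff             // no rotation needed
+//   getOp2(0xff000000) == Op2Immediate | 0xff | (4 << 8)  // 0xff rotated right by 8
+//   getOp2(0x101)      == InvalidImmediate                // needs two instructions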
+
+int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
+{
+ // Step 1: Search for a non-immediate part
+ ARMWord mask;
+ ARMWord imm1;
+ ARMWord imm2;
+ int rol;
+
+ mask = 0xff000000;
+ rol = 8;
+ while(1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = 4 + (rol >> 1);
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3) {
+ // rol 8
+ imm = (imm << 8) | (imm >> 24);
+ mask = 0xff00;
+ rol = 24;
+ while (1) {
+ if ((imm & mask) == 0) {
+ imm = (imm << rol) | (imm >> (32 - rol));
+ rol = (rol >> 1) - 8;
+ break;
+ }
+ rol += 2;
+ mask >>= 2;
+ if (mask & 0x3)
+ return 0;
+ }
+ break;
+ }
+ }
+
+ ASSERT((imm & 0xff) == 0);
+
+ if ((imm & 0xff000000) == 0) {
+ imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+ imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+ } else if (imm & 0xc0000000) {
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xff000000) == 0) {
+ imm <<= 8;
+ rol += 4;
+ }
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ } else {
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+ imm <<= 8;
+ rol += 4;
+
+ if ((imm & 0xf0000000) == 0) {
+ imm <<= 4;
+ rol += 2;
+ }
+
+ if ((imm & 0xc0000000) == 0) {
+ imm <<= 2;
+ rol += 1;
+ }
+
+ if ((imm & 0x00ffffff) == 0)
+ imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8);
+ else
+ return 0;
+ }
+
+ if (positive) {
+ mov(reg, imm1);
+ orr(reg, reg, imm2);
+ } else {
+ mvn(reg, imm1);
+ bic(reg, reg, imm2);
+ }
+
+ return 1;
+}
+
+ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
+{
+ ARMWord tmp;
+
+ // Try to do it with a single instruction
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate)
+ return tmp;
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ if (invert)
+ return tmp | Op2InvertedImmediate;
+ mvn(tmpReg, tmp);
+ return tmpReg;
+ }
+
+ return encodeComplexImm(imm, tmpReg);
+}
+
+void ARMAssembler::moveImm(ARMWord imm, int dest)
+{
+ ARMWord tmp;
+
+ // Try to do it with a single instruction
+ tmp = getOp2(imm);
+ if (tmp != InvalidImmediate) {
+ mov(dest, tmp);
+ return;
+ }
+
+ tmp = getOp2(~imm);
+ if (tmp != InvalidImmediate) {
+ mvn(dest, tmp);
+ return;
+ }
+
+ encodeComplexImm(imm, dest);
+}
+
+ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
+{
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ ARMWord tmp = getImm16Op2(imm);
+ if (tmp != InvalidImmediate) {
+ movw(dest, tmp);
+ return dest;
+ }
+ movw(dest, getImm16Op2(imm & 0xffff));
+ movt(dest, getImm16Op2(imm >> 16));
+ return dest;
+#else
+ // Do it with two instructions
+ if (genInt(dest, imm, true))
+ return dest;
+ if (genInt(dest, ~imm, false))
+ return dest;
+
+ ldrImmediate(dest, imm);
+ return dest;
+#endif
+}
+
+// Memory load/store helpers
+
+void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xfff)
+ dtrUp(transferType, srcDst, base, offset);
+ else if (offset <= 0xfffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8));
+ dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xfff)
+ dtrDown(transferType, srcDst, base, -offset);
+ else if (offset >= -0xfffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8));
+ dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
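+
+// Editorial sketch (not part of the original source): an offset of 0x12345
+// does not fit the 12-bit immediate, so dataTransfer32 above emits
+//
+//   add S0, base, #0x12000   ; top bits, as a rotated immediate
+//   ldr rd, [S0, #0x345]     ; remaining 12 bits
+//
+// where S0 is the scratch register (an alias for r3 in this assembler).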
+
+void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ ASSERT(scale >= 0 && scale <= 3);
+ ARMWord op2 = lsl(index, scale);
+
+ if (!offset) {
+ dtrUpRegister(transferType, srcDst, base, op2);
+ return;
+ }
+
+ if (offset <= 0xfffff && offset >= -0xfffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset)
+{
+ if (offset >= 0) {
+ if (offset <= 0xff)
+ halfDtrUp(transferType, srcDst, base, getOp2Half(offset));
+ else if (offset <= 0xffff) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8));
+ halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ } else {
+ if (offset >= -0xff)
+ halfDtrDown(transferType, srcDst, base, getOp2Half(-offset));
+ else if (offset >= -0xffff) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8));
+ halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff));
+ } else {
+ moveImm(offset, ARMRegisters::S0);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+ }
+ }
+}
+
+void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ if (!scale && !offset) {
+ halfDtrUpRegister(transferType, srcDst, base, index);
+ return;
+ }
+
+ ARMWord op2 = lsl(index, scale);
+
+ if (offset <= 0xffff && offset >= -0xffff) {
+ add(ARMRegisters::S0, base, op2);
+ dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset);
+ return;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, op2);
+ halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0);
+}
+
+void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset)
+{
+ // VFP cannot directly access memory that is not four-byte aligned.
+ if (!(offset & 0x3)) {
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrUp(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+
+ if (offset <= 0x3ff && offset >= 0) {
+ doubleDtrDown(transferType, srcDst, base, offset >> 2);
+ return;
+ }
+ if (offset <= 0x3ffff && offset >= 0) {
+ sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8));
+ doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
+ return;
+ }
+ offset = -offset;
+ }
+
+ moveImm(offset, ARMRegisters::S0);
+ add(ARMRegisters::S0, ARMRegisters::S0, base);
+ doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0);
+}
+
+void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
+{
+ add(ARMRegisters::S1, base, lsl(index, scale));
+ dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset);
+}
+
+PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+{
+ // 64-bit alignment is required for the next constant pool and for the JIT code as well
+ m_buffer.flushWithoutBarrier(true);
+ if (!m_buffer.isAligned(8))
+ bkpt(0);
+
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+ char* data = reinterpret_cast<char*>(result->start());
+
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ // The last bit is set if the constant must be placed in the constant pool.
+ int pos = (iter->m_offset) & (~0x1);
+ ARMWord* ldrAddr = reinterpret_cast_ptr<ARMWord*>(data + pos);
+ ARMWord* addr = getLdrImmAddress(ldrAddr);
+ if (*addr != InvalidBranchTarget) {
+ if (!(iter->m_offset & 1)) {
+ intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetchOffset);
+
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask);
+ continue;
+ }
+ }
+ *addr = reinterpret_cast<ARMWord>(data + *addr);
+ }
+ }
+
+ return result;
+}
+
+#if OS(LINUX) && COMPILER(RVCT)
+
+__asm void ARMAssembler::cacheFlush(void* code, size_t size)
+{
+ ARM
+ push {r7}
+ add r1, r1, r0
+ mov r7, #0xf0000
+ add r7, r7, #0x2
+ mov r2, #0x0
+ svc #0x0
+ pop {r7}
+ bx lr
+}
+
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
diff --git a/src/3rdparty/masm/assembler/ARMAssembler.h b/src/3rdparty/masm/assembler/ARMAssembler.h
new file mode 100644
index 0000000000..3888226b21
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMAssembler.h
@@ -0,0 +1,1129 @@
+/*
+ * Copyright (C) 2009, 2010 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "AssemblerBufferWithConstantPool.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+namespace JSC {
+
+ typedef uint32_t ARMWord;
+
+ namespace ARMRegisters {
+ typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3, S0 = r3, /* Same as thumb assembler. */
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12, S1 = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15
+ } RegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7, SD0 = d7, /* Same as thumb assembler. */
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31
+ } FPRegisterID;
+
+ } // namespace ARMRegisters
+
+ class ARMAssembler {
+ public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPRegisterID FPRegisterID;
+ typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ ARMAssembler()
+ : m_indexOfTailOfLastWatchpoint(1)
+ {
+ }
+
+ // ARM conditional constants
+ typedef enum {
+ EQ = 0x00000000, // Zero
+ NE = 0x10000000, // Non-zero
+ CS = 0x20000000,
+ CC = 0x30000000,
+ MI = 0x40000000,
+ PL = 0x50000000,
+ VS = 0x60000000,
+ VC = 0x70000000,
+ HI = 0x80000000,
+ LS = 0x90000000,
+ GE = 0xa0000000,
+ LT = 0xb0000000,
+ GT = 0xc0000000,
+ LE = 0xd0000000,
+ AL = 0xe0000000
+ } Condition;
+
+ // ARM instruction constants
+ enum {
+ AND = (0x0 << 21),
+ EOR = (0x1 << 21),
+ SUB = (0x2 << 21),
+ RSB = (0x3 << 21),
+ ADD = (0x4 << 21),
+ ADC = (0x5 << 21),
+ SBC = (0x6 << 21),
+ RSC = (0x7 << 21),
+ TST = (0x8 << 21),
+ TEQ = (0x9 << 21),
+ CMP = (0xa << 21),
+ CMN = (0xb << 21),
+ ORR = (0xc << 21),
+ MOV = (0xd << 21),
+ BIC = (0xe << 21),
+ MVN = (0xf << 21),
+ MUL = 0x00000090,
+ MULL = 0x00c00090,
+ VMOV_F64 = 0x0eb00b40,
+ VADD_F64 = 0x0e300b00,
+ VDIV_F64 = 0x0e800b00,
+ VSUB_F64 = 0x0e300b40,
+ VMUL_F64 = 0x0e200b00,
+ VCMP_F64 = 0x0eb40b40,
+ VSQRT_F64 = 0x0eb10bc0,
+ VABS_F64 = 0x0eb00bc0,
+ VNEG_F64 = 0x0eb10b40,
+ STMDB = 0x09200000,
+ LDMIA = 0x08b00000,
+ B = 0x0a000000,
+ BL = 0x0b000000,
+ BX = 0x012fff10,
+ VMOV_VFP64 = 0x0c400a10,
+ VMOV_ARM64 = 0x0c500a10,
+ VMOV_VFP32 = 0x0e000a10,
+ VMOV_ARM32 = 0x0e100a10,
+ VCVT_F64_S32 = 0x0eb80bc0,
+ VCVT_S32_F64 = 0x0ebd0b40,
+ VCVT_U32_F64 = 0x0ebc0b40,
+ VCVT_F32_F64 = 0x0eb70bc0,
+ VCVT_F64_F32 = 0x0eb70ac0,
+ VMRS_APSR = 0x0ef1fa10,
+ CLZ = 0x016f0f10,
+ BKPT = 0xe1200070,
+ BLX = 0x012fff30,
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ MOVW = 0x03000000,
+ MOVT = 0x03400000,
+#endif
+ NOP = 0xe1a00000,
+ };
+
+ enum {
+ Op2Immediate = (1 << 25),
+ ImmediateForHalfWordTransfer = (1 << 22),
+ Op2InvertedImmediate = (1 << 26),
+ SetConditionalCodes = (1 << 20),
+ Op2IsRegisterArgument = (1 << 25),
+ // Data transfer flags.
+ DataTransferUp = (1 << 23),
+ DataTransferWriteBack = (1 << 21),
+ DataTransferPostUpdate = (1 << 24),
+ DataTransferLoad = (1 << 20),
+ ByteDataTransfer = (1 << 22),
+ };
+
+ enum DataTransferTypeA {
+ LoadUint32 = 0x05000000 | DataTransferLoad,
+ LoadUint8 = 0x05400000 | DataTransferLoad,
+ StoreUint32 = 0x05000000,
+ StoreUint8 = 0x05400000,
+ };
+
+ enum DataTransferTypeB {
+ LoadUint16 = 0x010000b0 | DataTransferLoad,
+ LoadInt16 = 0x010000f0 | DataTransferLoad,
+ LoadInt8 = 0x010000d0 | DataTransferLoad,
+ StoreUint16 = 0x010000b0,
+ };
+
+ enum DataTransferTypeFloat {
+ LoadFloat = 0x0d000a00 | DataTransferLoad,
+ LoadDouble = 0x0d000b00 | DataTransferLoad,
+ StoreFloat = 0x0d000a00,
+ StoreDouble = 0x0d000b00,
+ };
+
+ // Masks of ARM instructions
+ enum {
+ BranchOffsetMask = 0x00ffffff,
+ ConditionalFieldMask = 0xf0000000,
+ DataTransferOffsetMask = 0xfff,
+ };
+
+ enum {
+ MinimumBranchOffsetDistance = -0x00800000,
+ MaximumBranchOffsetDistance = 0x007fffff,
+ };
+
+ enum {
+ padForAlign8 = 0x00,
+ padForAlign16 = 0x0000,
+ padForAlign32 = 0xe12fff7f // 'bkpt 0xffff' instruction.
+ };
+
+ static const ARMWord InvalidImmediate = 0xf0000000;
+ static const ARMWord InvalidBranchTarget = 0xffffffff;
+ static const int DefaultPrefetchOffset = 2;
+
+ static const ARMWord BlxInstructionMask = 0x012fff30;
+ static const ARMWord LdrOrAddInstructionMask = 0x0ff00000;
+ static const ARMWord LdrPcImmediateInstructionMask = 0x0f7f0000;
+
+ static const ARMWord AddImmediateInstruction = 0x02800000;
+ static const ARMWord BlxInstruction = 0x012fff30;
+ static const ARMWord LdrImmediateInstruction = 0x05900000;
+ static const ARMWord LdrPcImmediateInstruction = 0x051f0000;
+
+ // Instruction formatting
+
+ void emitInstruction(ARMWord op, int rd, int rn, ARMWord op2)
+ {
+ ASSERT(((op2 & ~Op2Immediate) <= 0xfff) || (((op2 & ~ImmediateForHalfWordTransfer) <= 0xfff)));
+ m_buffer.putInt(op | RN(rn) | RD(rd) | op2);
+ }
+
+ void emitDoublePrecisionInstruction(ARMWord op, int dd, int dn, int dm)
+ {
+ ASSERT((dd >= 0 && dd <= 31) && (dn >= 0 && dn <= 31) && (dm >= 0 && dm <= 31));
+ m_buffer.putInt(op | ((dd & 0xf) << 12) | ((dd & 0x10) << (22 - 4))
+ | ((dn & 0xf) << 16) | ((dn & 0x10) << (7 - 4))
+ | (dm & 0xf) | ((dm & 0x10) << (5 - 4)));
+ }
+
+ void emitSinglePrecisionInstruction(ARMWord op, int sd, int sn, int sm)
+ {
+ ASSERT((sd >= 0 && sd <= 31) && (sn >= 0 && sn <= 31) && (sm >= 0 && sm <= 31));
+ m_buffer.putInt(op | ((sd >> 1) << 12) | ((sd & 0x1) << 22)
+ | ((sn >> 1) << 16) | ((sn & 0x1) << 7)
+ | (sm >> 1) | ((sm & 0x1) << 5));
+ }
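+
+ // For example (editorial sketch, not part of the original source):
+ // add(ARMRegisters::r0, ARMRegisters::r1, Op2Immediate | 42) goes through
+ // emitInstruction and emits 0xe281002a, i.e. "add r0, r1, #42".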
+
+ void bitAnd(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND, rd, rn, op2);
+ }
+
+ void bitAnds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | AND | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void eor(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR, rd, rn, op2);
+ }
+
+ void eors(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | EOR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sub(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB, rd, rn, op2);
+ }
+
+ void subs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SUB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsb(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB, rd, rn, op2);
+ }
+
+ void rsbs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSB | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void add(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD, rd, rn, op2);
+ }
+
+ void adds(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADD | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void adc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC, rd, rn, op2);
+ }
+
+ void adcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ADC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void sbc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC, rd, rn, op2);
+ }
+
+ void sbcs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | SBC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void rsc(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC, rd, rn, op2);
+ }
+
+ void rscs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | RSC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void tst(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TST | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void teq(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | TEQ | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmp(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMP | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void cmn(int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | CMN | SetConditionalCodes, 0, rn, op2);
+ }
+
+ void orr(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR, rd, rn, op2);
+ }
+
+ void orrs(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | ORR | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mov(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV, rd, ARMRegisters::r0, op2);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ void movw(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVW | RD(rd) | op2);
+ }
+
+ void movt(int rd, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT((op2 | 0xf0fff) == 0xf0fff);
+ m_buffer.putInt(toARMWord(cc) | MOVT | RD(rd) | op2);
+ }
+#endif
+
+ void movs(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MOV | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void bic(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC, rd, rn, op2);
+ }
+
+ void bics(int rd, int rn, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BIC | SetConditionalCodes, rd, rn, op2);
+ }
+
+ void mvn(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN, rd, ARMRegisters::r0, op2);
+ }
+
+ void mvns(int rd, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | MVN | SetConditionalCodes, rd, ARMRegisters::r0, op2);
+ }
+
+ void mul(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void muls(int rd, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MUL | SetConditionalCodes | RN(rd) | RS(rn) | RM(rm));
+ }
+
+ void mull(int rdhi, int rdlo, int rn, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm));
+ }
+
+ void vmov_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm);
+ }
+
+ void vadd_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VADD_F64, dd, dn, dm);
+ }
+
+ void vdiv_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VDIV_F64, dd, dn, dm);
+ }
+
+ void vsub_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSUB_F64, dd, dn, dm);
+ }
+
+ void vmul_f64(int dd, int dn, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VMUL_F64, dd, dn, dm);
+ }
+
+ void vcmp_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCMP_F64, dd, 0, dm);
+ }
+
+ void vsqrt_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VSQRT_F64, dd, 0, dm);
+ }
+
+ void vabs_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VABS_F64, dd, 0, dm);
+ }
+
+ void vneg_f64(int dd, int dm, Condition cc = AL)
+ {
+ emitDoublePrecisionInstruction(toARMWord(cc) | VNEG_F64, dd, 0, dm);
+ }
+
+ void ldrImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm, true);
+ }
+
+ void ldrUniqueImmediate(int rd, ARMWord imm, Condition cc = AL)
+ {
+ m_buffer.putIntWithConstantInt(toARMWord(cc) | LoadUint32 | DataTransferUp | RN(ARMRegisters::pc) | RD(rd), imm);
+ }
+
+ void dtrUp(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void dtrUpRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void dtrDown(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void dtrDownRegister(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | Op2IsRegisterArgument, rd, rb, rm);
+ }
+
+ void halfDtrUp(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rb, op2);
+ }
+
+ void halfDtrUpRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType | DataTransferUp, rd, rn, rm);
+ }
+
+ void halfDtrDown(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rb, op2);
+ }
+
+ void halfDtrDownRegister(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | transferType, rd, rn, rm);
+ }
+
+ void doubleDtrUp(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | DataTransferUp | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void doubleDtrDown(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL)
+ {
+ ASSERT(op2 <= 0xff && rd <= 15);
+ /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */
+ m_buffer.putInt(toARMWord(cc) | type | (rd << 12) | RN(rb) | op2);
+ }
+
+ void push(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | StoreUint32 | DataTransferWriteBack | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ void pop(int reg, Condition cc = AL)
+ {
+ ASSERT(ARMWord(reg) <= 0xf);
+ m_buffer.putInt(toARMWord(cc) | (LoadUint32 ^ DataTransferPostUpdate) | DataTransferUp | RN(ARMRegisters::sp) | RD(reg) | 0x4);
+ }
+
+ inline void poke(int reg, Condition cc = AL)
+ {
+ dtrDown(StoreUint32, ARMRegisters::sp, 0, reg, cc);
+ }
+
+ inline void peek(int reg, Condition cc = AL)
+ {
+ dtrUp(LoadUint32, reg, ARMRegisters::sp, 0, cc);
+ }
+
+ void vmov_vfp64(int sm, int rt, int rt2, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_arm64(int rt, int rt2, int sm, Condition cc = AL)
+ {
+ ASSERT(rt != rt2);
+ m_buffer.putInt(toARMWord(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4)));
+ }
+
+ void vmov_vfp32(int sn, int rt, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_VFP32, rt << 1, sn, 0);
+ }
+
+ void vmov_arm32(int rt, int sn, Condition cc = AL)
+ {
+ ASSERT(rt <= 15);
+ emitSinglePrecisionInstruction(toARMWord(cc) | VMOV_ARM32, rt << 1, sn, 0);
+ }
+
+ void vcvt_f64_s32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(!(sm & 0x1)); // sm must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_S32, dd, 0, (sm >> 1));
+ }
+
+ void vcvt_s32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_S32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_u32_f64(int sd, int dm, Condition cc = AL)
+ {
+ ASSERT(!(sd & 0x1)); // sd must be divisible by 2
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_U32_F64, (sd >> 1), 0, dm);
+ }
+
+ void vcvt_f64_f32(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F64_F32, dd, 0, sm);
+ }
+
+ void vcvt_f32_f64(int dd, int sm, Condition cc = AL)
+ {
+ ASSERT(dd <= 15 && sm <= 15);
+ emitDoublePrecisionInstruction(toARMWord(cc) | VCVT_F32_F64, dd, 0, sm);
+ }
+
+ void vmrs_apsr(Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | VMRS_APSR);
+ }
+
+ void clz(int rd, int rm, Condition cc = AL)
+ {
+ m_buffer.putInt(toARMWord(cc) | CLZ | RD(rd) | RM(rm));
+ }
+
+ void bkpt(ARMWord value)
+ {
+ m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf));
+ }
+
+ void nop()
+ {
+ m_buffer.putInt(NOP);
+ }
+
+ void bx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm));
+ }
+
+ AssemblerLabel blx(int rm, Condition cc = AL)
+ {
+ emitInstruction(toARMWord(cc) | BLX, 0, 0, RM(rm));
+ return m_buffer.label();
+ }
+
+ static ARMWord lsl(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x00;
+ }
+
+ static ARMWord lsr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x20;
+ }
+
+ static ARMWord asr(int reg, ARMWord value)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(value <= 0x1f);
+ return reg | (value << 7) | 0x40;
+ }
+
+ static ARMWord lslRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x10;
+ }
+
+ static ARMWord lsrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x30;
+ }
+
+ static ARMWord asrRegister(int reg, int shiftReg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ ASSERT(shiftReg <= ARMRegisters::pc);
+ return reg | (shiftReg << 8) | 0x50;
+ }
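+
+ // For example (editorial sketch, not part of the original source):
+ // lsl(ARMRegisters::r1, 2) builds the shifted-register operand "r1, LSL #2",
+ // so add(rd, rn, lsl(ARMRegisters::r1, 2)) computes rd = rn + (r1 << 2).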
+
+ // General helpers
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ m_buffer.ensureSpace(insnSpace, constSpace);
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_buffer.sizeOfConstantPool();
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ m_buffer.ensureSpaceForAnyInstruction();
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord));
+ AssemblerLabel result = m_buffer.label();
+ if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize()))
+ result = label();
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = labelIgnoringWatchpoints();
+ while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) {
+ nop();
+ // The space for the required instructions was ensured by labelForWatchpoint.
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ mov(ARMRegisters::r0, ARMRegisters::r0);
+
+ return label();
+ }
+
+ AssemblerLabel loadBranchTarget(int rd, Condition cc = AL, int useConstantPool = 0)
+ {
+ ensureSpace(sizeof(ARMWord), sizeof(ARMWord));
+ m_jumps.append(m_buffer.codeSize() | (useConstantPool & 0x1));
+ ldrUniqueImmediate(rd, InvalidBranchTarget, cc);
+ return m_buffer.label();
+ }
+
+ AssemblerLabel jmp(Condition cc = AL, int useConstantPool = 0)
+ {
+ return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID, JITCompilationEffort);
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // DFG assembly helpers for moving data between fp and registers.
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+ vmov_arm64(rd1, rd2, rn);
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ vmov_vfp64(rd, rn1, rn2);
+ }
+
+ // Patching helpers
+
+ static ARMWord* getLdrImmAddress(ARMWord* insn)
+ {
+ // Check for call
+ if ((*insn & LdrPcImmediateInstructionMask) != LdrPcImmediateInstruction) {
+ // Must be BLX
+ ASSERT((*insn & BlxInstructionMask) == BlxInstruction);
+ insn--;
+ }
+
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+ ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetchOffset * sizeof(ARMWord);
+ if (*insn & DataTransferUp)
+ return reinterpret_cast<ARMWord*>(addr + (*insn & DataTransferOffsetMask));
+ return reinterpret_cast<ARMWord*>(addr - (*insn & DataTransferOffsetMask));
+ }
+
+ static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
+ {
+ // Must be an ldr ..., [pc +/- imm]
+ ASSERT((*insn & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+
+ if (*insn & 0x1)
+ return reinterpret_cast<ARMWord*>(constPool + ((*insn & DataTransferOffsetMask) >> 1));
+ return getLdrImmAddress(insn);
+ }
+
+ static void patchPointerInternal(intptr_t from, void* to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(from);
+ ARMWord* addr = getLdrImmAddress(insn);
+ *addr = reinterpret_cast<ARMWord>(to);
+ }
+
+ static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
+ {
+ value = (value << 1) + 1;
+ ASSERT(!(value & ~DataTransferOffsetMask));
+ return (load & ~DataTransferOffsetMask) | value;
+ }
+
+ static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+
+ // Read pointers
+ static void* readPointer(void* from)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(from);
+ ARMWord* address = getLdrImmAddress(instruction);
+ return *reinterpret_cast<void**>(address);
+ }
+
+ // Patch pointers
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(code) + from.m_offset, to);
+ }
+
+ static void repatchInt32(void* where, int32_t to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(where), reinterpret_cast<void*>(to));
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(where);
+ ASSERT((*instruction & 0x0f700000) == LoadUint32);
+ if (value >= 0)
+ *instruction = (*instruction & 0xff7ff000) | DataTransferUp | value;
+ else
+ *instruction = (*instruction & 0xff7ff000) | -value;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ patchPointerInternal(reinterpret_cast<intptr_t>(from), to);
+ }
+
+ // Linkers
+ static intptr_t getAbsoluteJumpAddress(void* base, int offset = 0)
+ {
+ return reinterpret_cast<intptr_t>(base) + offset - sizeof(ARMWord);
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ARMWord* insn = reinterpret_cast<ARMWord*>(getAbsoluteJumpAddress(m_buffer.data(), from.m_offset));
+ ARMWord* addr = getLdrImmAddressOnPool(insn, m_buffer.poolAddress());
+ *addr = toARMWord(to.m_offset);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ patchPointerInternal(getAbsoluteJumpAddress(from), to);
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from))));
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetchOffset * sizeof(ARMWord));
+
+ if (!(difference & 1)) {
+ difference >>= 2;
+ if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) {
+ // Direct branch.
+ instruction[0] = B | AL | (difference & BranchOffsetMask);
+ cacheFlush(instruction, sizeof(ARMWord));
+ return;
+ }
+ }
+
+ // Load target.
+ instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4;
+ instruction[1] = reinterpret_cast<ARMWord>(to);
+ cacheFlush(instruction, sizeof(ARMWord) * 2);
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(ARMWord) * 2;
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | LdrImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+ cacheFlush(instruction, sizeof(ARMWord));
+
+ ASSERT((*instruction & LdrOrAddInstructionMask) == AddImmediateInstruction || (*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction);
+ if ((*instruction & LdrOrAddInstructionMask) == LdrImmediateInstruction) {
+ *instruction = (*instruction & ~LdrOrAddInstructionMask) | AddImmediateInstruction;
+ cacheFlush(instruction, sizeof(ARMWord));
+ }
+ }
+
+ static void revertBranchPtrWithPatch(void* instructionStart, RegisterID rn, ARMWord imm)
+ {
+ ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart);
+
+ ASSERT((instruction[2] & LdrPcImmediateInstructionMask) == LdrPcImmediateInstruction);
+ instruction[0] = toARMWord(AL) | ((instruction[2] & 0x0fff0fff) + sizeof(ARMWord)) | RD(ARMRegisters::S1);
+ *getLdrImmAddress(instruction) = imm;
+ instruction[1] = toARMWord(AL) | CMP | SetConditionalCodes | RN(rn) | RM(ARMRegisters::S1);
+ cacheFlush(instruction, 2 * sizeof(ARMWord));
+ }
+
+ // Address operations
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ // Address differences
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ return call.m_offset;
+ }
+
+ // Handle immediates
+
+ static ARMWord getOp2(ARMWord imm);
+
+ // Fast case if imm is known to be between 0 and 0xff
+ static ARMWord getOp2Byte(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return Op2Immediate | imm;
+ }
+
+ static ARMWord getOp2Half(ARMWord imm)
+ {
+ ASSERT(imm <= 0xff);
+ return ImmediateForHalfWordTransfer | (imm & 0x0f) | ((imm & 0xf0) << 4);
+ }
+
+#if WTF_ARM_ARCH_AT_LEAST(7)
+ static ARMWord getImm16Op2(ARMWord imm)
+ {
+ if (imm <= 0xffff)
+ return (imm & 0xf000) << 4 | (imm & 0xfff);
+ return InvalidImmediate;
+ }
+#endif
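+ // For example (editorial sketch, not part of the original source):
+ // getImm16Op2(0x1234) == 0x10234, splitting the value into the imm4 field
+ // (bits 16-19) and the imm12 field (bits 0-11) used by MOVW and MOVT.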
+ ARMWord getImm(ARMWord imm, int tmpReg, bool invert = false);
+ void moveImm(ARMWord imm, int dest);
+ ARMWord encodeComplexImm(ARMWord imm, int dest);
+
+ // Memory load/store helpers
+
+ void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+ void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset);
+ void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset);
+
+ // Constant pool handlers
+
+ static ARMWord placeConstantPoolBarrier(int offset)
+ {
+ offset = (offset - sizeof(ARMWord)) >> 2;
+ ASSERT((offset <= MaximumBranchOffsetDistance && offset >= MinimumBranchOffsetDistance));
+ return AL | B | (offset & BranchOffsetMask);
+ }
+
+#if OS(LINUX) && COMPILER(GCC)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "mov r7, #0xf0000\n"
+ "add r7, r7, #0x2\n"
+ "mov r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (begin), "r" (end)
+ : "r0", "r1", "r2");
+ }
+#endif
+
+#if OS(LINUX) && COMPILER(RVCT)
+ static __asm void cacheFlush(void* code, size_t);
+#else
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(LINUX) && COMPILER(GCC)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#elif OS(WINCE)
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+#elif OS(QNX) && ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(size);
+#elif OS(QNX)
+ msync(code, size, MS_INVALIDATE_ICACHE);
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+#endif
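+
+ // Tracing the GCC/Linux path above with a range that crosses one page
+ // boundary (addresses assumed for illustration, page size 4096): code at
+ // 0x1f00 with size 0x200 issues linuxPageFlush(0x1f00, 0x2000) followed by
+ // linuxPageFlush(0x2000, 0x2100), so no single flush request spans a page
+ // boundary.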
+
+ private:
+ static ARMWord RM(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg;
+ }
+
+ static ARMWord RS(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 8;
+ }
+
+ static ARMWord RD(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 12;
+ }
+
+ static ARMWord RN(int reg)
+ {
+ ASSERT(reg <= ARMRegisters::pc);
+ return reg << 16;
+ }
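+
+ // A sketch of how these field helpers compose (operands assumed for
+ // illustration): the emitters OR the condition, opcode and register fields
+ // into a single 32-bit word, e.g.
+ // toARMWord(AL) | CMP | SetConditionalCodes | RN(rn) | RM(rm)
+ // places rn in bits 16-19 and rm in bits 0-3, exactly as
+ // revertBranchPtrWithPatch does above.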
+
+ static ARMWord getConditionalField(ARMWord i)
+ {
+ return i & ConditionalFieldMask;
+ }
+
+ static ARMWord toARMWord(Condition cc)
+ {
+ return static_cast<ARMWord>(cc);
+ }
+
+ static ARMWord toARMWord(uint32_t u)
+ {
+ return static_cast<ARMWord>(u);
+ }
+
+ int genInt(int reg, ARMWord imm, bool positive);
+
+ ARMBuffer m_buffer;
+ Jumps m_jumps;
+ uint32_t m_indexOfTailOfLastWatchpoint;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.cpp b/src/3rdparty/masm/assembler/ARMv7Assembler.cpp
new file mode 100644
index 0000000000..faca66421b
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMv7Assembler.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "ARMv7Assembler.h"
+
+namespace JSC {
+
+}
+
+#endif
diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.h b/src/3rdparty/masm/assembler/ARMv7Assembler.h
new file mode 100644
index 0000000000..7dcf656921
--- /dev/null
+++ b/src/3rdparty/masm/assembler/ARMv7Assembler.h
@@ -0,0 +1,2790 @@
+/*
+ * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 University of Szeged
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ARMAssembler_h
+#define ARMAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "AssemblerBuffer.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <stdint.h>
+
+namespace JSC {
+
+namespace ARMRegisters {
+ typedef enum {
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7, wr = r7, // thumb work register
+ r8,
+ r9, sb = r9, // static base
+ r10, sl = r10, // stack limit
+ r11, fp = r11, // frame pointer
+ r12, ip = r12,
+ r13, sp = r13,
+ r14, lr = r14,
+ r15, pc = r15,
+ } RegisterID;
+
+ typedef enum {
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
+ } FPSingleRegisterID;
+
+ typedef enum {
+ d0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
+ } FPDoubleRegisterID;
+
+ typedef enum {
+ q0,
+ q1,
+ q2,
+ q3,
+ q4,
+ q5,
+ q6,
+ q7,
+ q8,
+ q9,
+ q10,
+ q11,
+ q12,
+ q13,
+ q14,
+ q15,
+ q16,
+ q17,
+ q18,
+ q19,
+ q20,
+ q21,
+ q22,
+ q23,
+ q24,
+ q25,
+ q26,
+ q27,
+ q28,
+ q29,
+ q30,
+ q31,
+ } FPQuadRegisterID;
+
+ inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
+ {
+ ASSERT(reg < d16);
+ return (FPSingleRegisterID)(reg << 1);
+ }
+
+ inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
+ {
+ ASSERT(!(reg & 1));
+ return (FPDoubleRegisterID)(reg >> 1);
+ }
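+
+ // Example of the aliasing above (VFP d0-d15 overlap s0-s31): asSingle(d1)
+ // yields s2 and asDouble(s2) maps back to d1; odd-numbered single registers
+ // have no double alias, hence the ASSERT(!(reg & 1)).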
+}
+
+class ARMv7Assembler;
+class ARMThumbImmediate {
+ friend class ARMv7Assembler;
+
+ typedef uint8_t ThumbImmediateType;
+ static const ThumbImmediateType TypeInvalid = 0;
+ static const ThumbImmediateType TypeEncoded = 1;
+ static const ThumbImmediateType TypeUInt16 = 2;
+
+ typedef union {
+ int16_t asInt;
+ struct {
+ unsigned imm8 : 8;
+ unsigned imm3 : 3;
+ unsigned i : 1;
+ unsigned imm4 : 4;
+ };
+ // If this is an encoded immediate, then it may describe a shift, or a pattern.
+ struct {
+ unsigned shiftValue7 : 7;
+ unsigned shiftAmount : 5;
+ };
+ struct {
+ unsigned immediate : 8;
+ unsigned pattern : 4;
+ };
+ } ThumbImmediateValue;
+
+ // byte0 contains the least significant byte; not using an array to make client code endian agnostic.
+ typedef union {
+ int32_t asInt;
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ };
+ } PatternBytes;
+
+ ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
+ {
+ if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
+ value >>= N; /* if any were set, lose the bottom N */
+ else /* if none of the top N bits are set, */
+ zeros += N; /* then we have identified N leading zeros */
+ }
+
+ static int32_t countLeadingZeros(uint32_t value)
+ {
+ if (!value)
+ return 32;
+
+ int32_t zeros = 0;
+ countLeadingZerosPartial(value, zeros, 16);
+ countLeadingZerosPartial(value, zeros, 8);
+ countLeadingZerosPartial(value, zeros, 4);
+ countLeadingZerosPartial(value, zeros, 2);
+ countLeadingZerosPartial(value, zeros, 1);
+ return zeros;
+ }
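+
+ // A worked example of the probes above (value chosen for illustration):
+ // countLeadingZeros(0x00010000) keeps the top half at N == 16 because bit 16
+ // is set, then accumulates 8 + 4 + 2 + 1 from the remaining probes,
+ // returning 15, the number of zero bits above bit 16.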
+
+ ARMThumbImmediate()
+ : m_type(TypeInvalid)
+ {
+ m_value.asInt = 0;
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
+ : m_type(type)
+ , m_value(value)
+ {
+ }
+
+ ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
+ : m_type(TypeUInt16)
+ {
+ // Make sure this constructor is only reached with type TypeUInt16;
+ // this extra parameter makes the code a little clearer by making it
+ // explicit at call sites which type is being constructed
+ ASSERT_UNUSED(type, type == TypeUInt16);
+
+ m_value.asInt = value;
+ }
+
+public:
+ static ARMThumbImmediate makeEncodedImm(uint32_t value)
+ {
+ ThumbImmediateValue encoding;
+ encoding.asInt = 0;
+
+ // okay, these are easy.
+ if (value < 256) {
+ encoding.immediate = value;
+ encoding.pattern = 0;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ int32_t leadingZeros = countLeadingZeros(value);
+ // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
+ ASSERT(leadingZeros < 24);
+
+ // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
+ // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
+ // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
+ int32_t rightShiftAmount = 24 - leadingZeros;
+ if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
+ // Shift the value down to the low byte position. The assign to
+ // shiftValue7 drops the implicit top bit.
+ encoding.shiftValue7 = value >> rightShiftAmount;
+ // The encoded shift amount is the magnitude of a right rotate.
+ encoding.shiftAmount = 8 + leadingZeros;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ PatternBytes bytes;
+ bytes.asInt = value;
+
+ if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 3;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
+ encoding.immediate = bytes.byte0;
+ encoding.pattern = 1;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
+ encoding.immediate = bytes.byte1;
+ encoding.pattern = 2;
+ return ARMThumbImmediate(TypeEncoded, encoding);
+ }
+
+ return ARMThumbImmediate();
+ }
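+
+ // Illustrative inputs for the cases above (values assumed, not from the
+ // original source):
+ // 0x000000ab -> immediate 0xab, pattern 0 (plain byte)
+ // 0xab000000 -> shifted-byte form via shiftValue7/shiftAmount
+ // 0x00ab00ab -> immediate 0xab, pattern 1 (byte0 == byte2)
+ // 0xab00ab00 -> immediate 0xab, pattern 2 (byte1 == byte3)
+ // 0xabababab -> immediate 0xab, pattern 3 (all four bytes equal)
+ // 0x00abcdef -> not encodable; an invalid ARMThumbImmediate is returned.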
+
+ static ARMThumbImmediate makeUInt12(int32_t value)
+ {
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : ARMThumbImmediate();
+ }
+
+ static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
+ {
+ // If this is not a 12-bit unsigned int, try making an encoded immediate.
+ return (!(value & 0xfffff000))
+ ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
+ : makeEncodedImm(value);
+ }
+
+ // The 'make' methods, above, return a !isValid() value if the argument
+ // cannot be represented as the requested type. This method, by contrast,
+ // always succeeds, since any uint16_t argument can be represented.
+ static ARMThumbImmediate makeUInt16(uint16_t value)
+ {
+ return ARMThumbImmediate(TypeUInt16, value);
+ }
+
+ bool isValid()
+ {
+ return m_type != TypeInvalid;
+ }
+
+ uint16_t asUInt16() const { return m_value.asInt; }
+
+ // These methods rely on the format of encoded byte values.
+ bool isUInt3() { return !(m_value.asInt & 0xfff8); }
+ bool isUInt4() { return !(m_value.asInt & 0xfff0); }
+ bool isUInt5() { return !(m_value.asInt & 0xffe0); }
+ bool isUInt6() { return !(m_value.asInt & 0xffc0); }
+ bool isUInt7() { return !(m_value.asInt & 0xff80); }
+ bool isUInt8() { return !(m_value.asInt & 0xff00); }
+ bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
+ bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
+ bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
+ bool isUInt16() { return m_type == TypeUInt16; }
+ uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
+ uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
+ uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
+ uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
+ uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
+ uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
+ uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
+ uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
+ uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
+ uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
+
+ bool isEncodedImm() { return m_type == TypeEncoded; }
+
+private:
+ ThumbImmediateType m_type;
+ ThumbImmediateValue m_value;
+};
+
+typedef enum {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+
+ SRType_RRX = SRType_ROR
+} ARMShiftType;
+
+class ShiftTypeAndAmount {
+ friend class ARMv7Assembler;
+
+public:
+ ShiftTypeAndAmount()
+ {
+ m_u.type = (ARMShiftType)0;
+ m_u.amount = 0;
+ }
+
+ ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
+ {
+ m_u.type = type;
+ m_u.amount = amount & 31;
+ }
+
+ unsigned lo4() { return m_u.lo4; }
+ unsigned hi4() { return m_u.hi4; }
+
+private:
+ union {
+ struct {
+ unsigned lo4 : 4;
+ unsigned hi4 : 4;
+ };
+ struct {
+ unsigned type : 2;
+ unsigned amount : 6;
+ };
+ } m_u;
+};
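+
+// Note on the union above (value chosen for illustration): lo4()/hi4()
+// re-slice the packed type/amount byte into the two nibbles the Thumb-2
+// encodings expect, assuming bit-fields are allocated from the least
+// significant bit; SRType_LSL with amount 10 packs to 0b00101000, so
+// lo4() == 0b1000 and hi4() == 0b0010.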
+
+class ARMv7Assembler {
+public:
+ typedef ARMRegisters::RegisterID RegisterID;
+ typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
+ typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
+ typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
+
+ // (HS, LO, HI, LS) -> (AE, B, A, BE)
+ // (VS, VC) -> (O, NO)
+ typedef enum {
+ ConditionEQ,
+ ConditionNE,
+ ConditionHS, ConditionCS = ConditionHS,
+ ConditionLO, ConditionCC = ConditionLO,
+ ConditionMI,
+ ConditionPL,
+ ConditionVS,
+ ConditionVC,
+ ConditionHI,
+ ConditionLS,
+ ConditionGE,
+ ConditionLT,
+ ConditionGT,
+ ConditionLE,
+ ConditionAL,
+ ConditionInvalid
+ } Condition;
+
+#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
+#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
+ enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
+ JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
+ JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
+ JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
+ JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
+ };
+ enum JumpLinkType {
+ LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
+ LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
+ LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
+ LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
+ LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
+ LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
+ LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
+ LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
+ };
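+
+ // A small sketch of the packing used above: JUMP_ENUM_WITH_SIZE stores the
+ // enumerator index in the low three bits and the code size, in bytes, above
+ // them, so JUMP_ENUM_SIZE(LinkConditionalJumpT4) recovers
+ // 3 * sizeof(uint16_t) == 6 bytes.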
+
+ class LinkRecord {
+ public:
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+ {
+ data.realTypes.m_from = from;
+ data.realTypes.m_to = to;
+ data.realTypes.m_type = type;
+ data.realTypes.m_linkType = LinkInvalid;
+ data.realTypes.m_condition = condition;
+ }
+ void operator=(const LinkRecord& other)
+ {
+ data.copyTypes.content[0] = other.data.copyTypes.content[0];
+ data.copyTypes.content[1] = other.data.copyTypes.content[1];
+ data.copyTypes.content[2] = other.data.copyTypes.content[2];
+ }
+ intptr_t from() const { return data.realTypes.m_from; }
+ void setFrom(intptr_t from) { data.realTypes.m_from = from; }
+ intptr_t to() const { return data.realTypes.m_to; }
+ JumpType type() const { return data.realTypes.m_type; }
+ JumpLinkType linkType() const { return data.realTypes.m_linkType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
+ Condition condition() const { return data.realTypes.m_condition; }
+ private:
+ union {
+ struct RealTypes {
+ intptr_t m_from : 31;
+ intptr_t m_to : 31;
+ JumpType m_type : 8;
+ JumpLinkType m_linkType : 8;
+ Condition m_condition : 16;
+ } realTypes;
+ struct CopyTypes {
+ uint32_t content[3];
+ } copyTypes;
+ COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
+ } data;
+ };
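+
+ // Note on the union above: RealTypes packs both offsets into 31-bit fields
+ // so a record fits in three 32-bit words, and CopyTypes lets operator= copy
+ // those words directly rather than re-packing each bit-field; the
+ // COMPILE_ASSERT keeps the two views the same size.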
+
+ ARMv7Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+private:
+
+ // ARMv7, Appx-A.6.3
+ static bool BadReg(RegisterID reg)
+ {
+ return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
+ }
+
+ uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
+ {
+ uint32_t rdMask = (rdNum >> 1) << highBitsShift;
+ if (rdNum & 1)
+ rdMask |= 1 << lowBitShift;
+ return rdMask;
+ }
+
+ uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
+ {
+ uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
+ if (rdNum & 16)
+ rdMask |= 1 << highBitShift;
+ return rdMask;
+ }
+
+ typedef enum {
+ OP_ADD_reg_T1 = 0x1800,
+ OP_SUB_reg_T1 = 0x1A00,
+ OP_ADD_imm_T1 = 0x1C00,
+ OP_SUB_imm_T1 = 0x1E00,
+ OP_MOV_imm_T1 = 0x2000,
+ OP_CMP_imm_T1 = 0x2800,
+ OP_ADD_imm_T2 = 0x3000,
+ OP_SUB_imm_T2 = 0x3800,
+ OP_AND_reg_T1 = 0x4000,
+ OP_EOR_reg_T1 = 0x4040,
+ OP_TST_reg_T1 = 0x4200,
+ OP_RSB_imm_T1 = 0x4240,
+ OP_CMP_reg_T1 = 0x4280,
+ OP_ORR_reg_T1 = 0x4300,
+ OP_MVN_reg_T1 = 0x43C0,
+ OP_ADD_reg_T2 = 0x4400,
+ OP_MOV_reg_T1 = 0x4600,
+ OP_BLX = 0x4700,
+ OP_BX = 0x4700,
+ OP_STR_reg_T1 = 0x5000,
+ OP_STRH_reg_T1 = 0x5200,
+ OP_STRB_reg_T1 = 0x5400,
+ OP_LDRSB_reg_T1 = 0x5600,
+ OP_LDR_reg_T1 = 0x5800,
+ OP_LDRH_reg_T1 = 0x5A00,
+ OP_LDRB_reg_T1 = 0x5C00,
+ OP_LDRSH_reg_T1 = 0x5E00,
+ OP_STR_imm_T1 = 0x6000,
+ OP_LDR_imm_T1 = 0x6800,
+ OP_STRB_imm_T1 = 0x7000,
+ OP_LDRB_imm_T1 = 0x7800,
+ OP_STRH_imm_T1 = 0x8000,
+ OP_LDRH_imm_T1 = 0x8800,
+ OP_STR_imm_T2 = 0x9000,
+ OP_LDR_imm_T2 = 0x9800,
+ OP_ADD_SP_imm_T1 = 0xA800,
+ OP_ADD_SP_imm_T2 = 0xB000,
+ OP_SUB_SP_imm_T1 = 0xB080,
+ OP_BKPT = 0xBE00,
+ OP_IT = 0xBF00,
+ OP_NOP_T1 = 0xBF00,
+ } OpcodeID;
+
+ typedef enum {
+ OP_B_T1 = 0xD000,
+ OP_B_T2 = 0xE000,
+ OP_AND_reg_T2 = 0xEA00,
+ OP_TST_reg_T2 = 0xEA10,
+ OP_ORR_reg_T2 = 0xEA40,
+ OP_ORR_S_reg_T2 = 0xEA50,
+ OP_ASR_imm_T1 = 0xEA4F,
+ OP_LSL_imm_T1 = 0xEA4F,
+ OP_LSR_imm_T1 = 0xEA4F,
+ OP_ROR_imm_T1 = 0xEA4F,
+ OP_MVN_reg_T2 = 0xEA6F,
+ OP_EOR_reg_T2 = 0xEA80,
+ OP_ADD_reg_T3 = 0xEB00,
+ OP_ADD_S_reg_T3 = 0xEB10,
+ OP_SUB_reg_T2 = 0xEBA0,
+ OP_SUB_S_reg_T2 = 0xEBB0,
+ OP_CMP_reg_T2 = 0xEBB0,
+ OP_VMOV_CtoD = 0xEC00,
+ OP_VMOV_DtoC = 0xEC10,
+ OP_FSTS = 0xED00,
+ OP_VSTR = 0xED00,
+ OP_FLDS = 0xED10,
+ OP_VLDR = 0xED10,
+ OP_VMOV_CtoS = 0xEE00,
+ OP_VMOV_StoC = 0xEE10,
+ OP_VMUL_T2 = 0xEE20,
+ OP_VADD_T2 = 0xEE30,
+ OP_VSUB_T2 = 0xEE30,
+ OP_VDIV = 0xEE80,
+ OP_VABS_T2 = 0xEEB0,
+ OP_VCMP = 0xEEB0,
+ OP_VCVT_FPIVFP = 0xEEB0,
+ OP_VMOV_T2 = 0xEEB0,
+ OP_VMOV_IMM_T2 = 0xEEB0,
+ OP_VMRS = 0xEEB0,
+ OP_VNEG_T2 = 0xEEB0,
+ OP_VSQRT_T1 = 0xEEB0,
+ OP_VCVTSD_T1 = 0xEEB0,
+ OP_VCVTDS_T1 = 0xEEB0,
+ OP_B_T3a = 0xF000,
+ OP_B_T4a = 0xF000,
+ OP_AND_imm_T1 = 0xF000,
+ OP_TST_imm = 0xF010,
+ OP_ORR_imm_T1 = 0xF040,
+ OP_MOV_imm_T2 = 0xF040,
+ OP_MVN_imm = 0xF060,
+ OP_EOR_imm_T1 = 0xF080,
+ OP_ADD_imm_T3 = 0xF100,
+ OP_ADD_S_imm_T3 = 0xF110,
+ OP_CMN_imm = 0xF110,
+ OP_ADC_imm = 0xF140,
+ OP_SUB_imm_T3 = 0xF1A0,
+ OP_SUB_S_imm_T3 = 0xF1B0,
+ OP_CMP_imm_T2 = 0xF1B0,
+ OP_RSB_imm_T2 = 0xF1C0,
+ OP_RSB_S_imm_T2 = 0xF1D0,
+ OP_ADD_imm_T4 = 0xF200,
+ OP_MOV_imm_T3 = 0xF240,
+ OP_SUB_imm_T4 = 0xF2A0,
+ OP_MOVT = 0xF2C0,
+ OP_UBFX_T1 = 0xF3C0,
+ OP_NOP_T2a = 0xF3AF,
+ OP_STRB_imm_T3 = 0xF800,
+ OP_STRB_reg_T2 = 0xF800,
+ OP_LDRB_imm_T3 = 0xF810,
+ OP_LDRB_reg_T2 = 0xF810,
+ OP_STRH_imm_T3 = 0xF820,
+ OP_STRH_reg_T2 = 0xF820,
+ OP_LDRH_reg_T2 = 0xF830,
+ OP_LDRH_imm_T3 = 0xF830,
+ OP_STR_imm_T4 = 0xF840,
+ OP_STR_reg_T2 = 0xF840,
+ OP_LDR_imm_T4 = 0xF850,
+ OP_LDR_reg_T2 = 0xF850,
+ OP_STRB_imm_T2 = 0xF880,
+ OP_LDRB_imm_T2 = 0xF890,
+ OP_STRH_imm_T2 = 0xF8A0,
+ OP_LDRH_imm_T2 = 0xF8B0,
+ OP_STR_imm_T3 = 0xF8C0,
+ OP_LDR_imm_T3 = 0xF8D0,
+ OP_LDRSB_reg_T2 = 0xF910,
+ OP_LDRSH_reg_T2 = 0xF930,
+ OP_LSL_reg_T2 = 0xFA00,
+ OP_LSR_reg_T2 = 0xFA20,
+ OP_ASR_reg_T2 = 0xFA40,
+ OP_ROR_reg_T2 = 0xFA60,
+ OP_CLZ = 0xFAB0,
+ OP_SMULL_T1 = 0xFB80,
+#if CPU(APPLE_ARMV7S)
+ OP_SDIV_T1 = 0xFB90,
+ OP_UDIV_T1 = 0xFBB0,
+#endif
+ } OpcodeID1;
+
+ typedef enum {
+ OP_VADD_T2b = 0x0A00,
+ OP_VDIVb = 0x0A00,
+ OP_FLDSb = 0x0A00,
+ OP_VLDRb = 0x0A00,
+ OP_VMOV_IMM_T2b = 0x0A00,
+ OP_VMOV_T2b = 0x0A40,
+ OP_VMUL_T2b = 0x0A00,
+ OP_FSTSb = 0x0A00,
+ OP_VSTRb = 0x0A00,
+ OP_VMOV_StoCb = 0x0A10,
+ OP_VMOV_CtoSb = 0x0A10,
+ OP_VMOV_DtoCb = 0x0A10,
+ OP_VMOV_CtoDb = 0x0A10,
+ OP_VMRSb = 0x0A10,
+ OP_VABS_T2b = 0x0A40,
+ OP_VCMPb = 0x0A40,
+ OP_VCVT_FPIVFPb = 0x0A40,
+ OP_VNEG_T2b = 0x0A40,
+ OP_VSUB_T2b = 0x0A40,
+ OP_VSQRT_T1b = 0x0A40,
+ OP_VCVTSD_T1b = 0x0A40,
+ OP_VCVTDS_T1b = 0x0A40,
+ OP_NOP_T2b = 0x8000,
+ OP_B_T3b = 0x8000,
+ OP_B_T4b = 0x9000,
+ } OpcodeID2;
+
+ struct FourFours {
+ FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
+ {
+ m_u.f0 = f0;
+ m_u.f1 = f1;
+ m_u.f2 = f2;
+ m_u.f3 = f3;
+ }
+
+ union {
+ unsigned value;
+ struct {
+ unsigned f0 : 4;
+ unsigned f1 : 4;
+ unsigned f2 : 4;
+ unsigned f3 : 4;
+ };
+ } m_u;
+ };
+
+ class ARMInstructionFormatter;
+
+ // false means else!
+ bool ifThenElseConditionBit(Condition condition, bool isIf)
+ {
+ return isIf ? (condition & 1) : !(condition & 1);
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | (ifThenElseConditionBit(condition, inst4if) << 1)
+ | 1;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | (ifThenElseConditionBit(condition, inst3if) << 2)
+ | 2;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+ uint8_t ifThenElse(Condition condition, bool inst2if)
+ {
+ int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
+ | 4;
+ ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
+ return (condition << 4) | mask;
+ }
+
+ uint8_t ifThenElse(Condition condition)
+ {
+ int mask = 8;
+ return (condition << 4) | mask;
+ }
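+
+ // Illustrative values for the mask computation above: a plain IT EQ is
+ // ifThenElse(ConditionEQ) == (0 << 4) | 0b1000, while an ITTE EQ block,
+ // ifThenElse(ConditionEQ, true, true, false), encodes the two 'then' slots
+ // as EQ's low condition bit (0), the 'else' slot as its inverse (1), and
+ // appends the terminating 1, giving mask 0b0011.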
+
+public:
+
+ void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
+ }
+
+ void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if (rn == ARMRegisters::sp) {
+ ASSERT(!(imm.getUInt16() & 3));
+ if (!(rd & 8) && imm.isUInt10()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ return;
+ } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
+ m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ }
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
+ }
+ }
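+
+ // Instruction selection above favours the 16-bit forms, e.g. (operands
+ // chosen for illustration) add(r0, r0, ARMThumbImmediate::makeEncodedImm(7))
+ // emits the one-word OP_ADD_imm_T1, while the same immediate added to a
+ // high register such as r8 falls through to the two-word OP_ADD_imm_T3.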
+
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, add doesn't modify the flags register.
+ ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (rd == rn)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
+ else if (rd == rm)
+ m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
+ else if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
+ else
+ add_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
+ else
+ ARM_and(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel b()
+ {
+ m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
+ {
+ ASSERT(rm != ARMRegisters::pc);
+ m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
+ return m_formatter.label();
+ }
+
+ // Only allowed in IT (if then) block if last instruction.
+ ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return m_formatter.label();
+ }
+
+ void bkpt(uint8_t imm = 0)
+ {
+ m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
+ }
+
+ ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
+ }
+
+ ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isEncodedImm());
+
+ if (!(rn & 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ cmp(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // xor is not spelled with an 'e'. :-(
+ void eor(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
+ else
+ eor(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void it(Condition cond)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
+ }
+
+ ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
+ {
+ m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
+ }
+
+ ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt7());
+ ASSERT(!((rt | rn) & 8));
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then this is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
+ }
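+
+ // For example (operands chosen for illustration), with the overload above:
+ // ldr(r0, r1, 4, true, false) -> LDR r0, [r1, #4] (regular offset)
+ // ldr(r0, r1, 4, true, true) -> LDR r0, [r1, #4]! (pre-index)
+ // ldr(r0, r1, 4, false, true) -> LDR r0, [r1], #4 (post-index)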
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed load;
+ // if index is not set then this is a post-index load.
+ //
+ // If wback is set rn is updated - this is a pre or post index load,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // REG[rt] = MEM[index ? _tmp : _reg]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
+ }
+
+ ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(!BadReg(rt)); // Memory hint
+ ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt5())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
+ ASSERT(!(offset & ~0xff));
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
+ }
+
+ ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc); // LDR (literal)
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+ ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
+ }
+
+#if OS(LINUX) || OS(QNX)
+ static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
+ {
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
+ address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
+ address[4] = OP_CMP_reg_T2 | left;
+ cacheFlush(address, sizeof(uint16_t) * 5);
+ }
+#else
+ static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ uint16_t* address = static_cast<uint16_t*>(instructionStart);
+ address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
+ address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
+ cacheFlush(address, sizeof(uint16_t) * 2);
+ }
+#endif
+
+ ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isValid());
+ ASSERT(!BadReg(rd));
+
+ if ((rd < 8) && imm.isUInt8())
+ m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
+ else if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
+ else
+ movT3(rd, imm);
+ }
+
+ ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
+ }
+
+ ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isUInt16());
+ ASSERT(!BadReg(rd));
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
+ {
+ ASSERT(imm.isEncodedImm());
+ ASSERT(!BadReg(rd));
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
+ {
+ if (!((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
+ else
+ mvn(rd, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ sub(rd, zero, rm);
+ }
+
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if ((rd == rn) && !((rd | rm) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
+ else if ((rd == rm) && !((rd | rn) & 8))
+ m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
+ else
+ orr_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rm));
+ ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
+ m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
+ }
+
+#if CPU(APPLE_ARMV7S)
+ ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rdLo));
+ ASSERT(!BadReg(rdHi));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ ASSERT(rdLo != rdHi);
+ m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
+ m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then this is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
+ }
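+
+ // The index/wback combinations mirror the ldr overload above; for example
+ // (operands chosen for illustration) str(r0, r1, -4, false, true) performs
+ // STR r0, [r1], #-4, a post-indexed store that writes back r1 - 4.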
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then this is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT((offset & ~0xff) == 0);
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isUInt12());
+
+ if (!((rt | rn) & 8) && imm.isUInt7())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
+ }
+
+ // If index is set, this is a regular offset or a pre-indexed store;
+ // if index is not set then this is a post-index store.
+ //
+ // If wback is set rn is updated - this is a pre or post index store,
+ // if wback is not set this is a regular offset memory access.
+ //
+ // (-255 <= offset <= 255)
+ // _reg = REG[rn]
+ // _tmp = _reg + offset
+ // MEM[index ? _tmp : _reg] = REG[rt]
+ // if (wback) REG[rn] = _tmp
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
+ {
+ ASSERT(rt != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(index || wback);
+ ASSERT(!wback | (rt != rn));
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+ ASSERT(!(offset & ~0xff));
+
+ offset |= (wback << 8);
+ offset |= (add << 9);
+ offset |= (index << 10);
+ offset |= (1 << 11);
+
+ m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
+ }
+
+ // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
+ ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
+ {
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ ASSERT(shift <= 3);
+
+ if (!shift && !((rt | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
+ else
+ m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ if (imm.isEncodedImm())
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
+ else {
+ ASSERT(imm.isUInt12());
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
+ }
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ if (!((rd | rn) & 8) && !imm.getUInt12())
+ m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
+ else
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // NOTE: In an IT block, sub doesn't modify the flags register.
+ ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ // Not allowed in an IT (if then) block.
+ void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
+ {
+ // Rd can only be SP if Rn is also SP.
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+
+ if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
+ ASSERT(!(imm.getUInt16() & 3));
+ m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
+ return;
+ } else if (!((rd | rn) & 8)) {
+ if (imm.isUInt3()) {
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
+ return;
+ } else if ((rd == rn) && imm.isUInt8()) {
+ m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
+ return;
+ }
+ }
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
+ }
+
+ ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
+ {
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt12());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
+ }
+
+ // Not allowed in an IT (if then) block?
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
+ ASSERT(rd != ARMRegisters::pc);
+ ASSERT(rn != ARMRegisters::pc);
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
+ }
+
+ // Not allowed in an IT (if then) block.
+ ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ if (!((rd | rn | rm) & 8))
+ m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
+ else
+ sub_S(rd, rn, rm, ShiftTypeAndAmount());
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(imm.isEncodedImm());
+
+ m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
+ {
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
+ }
+
+ ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
+ {
+ if ((rn | rm) & 8)
+ tst(rn, rm, ShiftTypeAndAmount());
+ else
+ m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
+ }
+
+ ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
+ {
+ ASSERT(lsb < 32);
+ ASSERT((width >= 1) && (width <= 32));
+ ASSERT((lsb + width) <= 32);
+ m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
+ }
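+
+ // e.g. (operands chosen for illustration) ubfx(rd, rn, 4, 8) extracts bits
+ // 4..11 of rn, zero-extends them, and writes the result to rd.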
+
+#if CPU(APPLE_ARMV7S)
+ ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
+ {
+ ASSERT(!BadReg(rd));
+ ASSERT(!BadReg(rn));
+ ASSERT(!BadReg(rm));
+ m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
+ }
+#endif
+
+ void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
+ }
+
+ void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
+ }
+
+ void vcmpz(FPDoubleRegisterID rd)
+ {
+ m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
+ }
+
+ void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+ // The vcvtOp flags are (toInt, unsigned, roundZero); the data values are 64-bit.
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
+ }
+
+ void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ // The vcvtOp flags are (toInt, unsigned, roundZero); the data values are 64-bit.
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
+ }
+
+ void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ // The vcvtOp flags are (toInt, unsigned, roundZero); the data values are 64-bit.
+ m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
+ }
+
+ void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
+ }
+
+ void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
+ }
+
+ void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
+ }
+
+ void vmov(RegisterID rd, FPSingleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd));
+ m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
+ }
+
+ void vmov(FPSingleRegisterID rd, RegisterID rn)
+ {
+ ASSERT(!BadReg(rn));
+ m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
+ }
+
+ void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
+ {
+ ASSERT(!BadReg(rd1));
+ ASSERT(!BadReg(rd2));
+ m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
+ }
+
+ void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ ASSERT(!BadReg(rn1));
+ ASSERT(!BadReg(rn2));
+ m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
+ }
+
+ void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
+ {
+ m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
+ }
+
+ void vmrs(RegisterID reg = ARMRegisters::pc)
+ {
+ ASSERT(reg != ARMRegisters::sp);
+ m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
+ }
+
+ void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
+ }
+
+ void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
+ }
+
+ void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
+ {
+ m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
+ }
+
+ void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
+ }
+
+ void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
+ }
+
+ void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
+ }
+
+ void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
+ }
+
+ void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
+ }
+
+ void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
+ {
+ m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
+ }
+
+ void nop()
+ {
+ m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
+ }
+
+ void nopw()
+ {
+ m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
+ }
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
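+ // A sketch of the padding behaviour below (illustrative numbers): if a watchpoint
+ // label was taken at offset 100 and maxJumpReplacementSize() is 4, then
+ // m_indexOfTailOfLastWatchpoint is 104; a label requested at offset 100 first emits
+ // one nopw (4 bytes), so the returned label sits at 104, safely past the region a
+ // jump replacement may overwrite.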
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
+ nopw();
+ else
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_formatter.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
+ }
+
+ int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+
+ // Assembler admin methods:
+
+ static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
+ {
+ return a.from() < b.from();
+ }
+
+ bool canCompact(JumpType jumpType)
+ {
+ // The following cannot be compacted:
+ // JumpFixed: represents custom jump sequence
+ // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
+ // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
+ return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
+ }
+
+ JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ {
+ if (jumpType == JumpFixed)
+ return LinkInvalid;
+
+ // For patchable jumps we must leave space for the longest code sequence.

+ if (jumpType == JumpNoConditionFixedSize)
+ return LinkBX;
+ if (jumpType == JumpConditionFixedSize)
+ return LinkConditionalBX;
+
+ const int paddingSize = JUMP_ENUM_SIZE(jumpType);
+
+ if (jumpType == JumpCondition) {
+ // 2-byte conditional T1
+ const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
+ if (canBeJumpT1(jumpT1Location, to))
+ return LinkJumpT1;
+ // 4-byte conditional T3
+ const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
+ if (canBeJumpT3(jumpT3Location, to))
+ return LinkJumpT3;
+ // 4-byte conditional T4 with IT
+ const uint16_t* conditionalJumpT4Location =
+ reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
+ if (canBeJumpT4(conditionalJumpT4Location, to))
+ return LinkConditionalJumpT4;
+ } else {
+ // 2-byte unconditional T2
+ const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
+ if (canBeJumpT2(jumpT2Location, to))
+ return LinkJumpT2;
+ // 4-byte unconditional T4
+ const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
+ if (canBeJumpT4(jumpT4Location, to))
+ return LinkJumpT4;
+ // use long jump sequence
+ return LinkBX;
+ }
+
+ ASSERT(jumpType == JumpCondition);
+ return LinkConditionalBX;
+ }
+
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ {
+ JumpLinkType linkType = computeJumpType(record.type(), from, to);
+ record.setLinkType(linkType);
+ return linkType;
+ }
+
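+ // Note (branch compaction): the unlinked copy of the code (m_formatter.data())
+ // doubles as a side table. recordLinkOffsets() below stores, for each 32-bit word
+ // of a region, the displacement between unlinked and executable locations;
+ // executableOffsetFor() above reads back the entry for the word preceding 'location'.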
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+ {
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+ }
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
+ {
+ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+ return m_jumpsToLink;
+ }
+
+ void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ {
+ switch (record.linkType()) {
+ case LinkJumpT1:
+ linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT2:
+ linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT3:
+ linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkJumpT4:
+ linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalJumpT4:
+ linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkConditionalBX:
+ linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ case LinkBX:
+ linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ void* unlinkedCode() { return m_formatter.data(); }
+ size_t codeSize() const { return m_formatter.codeSize(); }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
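+ //
+ // A rough sketch of the intended flow (illustrative only):
+ //
+ // ARMv7Assembler masm;
+ // ... emit code, collecting AssemblerLabels for jumps and targets ...
+ // masm.linkJump(from, to, JumpNoCondition, ConditionInvalid); // still in-buffer
+ // ... copy the buffer into executable memory (e.g. via a LinkBuffer) ...
+ // ARMv7Assembler::relinkJump(jumpLocation, newTarget); // on finalized code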
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkJumpAbsolute(location, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
+ ASSERT(from.isSet());
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
+ {
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
+
+ linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
+
+ cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
+ ASSERT(reinterpret_cast<intptr_t>(to) & 1);
+
+ setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setInt32(where, value, true);
+ }
+
+ static void repatchCompact(void* where, int32_t offset)
+ {
+ ASSERT(offset >= -255 && offset <= 255);
+
+ bool add = true;
+ if (offset < 0) {
+ add = false;
+ offset = -offset;
+ }
+
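+ // Build the low 12 bits of the second halfword of a Thumb-2 LDR (immediate)
+ // encoding: bits 7..0 hold |offset|, bit 9 is the add/subtract (U) bit, and bits
+ // 10 and 11 are fixed to one for this addressing form (bit 8, writeback, stays 0).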
+ offset |= (add << 9);
+ offset |= (1 << 10);
+ offset |= (1 << 11);
+
+ uint16_t* location = reinterpret_cast<uint16_t*>(where);
+ location[1] &= ~((1 << 12) - 1);
+ location[1] |= offset;
+ cacheFlush(location, sizeof(uint16_t) * 2);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
+
+ setPointer(where, value, true);
+ }
+
+ static void* readPointer(void* where)
+ {
+ return reinterpret_cast<void*>(readInt32(where));
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));
+
+#if OS(LINUX) || OS(QNX)
+ if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+ linkJumpT4(ptr, to);
+ cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+ } else {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
+ linkBX(ptr, to);
+ cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
+ }
+#else
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
+ linkJumpT4(ptr, to);
+ cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
+#endif
+ }
+
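+ // The sizes returned below mirror replaceWithJump() above: a movw/movt/bx sequence
+ // occupies five 16-bit words (10 bytes), while a single B.W (T4) branch occupies
+ // two (4 bytes).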
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+#if OS(LINUX) || OS(QNX)
+ return 10;
+#else
+ return 4;
+#endif
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+ switch (ptr[0] & 0xFFF0) {
+ case OP_LDR_imm_T3:
+ break;
+ case OP_ADD_imm_T3:
+ ASSERT(!(ptr[1] & 0xF000));
+ ptr[0] &= 0x000F;
+ ptr[0] |= OP_LDR_imm_T3;
+ ptr[1] |= (ptr[1] & 0x0F00) << 4;
+ ptr[1] &= 0xF0FF;
+ cacheFlush(ptr, sizeof(uint16_t) * 2);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
+ switch (ptr[0] & 0xFFF0) {
+ case OP_LDR_imm_T3:
+ ASSERT(!(ptr[1] & 0x0F00));
+ ptr[0] &= 0x000F;
+ ptr[0] |= OP_ADD_imm_T3;
+ ptr[1] |= (ptr[1] & 0xF000) >> 4;
+ ptr[1] &= 0x0FFF;
+ cacheFlush(ptr, sizeof(uint16_t) * 2);
+ break;
+ case OP_ADD_imm_T3:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+#if OS(LINUX)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
+ asm volatile(
+ "push {r7}\n"
+ "mov r0, %0\n"
+ "mov r1, %1\n"
+ "movw r7, #0x2\n"
+ "movt r7, #0xf\n"
+ "movs r2, #0x0\n"
+ "svc 0x0\n"
+ "pop {r7}\n"
+ :
+ : "r" (begin), "r" (end)
+ : "r0", "r1", "r2");
+ }
+#endif
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if OS(IOS)
+ sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
+#elif OS(WINCE)
+ CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+#elif OS(QNX)
+#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ msync(code, size, MS_INVALIDATE_ICACHE);
+#else
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(size);
+#endif
+#else
+#error "The cacheFlush support is missing on this platform."
+#endif
+ }
+
+private:
+ // VFP operations commonly take one or more 5-bit operands, typically representing a
+ // floating point register number. This will commonly be encoded in the instruction
+ // in two parts, with one single-bit field and one 4-bit field. In the case of
+ // double precision operands the high bit of the register number is encoded
+ // separately, while for single precision operands it is the low bit of the
+ // register number that is encoded on its own.
+ // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
+ // field to be encoded together in the instruction (the low 4-bits of a double
+ // register number, or the high 4-bits of a single register number), and bit 4
+ // contains the bit value to be encoded individually.
+ struct VFPOperand {
+ explicit VFPOperand(uint32_t value)
+ : m_value(value)
+ {
+ ASSERT(!(m_value & ~0x1f));
+ }
+
+ VFPOperand(FPDoubleRegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(RegisterID reg)
+ : m_value(reg)
+ {
+ }
+
+ VFPOperand(FPSingleRegisterID reg)
+ : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
+ {
+ }
+
+ uint32_t bits1()
+ {
+ return m_value >> 4;
+ }
+
+ uint32_t bits4()
+ {
+ return m_value & 0xf;
+ }
+
+ uint32_t m_value;
+ };
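+
+ // Worked example (for illustration): single register s5 is 0b00101, so the
+ // constructor above yields m_value = (1 << 4) | 0b0010 = 0x12 - bits4() is the
+ // high four bits of the register number and bits1() is its low bit. A double
+ // register such as d17 (0b10001) is stored unmodified: bits4() = 1, bits1() = 1.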
+
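+ // For illustration, the public vcvt_* helpers above produce:
+ // vcvt_signedToFloatingPoint: vcvtOp(false, false, false) -> 0x8 | 0x10 = 0x18
+ // vcvt_floatingPointToSigned: vcvtOp(true, false, true) -> 0x8 | 0x5 | 0x10 = 0x1d
+ // vcvt_floatingPointToUnsigned: vcvtOp(true, true, true) -> 0x8 | 0x4 | 0x10 = 0x1c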
+ VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
+ {
+ // Cannot specify rounding when converting to float.
+ ASSERT(toInteger || !isRoundZero);
+
+ uint32_t op = 0x8;
+ if (toInteger) {
+ // opc2 indicates both toInteger & isUnsigned.
+ op |= isUnsigned ? 0x4 : 0x5;
+ // 'op' field in instruction is isRoundZero
+ if (isRoundZero)
+ op |= 0x10;
+ } else {
+ ASSERT(!isRoundZero);
+ // 'op' field in instruction is isUnsigned
+ if (!isUnsigned)
+ op |= 0x10;
+ }
+ return VFPOperand(op);
+ }
+
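+ // Example (illustrative): for value 0x12345678 the four halfwords rewritten by
+ // setInt32() below encode 'movw rX, #0x5678' followed by 'movt rX, #0x1234';
+ // readInt32() reverses the split, reassembling (0x1234 << 16) | 0x5678.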
+ static void setInt32(void* code, uint32_t value, bool flush)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
+ location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+ location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+
+ if (flush)
+ cacheFlush(location - 4, 4 * sizeof(uint16_t));
+ }
+
+ static int32_t readInt32(void* code)
+ {
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
+
+ ARMThumbImmediate lo16;
+ ARMThumbImmediate hi16;
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
+ decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
+ uint32_t result = hi16.asUInt16();
+ result <<= 16;
+ result |= lo16.asUInt16();
+ return static_cast<int32_t>(result);
+ }
+
+ static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
+ {
+ // Requires us to have planted a LDR_imm_T1
+ ASSERT(imm.isValid());
+ ASSERT(imm.isUInt7());
+ uint16_t* location = reinterpret_cast<uint16_t*>(code);
+ location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
+ location[0] |= (imm.getUInt7() >> 2) << 6;
+ cacheFlush(location, sizeof(uint16_t));
+ }
+
+ static void setPointer(void* code, void* value, bool flush)
+ {
+ setInt32(code, reinterpret_cast<uint32_t>(value), flush);
+ }
+
+ static bool isB(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
+ }
+
+ static bool isBX(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] & 0xff87) == OP_BX;
+ }
+
+ static bool isMOV_imm_T3(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
+ }
+
+ static bool isMOVT(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
+ }
+
+ static bool isNOP_T1(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return instruction[0] == OP_NOP_T1;
+ }
+
+ static bool isNOP_T2(void* address)
+ {
+ uint16_t* instruction = static_cast<uint16_t*>(address);
+ return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
+ }
+
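+ // Reachable displacements for the four encodings tested below (signed, measured
+ // from the instruction): T1 fits 9 bits (~+/-256 bytes), T2 12 bits (~+/-2KB),
+ // T3 21 bits (~+/-1MB), T4 25 bits (~+/-16MB); anything further falls back to the
+ // movw/movt/bx sequence (LinkBX).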
+ static bool canBeJumpT1(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
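+ // The shift pair below is a sign-extension test: the equality holds exactly when
+ // 'relative' fits in a signed 9-bit field. For example relative = -256 survives
+ // the round trip, while relative = 300 does not (it reads back as 300 - 512).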
+ return ((relative << 23) >> 23) == relative;
+ }
+
+ static bool canBeJumpT2(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+ return ((relative << 20) >> 20) == relative;
+ }
+
+ static bool canBeJumpT3(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ return ((relative << 11) >> 11) == relative;
+ }
+
+ static bool canBeJumpT4(const uint16_t* instruction, const void* target)
+ {
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ return ((relative << 7) >> 7) == relative;
+ }
+
+ void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT1(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T1 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+ }
+
+ static void linkJumpT2(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT2(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // It does not appear to be documented in the ARM ARM (big surprise), but
+ // for OP_B_T2 the branch displacement encoded in the instruction is 2
+ // less than the actual displacement.
+ relative -= 2;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+ }
+
+ void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT3(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+ instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+ }
+
+ static void linkJumpT4(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+ ASSERT(canBeJumpT4(instruction, target));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ }
+
+ void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ instruction[-3] = ifThenElse(cond) | OP_IT;
+ linkJumpT4(instruction, target);
+ }
+
+ static void linkBX(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+
+ void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ linkBX(instruction, target);
+ instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+ }
+
+ static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ {
+ // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+ || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+ if (canBeJumpT4(instruction, target)) {
+ // There may be a better way to fix this, but right now put the NOPs first, since in the
+ // case of a conditional branch this will be coming after an ITTT predicating *three*
+ // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
+ // variable width encoding - the previous instruction might *look* like an ITTT but
+ // actually be the second half of a 2-word op.
+ instruction[-5] = OP_NOP_T1;
+ instruction[-4] = OP_NOP_T2a;
+ instruction[-3] = OP_NOP_T2b;
+ linkJumpT4(instruction, target);
+ } else {
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
+ ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
+ ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
+ instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ }
+ }
+
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
+ {
+ return op | (imm.m_value.i << 10) | imm.m_value.imm4;
+ }
+
+ static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
+ {
+ result.m_value.i = (value >> 10) & 1;
+ result.m_value.imm4 = value & 15;
+ }
+
+ static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
+ {
+ return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
+ }
+
+ static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
+ {
+ result.m_value.imm3 = (value >> 12) & 7;
+ result.m_value.imm8 = value & 255;
+ }
+
+ class ARMInstructionFormatter {
+ public:
+ ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
+ {
+ m_buffer.putShort(op | (rd << 8) | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
+ {
+ m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
+ }
+
+ ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
+ }
+
+ ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
+ ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
+ {
+ m_buffer.putShort(op | (reg1 << 3) | reg2);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
+ {
+ m_buffer.putShort(op | reg);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
+ {
+ m_buffer.putShort(op);
+ m_buffer.putShort(ff.m_u.value);
+ }
+
+ ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(op2);
+ }
+
+ ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+ {
+ ARMThumbImmediate newImm = imm;
+ newImm.m_value.imm4 = imm4;
+
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
+ m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((reg2 << 12) | imm);
+ }
+
+ ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
+ {
+ m_buffer.putShort(op | reg1);
+ m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
+ }
+
+ // Formats instructions of the pattern:
+ // 111111111B11aaaa:bbbb222SA2C2cccc
+ // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
+ // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
+ ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
+ {
+ ASSERT(!(op1 & 0x004f));
+ ASSERT(!(op2 & 0xf1af));
+ m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
+ m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
+ }
+
+ // ARM VFP addresses can be offset by an 8-bit immediate scaled by 4, with a
+ // separate up/down bit (i.e. +/-(0..255) 32-bit words).
+ ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
+ {
+ bool up = true;
+ if (imm < 0) {
+ imm = -imm;
+ up = false;
+ }
+
+ uint32_t offset = imm;
+ ASSERT(!(offset & ~0x3fc));
+ offset >>= 2;
+
+ m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
+ m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
+ }
+
+ // Administrative methods:
+
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
+ bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
+ void* data() const { return m_buffer.data(); }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ private:
+ AssemblerBuffer m_buffer;
+ } m_formatter;
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#endif // ARMAssembler_h
diff --git a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
new file mode 100644
index 0000000000..95eaf7d99d
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
@@ -0,0 +1,842 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AbstractMacroAssembler_h
+#define AbstractMacroAssembler_h
+
+#include "AssemblerBuffer.h"
+#include "CodeLocation.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/CryptographicallyRandomNumber.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/UnusedParam.h>
+
+#if ENABLE(ASSEMBLER)
+
+
+#if PLATFORM(QT)
+#define ENABLE_JIT_CONSTANT_BLINDING 0
+#endif
+
+#ifndef ENABLE_JIT_CONSTANT_BLINDING
+#define ENABLE_JIT_CONSTANT_BLINDING 1
+#endif
+
+namespace JSC {
+
+class JumpReplacementWatchpoint;
+class LinkBuffer;
+class RepatchBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
+
+template <class AssemblerType>
+class AbstractMacroAssembler {
+public:
+ friend class JITWriteBarrierBase;
+ typedef AssemblerType AssemblerType_T;
+
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssemblerCodeRef CodeRef;
+
+ class Jump;
+
+ typedef typename AssemblerType::RegisterID RegisterID;
+
+ // Section 1: MacroAssembler operand types
+ //
+ // The following types are used as operands to MacroAssembler operations,
+ // describing immediate and memory operands to the instructions to be planted.
+
+ enum Scale {
+ TimesOne,
+ TimesTwo,
+ TimesFour,
+ TimesEight,
+ };
+
+ // Address:
+ //
+ // Describes a simple base-offset address.
+ struct Address {
+ explicit Address(RegisterID base, int32_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ struct ExtendedAddress {
+ explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ intptr_t offset;
+ };
+
+ // ImplicitAddress:
+ //
+ // This class is used for explicit 'load' and 'store' operations
+ // (as opposed to situations in which a memory operand is provided
+ // to a generic operation, such as an integer arithmetic instruction).
+ //
+ // In the case of a load (or store) operation we want to permit
+ // addresses to be implicitly constructed, e.g. the two calls:
+ //
+ // load32(Address(addrReg), destReg);
+ // load32(addrReg, destReg);
+ //
+ // Are equivalent, and the explicit wrapping of the Address in the former
+ // is unnecessary.
+ struct ImplicitAddress {
+ ImplicitAddress(RegisterID base)
+ : base(base)
+ , offset(0)
+ {
+ }
+
+ ImplicitAddress(Address address)
+ : base(address.base)
+ , offset(address.offset)
+ {
+ }
+
+ RegisterID base;
+ int32_t offset;
+ };
+
+ // BaseIndex:
+ //
+ // Describes a complex addressing mode.
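+ // The effective address computed from a BaseIndex is, informally:
+ // base + (index << scale) + offset
+ // e.g. BaseIndex(base, index, TimesFour, 8) addresses base + index * 4 + 8.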
+ struct BaseIndex {
+ BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
+ : base(base)
+ , index(index)
+ , scale(scale)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ RegisterID index;
+ Scale scale;
+ int32_t offset;
+ };
+
+ // AbsoluteAddress:
+ //
+ // Describes a memory operand given by a pointer. For regular load & store
+ // operations an unwrapped void* will be used, rather than using this.
+ struct AbsoluteAddress {
+ explicit AbsoluteAddress(const void* ptr)
+ : m_ptr(ptr)
+ {
+ }
+
+ const void* m_ptr;
+ };
+
+ // TrustedImmPtr:
+ //
+ // A pointer sized immediate operand to an instruction - this is wrapped
+ // in a class requiring explicit construction in order to differentiate
+ // from pointers used as absolute addresses to memory operations
+ struct TrustedImmPtr {
+ TrustedImmPtr() { }
+
+ explicit TrustedImmPtr(const void* value)
+ : m_value(value)
+ {
+ }
+
+ // This is only here so that TrustedImmPtr(0) does not confuse the C++
+ // overload handling rules.
+ explicit TrustedImmPtr(int value)
+ : m_value(0)
+ {
+ ASSERT_UNUSED(value, !value);
+ }
+
+ explicit TrustedImmPtr(size_t value)
+ : m_value(reinterpret_cast<void*>(value))
+ {
+ }
+
+ intptr_t asIntptr()
+ {
+ return reinterpret_cast<intptr_t>(m_value);
+ }
+
+ const void* m_value;
+ };
+
+ struct ImmPtr :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImmPtr
+#else
+ public TrustedImmPtr
+#endif
+ {
+ explicit ImmPtr(const void* value)
+ : TrustedImmPtr(value)
+ {
+ }
+
+ TrustedImmPtr asTrustedImmPtr() { return *this; }
+ };
+
+ // TrustedImm32:
+ //
+ // A 32bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm32 {
+ TrustedImm32() { }
+
+ explicit TrustedImm32(int32_t value)
+ : m_value(value)
+ {
+ }
+
+#if !CPU(X86_64)
+ explicit TrustedImm32(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int32_t m_value;
+ };
+
+
+ struct Imm32 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm32
+#else
+ public TrustedImm32
+#endif
+ {
+ explicit Imm32(int32_t value)
+ : TrustedImm32(value)
+ {
+ }
+#if !CPU(X86_64)
+ explicit Imm32(TrustedImmPtr ptr)
+ : TrustedImm32(ptr)
+ {
+ }
+#endif
+ const TrustedImm32& asTrustedImm32() const { return *this; }
+
+ };
+
+ // TrustedImm64:
+ //
+ // A 64bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm64 {
+ TrustedImm64() { }
+
+ explicit TrustedImm64(int64_t value)
+ : m_value(value)
+ {
+ }
+
+#if CPU(X86_64)
+ explicit TrustedImm64(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int64_t m_value;
+ };
+
+ struct Imm64 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm64
+#else
+ public TrustedImm64
+#endif
+ {
+ explicit Imm64(int64_t value)
+ : TrustedImm64(value)
+ {
+ }
+#if CPU(X86_64)
+ explicit Imm64(TrustedImmPtr ptr)
+ : TrustedImm64(ptr)
+ {
+ }
+#endif
+ const TrustedImm64& asTrustedImm64() const { return *this; }
+ };
+
+ // Section 2: MacroAssembler code buffer handles
+ //
+ // The following types are used to reference items in the code buffer
+ // during JIT code generation. For example, the type Jump is used to
+ // track the location of a jump instruction so that it may later be
+ // linked to a label marking its destination.
+
+
+ // Label:
+ //
+ // A Label records a point in the generated instruction stream, typically such that
+ // it may be used as a destination for a jump.
+ class Label {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend struct DFG::OSRExit;
+ friend class Jump;
+ friend class JumpReplacementWatchpoint;
+ friend class MacroAssemblerCodeRef;
+ friend class LinkBuffer;
+ friend class Watchpoint;
+
+ public:
+ Label()
+ {
+ }
+
+ Label(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // ConvertibleLoadLabel:
+ //
+ // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+ // so that:
+ //
+ // loadPtr(Address(a, i), b)
+ //
+ // becomes:
+ //
+ // addPtr(TrustedImmPtr(i), a, b)
+ class ConvertibleLoadLabel {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+
+ public:
+ ConvertibleLoadLabel()
+ {
+ }
+
+ ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.labelIgnoringWatchpoints())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelPtr:
+ //
+ // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+ // patched after the code has been generated.
+ class DataLabelPtr {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelPtr()
+ {
+ }
+
+ DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabel32:
+ //
+ // A DataLabel32 is used to refer to a location in the code containing a 32-bit
+ // value to be patched after the code has been generated.
+ class DataLabel32 {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabel32()
+ {
+ }
+
+ DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ AssemblerLabel label() const { return m_label; }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelCompact:
+ //
+ // A DataLabelCompact is used to refer to a location in the code containing a
+ // compact immediate to be patched after the code has been generated.
+ class DataLabelCompact {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelCompact()
+ {
+ }
+
+ DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ DataLabelCompact(AssemblerLabel label)
+ : m_label(label)
+ {
+ }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // Call:
+ //
+ // A Call object is a reference to a call instruction that has been planted
+ // into the code buffer - it is typically used to link the call, setting the
+ // relative offset such that when executed it will call to the desired
+ // destination.
+ class Call {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+
+ public:
+ enum Flags {
+ None = 0x0,
+ Linkable = 0x1,
+ Near = 0x2,
+ LinkableNear = 0x3,
+ };
+
+ Call()
+ : m_flags(None)
+ {
+ }
+
+ Call(AssemblerLabel jmp, Flags flags)
+ : m_label(jmp)
+ , m_flags(flags)
+ {
+ }
+
+ bool isFlagSet(Flags flag)
+ {
+ return m_flags & flag;
+ }
+
+ static Call fromTailJump(Jump jump)
+ {
+ return Call(jump.m_label, Linkable);
+ }
+
+ AssemblerLabel m_label;
+ private:
+ Flags m_flags;
+ };
+
+ // Jump:
+ //
+ // A jump object is a reference to a jump instruction that has been planted
+ // into the code buffer - it is typically used to link the jump, setting the
+ // relative offset such that when executed it will jump to the desired
+ // destination.
+ class Jump {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class Call;
+ friend struct DFG::OSRExit;
+ friend class LinkBuffer;
+ public:
+ Jump()
+ {
+ }
+
+#if CPU(ARM_THUMB2)
+ // FIXME: this information should be stored in the instruction stream, not in the Jump object.
+ Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+#elif CPU(SH4)
+ Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+ : m_label(jmp)
+ , m_type(type)
+ {
+ }
+#else
+ Jump(AssemblerLabel jmp)
+ : m_label(jmp)
+ {
+ }
+#endif
+
+ Label label() const
+ {
+ Label result;
+ result.m_label = m_label;
+ return result;
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm) const
+ {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
+#else
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+#endif
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
+ {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#else
+ masm->m_assembler.linkJump(m_label, label.m_label);
+#endif
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+#if CPU(ARM_THUMB2)
+ ARMv7Assembler::JumpType m_type;
+ ARMv7Assembler::Condition m_condition;
+#endif
+#if CPU(SH4)
+ SH4Assembler::JumpType m_type;
+#endif
+ };
+
+ struct PatchableJump {
+ PatchableJump()
+ {
+ }
+
+ explicit PatchableJump(Jump jump)
+ : m_jump(jump)
+ {
+ }
+
+ operator Jump&() { return m_jump; }
+
+ Jump m_jump;
+ };
+
+ // JumpList:
+ //
+ // A JumpList is a set of Jump objects.
+ // All jumps in the set will be linked to the same destination.
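+ //
+ // Typical usage (sketch; the branch helpers shown are hypothetical members of a
+ // concrete MacroAssembler):
+ //
+ // JumpList failures;
+ // failures.append(branchIfNotInt32(...)); // hypothetical helper
+ // failures.append(branchIfZero(...)); // hypothetical helper
+ // failures.link(this); // all collected jumps now target this point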
+ class JumpList {
+ friend class LinkBuffer;
+
+ public:
+ typedef Vector<Jump, 2> JumpVector;
+
+ JumpList() { }
+
+ JumpList(Jump jump)
+ {
+ append(jump);
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].link(masm);
+ m_jumps.clear();
+ }
+
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+ {
+ size_t size = m_jumps.size();
+ for (size_t i = 0; i < size; ++i)
+ m_jumps[i].linkTo(label, masm);
+ m_jumps.clear();
+ }
+
+ void append(Jump jump)
+ {
+ m_jumps.append(jump);
+ }
+
+ void append(const JumpList& other)
+ {
+ m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
+ }
+
+ bool empty()
+ {
+ return !m_jumps.size();
+ }
+
+ void clear()
+ {
+ m_jumps.clear();
+ }
+
+ const JumpVector& jumps() const { return m_jumps; }
+
+ private:
+ JumpVector m_jumps;
+ };
+
+
+ // Section 3: Misc admin methods
+#if ENABLE(DFG_JIT)
+ Label labelIgnoringWatchpoints()
+ {
+ Label result;
+ result.m_label = m_assembler.labelIgnoringWatchpoints();
+ return result;
+ }
+#else
+ Label labelIgnoringWatchpoints()
+ {
+ return label();
+ }
+#endif
+
+ Label label()
+ {
+ return Label(this);
+ }
+
+ void padBeforePatch()
+ {
+ // Rely on the fact that asking for a label already does the padding.
+ (void)label();
+ }
+
+ Label watchpointLabel()
+ {
+ Label result;
+ result.m_label = m_assembler.labelForWatchpoint();
+ return result;
+ }
+
+ Label align()
+ {
+ m_assembler.align(16);
+ return Label(this);
+ }
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ class RegisterAllocationOffset {
+ public:
+ RegisterAllocationOffset(unsigned offset)
+ : m_offset(offset)
+ {
+ }
+
+ void check(unsigned low, unsigned high)
+ {
+ RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+ }
+
+ private:
+ unsigned m_offset;
+ };
+
+ void addRegisterAllocationAtOffset(unsigned offset)
+ {
+ m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
+ }
+
+ void clearRegisterAllocationOffsets()
+ {
+ m_registerAllocationForOffsets.clear();
+ }
+
+ void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
+ {
+ if (offset1 > offset2)
+ std::swap(offset1, offset2);
+
+ size_t size = m_registerAllocationForOffsets.size();
+ for (size_t i = 0; i < size; ++i)
+ m_registerAllocationForOffsets[i].check(offset1, offset2);
+ }
+#endif
+
+ template<typename T, typename U>
+ static ptrdiff_t differenceBetween(T from, U to)
+ {
+ return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ }
+
+ static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
+ {
+ return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
+ }
+
+ unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+ ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
+ {
+ AssemblerType::cacheFlush(code, size);
+ }
+protected:
+ AbstractMacroAssembler()
+ : m_randomSource(cryptographicallyRandomNumber())
+ {
+ }
+
+ AssemblerType m_assembler;
+
+ uint32_t random()
+ {
+ return m_randomSource.getUint32();
+ }
+
+ WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool scratchRegisterForBlinding() { return false; }
+ static bool shouldBlindForSpecificArch(uint32_t) { return true; }
+ static bool shouldBlindForSpecificArch(uint64_t) { return true; }
+#endif
+
+ friend class LinkBuffer;
+ friend class RepatchBuffer;
+
+ static void linkJump(void* code, Jump jump, CodeLocationLabel target)
+ {
+ AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
+ }
+
+ static void linkPointer(void* code, AssemblerLabel label, void* value)
+ {
+ AssemblerType::linkPointer(code, label, value);
+ }
+
+ static void* getLinkerAddress(void* code, AssemblerLabel label)
+ {
+ return AssemblerType::getRelocatedAddress(code, label);
+ }
+
+ static unsigned getLinkerCallReturnOffset(Call call)
+ {
+ return AssemblerType::getCallReturnOffset(call.m_label);
+ }
+
+ static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
+ }
+
+ static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
+ {
+ AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
+ static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
+ {
+ AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
+ }
+
+ static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
+ {
+ AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
+ }
+
+ static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+ {
+ return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+ }
+
+ static void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithLoad(label.dataLocation());
+ }
+
+ static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithAddressComputation(label.dataLocation());
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AbstractMacroAssembler_h
diff --git a/src/3rdparty/masm/assembler/AssemblerBuffer.h b/src/3rdparty/masm/assembler/AssemblerBuffer.h
new file mode 100644
index 0000000000..277ec1043c
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AssemblerBuffer.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBuffer_h
+#define AssemblerBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "ExecutableAllocator.h"
+#include "JITCompilationEffort.h"
+#include "JSGlobalData.h"
+#include "stdint.h"
+#include <string.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/StdLibExtras.h>
+
+namespace JSC {
+
+ struct AssemblerLabel {
+ AssemblerLabel()
+ : m_offset(std::numeric_limits<uint32_t>::max())
+ {
+ }
+
+ explicit AssemblerLabel(uint32_t offset)
+ : m_offset(offset)
+ {
+ }
+
+ bool isSet() const { return (m_offset != std::numeric_limits<uint32_t>::max()); }
+
+ AssemblerLabel labelAtOffset(int offset) const
+ {
+ return AssemblerLabel(m_offset + offset);
+ }
+
+ uint32_t m_offset;
+ };
+
+ class AssemblerBuffer {
+ static const int inlineCapacity = 128;
+ public:
+ AssemblerBuffer()
+ : m_storage(inlineCapacity)
+ , m_buffer(&(*m_storage.begin()))
+ , m_capacity(inlineCapacity)
+ , m_index(0)
+ {
+ }
+
+ ~AssemblerBuffer()
+ {
+ }
+
+ bool isAvailable(int space)
+ {
+ return m_index + space <= m_capacity;
+ }
+
+ void ensureSpace(int space)
+ {
+ if (!isAvailable(space))
+ grow();
+ }
+
+ bool isAligned(int alignment) const
+ {
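+ // The bitmask test assumes 'alignment' is a power of two; e.g. with
+ // m_index == 6 and alignment == 4, 6 & 3 == 2, so the buffer is not aligned.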
+ return !(m_index & (alignment - 1));
+ }
+
+ template<typename IntegralType>
+ void putIntegral(IntegralType value)
+ {
+ ensureSpace(sizeof(IntegralType));
+ putIntegralUnchecked(value);
+ }
+
+ template<typename IntegralType>
+ void putIntegralUnchecked(IntegralType value)
+ {
+ ASSERT(isAvailable(sizeof(IntegralType)));
+ *reinterpret_cast_ptr<IntegralType*>(m_buffer + m_index) = value;
+ m_index += sizeof(IntegralType);
+ }
+
+ void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
+ void putByte(int8_t value) { putIntegral(value); }
+ void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
+ void putShort(int16_t value) { putIntegral(value); }
+ void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
+ void putInt(int32_t value) { putIntegral(value); }
+ void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
+ void putInt64(int64_t value) { putIntegral(value); }
+
+ void* data() const
+ {
+ return m_buffer;
+ }
+
+ size_t codeSize() const
+ {
+ return m_index;
+ }
+
+ AssemblerLabel label() const
+ {
+ return AssemblerLabel(m_index);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ if (!m_index)
+ return 0;
+
+ RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index, ownerUID, effort);
+
+ if (!result)
+ return 0;
+
+ ExecutableAllocator::makeWritable(result->start(), result->sizeInBytes());
+
+ memcpy(result->start(), m_buffer, m_index);
+
+ return result.release();
+ }
+
+ unsigned debugOffset() { return m_index; }
+
+ protected:
+ void append(const char* data, int size)
+ {
+ if (!isAvailable(size))
+ grow(size);
+
+ memcpy(m_buffer + m_index, data, size);
+ m_index += size;
+ }
+
+ void grow(int extraCapacity = 0)
+ {
+ m_capacity += m_capacity / 2 + extraCapacity;
+
+ m_storage.grow(m_capacity);
+ m_buffer = &(*m_storage.begin());
+ }
+
+ private:
+ Vector<char, inlineCapacity, UnsafeVectorOverflow> m_storage;
+ char* m_buffer;
+ int m_capacity;
+ int m_index;
+ };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBuffer_h
diff --git a/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h b/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h
new file mode 100644
index 0000000000..5377ef0c7a
--- /dev/null
+++ b/src/3rdparty/masm/assembler/AssemblerBufferWithConstantPool.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblerBufferWithConstantPool_h
+#define AssemblerBufferWithConstantPool_h
+
+#if ENABLE(ASSEMBLER)
+
+#include "AssemblerBuffer.h"
+#include <wtf/SegmentedVector.h>
+
+#define ASSEMBLER_HAS_CONSTANT_POOL 1
+
+namespace JSC {
+
+/*
+ A constant pool can store 4- or 8-byte values, either constants or
+ addresses. The addresses should be 32 or 64 bits wide. The constants
+ should be double-precision floating-point or integer numbers which are
+ hard to encode in a few machine instructions.
+
+ TODO: The pool is designed to handle both 32- and 64-bit values, but
+ currently only 4-byte constants are implemented and tested.
+
+ The AssemblerBuffer can contain multiple constant pools. Each pool is inserted
+ into the instruction stream and protected from the execution flow by a jump
+ instruction.
+
+ The flush mechanism is invoked when no space remains to insert the next
+ instruction into the pool. Three values are used to determine when the
+ constant pool itself has to be inserted into the instruction stream
+ (AssemblerBuffer):
+
+ - maxPoolSize: size of the constant pool in bytes; this value cannot be
+ larger than the maximum offset of a PC-relative memory load
+
+ - barrierSize: size of the jump instruction, in bytes, which protects the
+ constant pool from execution
+
+ - maxInstructionSize: maximum length of a machine instruction in bytes
+
+ There are some callbacks which handle the target-architecture-specific
+ address manipulation:
+
+ - TYPE patchConstantPoolLoad(TYPE load, int value):
+ patch the 'load' instruction with the index of the constant in the
+ constant pool and return the patched instruction.
+
+ - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr):
+ patch the PC-relative load instruction at address 'loadAddr' with the
+ final relative offset. The offset can be computed with the help of
+ 'constPoolAddr' (the address of the constant pool) and the index of the
+ constant (which was previously stored in the load instruction itself).
+
+ - TYPE placeConstantPoolBarrier(int size):
+ return a constant pool barrier instruction which jumps over the
+ constant pool.
+
+ The 'put*WithConstant*' functions should be used to place data into the
+ constant pool.
+*/
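+
+/*
+ A minimal, illustrative sketch of the AssemblerType callbacks; the encodings
+ below are hypothetical and only show the shape of the interface, not any
+ real instruction set:
+
+   struct ExampleAssembler {
+       // Stash the pool index in the low 12 bits of the load (assumed encoding).
+       static uint32_t patchConstantPoolLoad(uint32_t load, int index)
+       {
+           return (load & ~0xfffu) | (index & 0xfff);
+       }
+
+       // Recover the index, then rewrite the load with the real byte offset
+       // from the load to its slot in the pool (assumed encoding).
+       static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+       {
+           uint32_t* load = reinterpret_cast<uint32_t*>(loadAddr);
+           int index = *load & 0xfff;
+           intptr_t offset = reinterpret_cast<intptr_t>(constPoolAddr)
+               + index * sizeof(uint32_t) - reinterpret_cast<intptr_t>(loadAddr);
+           *load = (*load & ~0xfffu) | (offset & 0xfff);
+       }
+   };
+*/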
+
+template <int maxPoolSize, int barrierSize, int maxInstructionSize, class AssemblerType>
+class AssemblerBufferWithConstantPool : public AssemblerBuffer {
+ typedef SegmentedVector<uint32_t, 512> LoadOffsets;
+ using AssemblerBuffer::putIntegral;
+ using AssemblerBuffer::putIntegralUnchecked;
+public:
+ typedef struct {
+ short high;
+ short low;
+ } TwoShorts;
+
+ enum {
+ UniqueConst,
+ ReusableConst,
+ UnusedEntry,
+ };
+
+ AssemblerBufferWithConstantPool()
+ : AssemblerBuffer()
+ , m_numConsts(0)
+ , m_maxDistance(maxPoolSize)
+ , m_lastConstDelta(0)
+ {
+ m_pool = static_cast<uint32_t*>(fastMalloc(maxPoolSize));
+ m_mask = static_cast<char*>(fastMalloc(maxPoolSize / sizeof(uint32_t)));
+ }
+
+ ~AssemblerBufferWithConstantPool()
+ {
+ fastFree(m_mask);
+ fastFree(m_pool);
+ }
+
+ void ensureSpace(int space)
+ {
+ flushIfNoSpaceFor(space);
+ AssemblerBuffer::ensureSpace(space);
+ }
+
+ void ensureSpace(int insnSpace, int constSpace)
+ {
+ flushIfNoSpaceFor(insnSpace, constSpace);
+ AssemblerBuffer::ensureSpace(insnSpace);
+ }
+
+ void ensureSpaceForAnyInstruction(int amount = 1)
+ {
+ flushIfNoSpaceFor(amount * maxInstructionSize, amount * sizeof(uint64_t));
+ }
+
+ bool isAligned(int alignment)
+ {
+ flushIfNoSpaceFor(alignment);
+ return AssemblerBuffer::isAligned(alignment);
+ }
+
+ void putByteUnchecked(int value)
+ {
+ AssemblerBuffer::putByteUnchecked(value);
+ correctDeltas(1);
+ }
+
+ void putByte(int value)
+ {
+ flushIfNoSpaceFor(1);
+ AssemblerBuffer::putByte(value);
+ correctDeltas(1);
+ }
+
+ void putShortUnchecked(int value)
+ {
+ AssemblerBuffer::putShortUnchecked(value);
+ correctDeltas(2);
+ }
+
+ void putShort(int value)
+ {
+ flushIfNoSpaceFor(2);
+ AssemblerBuffer::putShort(value);
+ correctDeltas(2);
+ }
+
+ void putIntUnchecked(int value)
+ {
+ AssemblerBuffer::putIntUnchecked(value);
+ correctDeltas(4);
+ }
+
+ void putInt(int value)
+ {
+ flushIfNoSpaceFor(4);
+ AssemblerBuffer::putInt(value);
+ correctDeltas(4);
+ }
+
+ void putInt64Unchecked(int64_t value)
+ {
+ AssemblerBuffer::putInt64Unchecked(value);
+ correctDeltas(8);
+ }
+
+ void putIntegral(TwoShorts value)
+ {
+ putIntegral(value.high);
+ putIntegral(value.low);
+ }
+
+ void putIntegralUnchecked(TwoShorts value)
+ {
+ putIntegralUnchecked(value.high);
+ putIntegralUnchecked(value.low);
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ flushConstantPool(false);
+ return AssemblerBuffer::executableCopy(globalData, ownerUID, effort);
+ }
+
+ void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
+
+ void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
+ {
+ putIntegralWithConstantInt(insn, constant, isReusable);
+ }
+
+ // This flushing mechanism can be called after any unconditional jump.
+ void flushWithoutBarrier(bool isForced = false)
+ {
+ // Flush if constant pool is more than 60% full to avoid overuse of this function.
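+ // (5 * numConsts > 3 * capacity is numConsts / capacity > 3/5, where
+ // capacity is maxPoolSize / sizeof(uint32_t) pool slots.)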
+ if (isForced || 5 * static_cast<uint32_t>(m_numConsts) > 3 * maxPoolSize / sizeof(uint32_t))
+ flushConstantPool(false);
+ }
+
+ uint32_t* poolAddress()
+ {
+ return m_pool;
+ }
+
+ int sizeOfConstantPool()
+ {
+ return m_numConsts;
+ }
+
+private:
+ void correctDeltas(int insnSize)
+ {
+ m_maxDistance -= insnSize;
+ m_lastConstDelta -= insnSize;
+ if (m_lastConstDelta < 0)
+ m_lastConstDelta = 0;
+ }
+
+ void correctDeltas(int insnSize, int constSize)
+ {
+ correctDeltas(insnSize);
+
+ m_maxDistance -= m_lastConstDelta;
+ m_lastConstDelta = constSize;
+ }
+
+ template<typename IntegralType>
+ void putIntegralWithConstantInt(IntegralType insn, uint32_t constant, bool isReusable)
+ {
+ if (!m_numConsts)
+ m_maxDistance = maxPoolSize;
+ flushIfNoSpaceFor(sizeof(IntegralType), 4);
+
+ m_loadOffsets.append(codeSize());
+ if (isReusable) {
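+ // Try to share an identical constant already marked reusable in the pool.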
+ for (int i = 0; i < m_numConsts; ++i) {
+ if (m_mask[i] == ReusableConst && m_pool[i] == constant) {
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, i)));
+ correctDeltas(sizeof(IntegralType));
+ return;
+ }
+ }
+ }
+
+ m_pool[m_numConsts] = constant;
+ m_mask[m_numConsts] = static_cast<char>(isReusable ? ReusableConst : UniqueConst);
+
+ putIntegral(static_cast<IntegralType>(AssemblerType::patchConstantPoolLoad(insn, m_numConsts)));
+ ++m_numConsts;
+
+ correctDeltas(sizeof(IntegralType), 4);
+ }
+
+ void flushConstantPool(bool useBarrier = true)
+ {
+ if (m_numConsts == 0)
+ return;
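+ // Compute the padding needed to 8-byte align the pool, accounting for the
+ // barrier jump that (optionally) precedes it.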
+ int alignPool = (codeSize() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
+
+ if (alignPool)
+ alignPool = sizeof(uint64_t) - alignPool;
+
+ // Callback to protect the constant pool from execution
+ if (useBarrier)
+ putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
+
+ if (alignPool) {
+ if (alignPool & 1)
+ AssemblerBuffer::putByte(AssemblerType::padForAlign8);
+ if (alignPool & 2)
+ AssemblerBuffer::putShort(AssemblerType::padForAlign16);
+ if (alignPool & 4)
+ AssemblerBuffer::putInt(AssemblerType::padForAlign32);
+ }
+
+ int constPoolOffset = codeSize();
+ append(reinterpret_cast<char*>(m_pool), m_numConsts * sizeof(uint32_t));
+
+ // Patch each PC relative load
+ for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
+ void* loadAddr = reinterpret_cast<char*>(data()) + *iter;
+ AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<char*>(data()) + constPoolOffset);
+ }
+
+ m_loadOffsets.clear();
+ m_numConsts = 0;
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
+ if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
+ flushConstantPool();
+ }
+
+ void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
+ {
+ if (m_numConsts == 0)
+ return;
+ if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
+ (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
+ flushConstantPool();
+ }
+
+ uint32_t* m_pool;
+ char* m_mask;
+ LoadOffsets m_loadOffsets;
+
+ int m_numConsts;
+ int m_maxDistance;
+ int m_lastConstDelta;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // AssemblerBufferWithConstantPool_h
diff --git a/src/3rdparty/masm/assembler/CodeLocation.h b/src/3rdparty/masm/assembler/CodeLocation.h
new file mode 100644
index 0000000000..86d1f2b755
--- /dev/null
+++ b/src/3rdparty/masm/assembler/CodeLocation.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeLocation_h
+#define CodeLocation_h
+
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
+class CodeLocationInstruction;
+class CodeLocationLabel;
+class CodeLocationJump;
+class CodeLocationCall;
+class CodeLocationNearCall;
+class CodeLocationDataLabelCompact;
+class CodeLocationDataLabel32;
+class CodeLocationDataLabelPtr;
+class CodeLocationConvertibleLoad;
+
+// The CodeLocation* types are all pretty much do-nothing wrappers around
+// CodePtr (or MacroAssemblerCodePtr, to give it its full name). These
+// classes only exist to provide type-safety when linking and patching code.
+//
+// The one new piece of functionality introduced by these classes is the
+// ability to create (or put another way, to re-discover) another CodeLocation
+// at an offset from one you already know. When patching code to optimize it
+// we often want to patch a number of instructions that are short, fixed
+// offsets apart. To reduce memory overhead we will only retain a pointer to
+// one of the instructions, and we will use the *AtOffset methods provided by
+// CodeLocationCommon to find the other points in the code to modify.
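+//
+// For example (illustrative only; the offset depends on the generated code):
+//
+//   CodeLocationLabel entry = ...;
+//   CodeLocationJump guard = entry.jumpAtOffset(8); // re-discover a jump 8 bytes in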
+class CodeLocationCommon : public MacroAssemblerCodePtr {
+public:
+ CodeLocationInstruction instructionAtOffset(int offset);
+ CodeLocationLabel labelAtOffset(int offset);
+ CodeLocationJump jumpAtOffset(int offset);
+ CodeLocationCall callAtOffset(int offset);
+ CodeLocationNearCall nearCallAtOffset(int offset);
+ CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset);
+ CodeLocationDataLabel32 dataLabel32AtOffset(int offset);
+ CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset);
+ CodeLocationConvertibleLoad convertibleLoadAtOffset(int offset);
+
+protected:
+ CodeLocationCommon()
+ {
+ }
+
+ CodeLocationCommon(MacroAssemblerCodePtr location)
+ : MacroAssemblerCodePtr(location)
+ {
+ }
+};
+
+class CodeLocationInstruction : public CodeLocationCommon {
+public:
+ CodeLocationInstruction() {}
+ explicit CodeLocationInstruction(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationInstruction(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationLabel : public CodeLocationCommon {
+public:
+ CodeLocationLabel() {}
+ explicit CodeLocationLabel(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationLabel(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationJump : public CodeLocationCommon {
+public:
+ CodeLocationJump() {}
+ explicit CodeLocationJump(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationJump(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationCall : public CodeLocationCommon {
+public:
+ CodeLocationCall() {}
+ explicit CodeLocationCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationNearCall : public CodeLocationCommon {
+public:
+ CodeLocationNearCall() {}
+ explicit CodeLocationNearCall(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationNearCall(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabel32 : public CodeLocationCommon {
+public:
+ CodeLocationDataLabel32() {}
+ explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabel32(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationDataLabelCompact : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelCompact() { }
+ explicit CodeLocationDataLabelCompact(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationDataLabelCompact(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+class CodeLocationDataLabelPtr : public CodeLocationCommon {
+public:
+ CodeLocationDataLabelPtr() {}
+ explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) {}
+ explicit CodeLocationDataLabelPtr(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) {}
+};
+
+class CodeLocationConvertibleLoad : public CodeLocationCommon {
+public:
+ CodeLocationConvertibleLoad() { }
+ explicit CodeLocationConvertibleLoad(MacroAssemblerCodePtr location)
+ : CodeLocationCommon(location) { }
+ explicit CodeLocationConvertibleLoad(void* location)
+ : CodeLocationCommon(MacroAssemblerCodePtr(location)) { }
+};
+
+inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationInstruction(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationLabel(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationJump(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelPtr(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabel32(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationDataLabelCompact CodeLocationCommon::dataLabelCompactAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationDataLabelCompact(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(int offset)
+{
+ ASSERT_VALID_CODE_OFFSET(offset);
+ return CodeLocationConvertibleLoad(reinterpret_cast<char*>(dataLocation()) + offset);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // CodeLocation_h
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.cpp b/src/3rdparty/masm/assembler/LinkBuffer.cpp
new file mode 100644
index 0000000000..645eba5380
--- /dev/null
+++ b/src/3rdparty/masm/assembler/LinkBuffer.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LinkBuffer.h"
+
+#if ENABLE(ASSEMBLER)
+
+#include "Options.h"
+
+namespace JSC {
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
+{
+ performFinalization();
+
+ return CodeRef(m_executableMemory);
+}
+
+LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
+{
+ ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
+
+ CodeRef result = finalizeCodeWithoutDisassembly();
+
+ dataLogF("Generated JIT code for ");
+ va_list argList;
+ va_start(argList, format);
+ WTF::dataLogFV(format, argList);
+ va_end(argList);
+ dataLogF(":\n");
+
+ dataLogF(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+ disassemble(result.code(), m_size, " ", WTF::dataFile());
+
+ return result;
+}
+
+void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
+{
+ ASSERT(!m_code);
+#if !ENABLE(BRANCH_COMPACTION)
+ m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = m_executableMemory->start();
+ m_size = m_assembler->m_assembler.codeSize();
+ ASSERT(m_code);
+#else
+ m_initialSize = m_assembler->m_assembler.codeSize();
+ m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = (uint8_t*)m_executableMemory->start();
+ ASSERT(m_code);
+ ExecutableAllocator::makeWritable(m_code, m_initialSize);
+ uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+ uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ int readPtr = 0;
+ int writePtr = 0;
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
+ unsigned jumpCount = jumpsToLink.size();
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ size_t regionSize = jumpsToLink[i].from() - readPtr;
+ uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr);
+ uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize);
+ uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr);
+ ASSERT(!(regionSize % 2));
+ ASSERT(!(readPtr % 2));
+ ASSERT(!(writePtr % 2));
+ while (copySource != copyEnd)
+ *copyDst++ = *copySource++;
+ m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+ // Calculate the absolute address of the jump target; for backward
+ // branches we need to be precise, for forward branches we can be pessimistic.
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
+ // Compact branch if we can...
+ if (m_assembler->canCompact(jumpsToLink[i].type())) {
+ // Step back in the write stream
+ int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
+ }
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ // Copy everything after the last jump
+ memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
+ m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
+
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint8_t* location = outData + jumpsToLink[i].from();
+ uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+ m_assembler->link(jumpsToLink[i], location, target);
+ }
+
+ jumpsToLink.clear();
+ m_size = writePtr + m_initialSize - readPtr;
+ m_executableMemory->shrink(m_size);
+
+#if DUMP_LINK_STATISTICS
+ dumpLinkStatistics(m_code, m_initialSize, m_size);
+#endif
+#if DUMP_CODE
+ dumpCode(m_code, m_size);
+#endif
+#endif
+}
+
+void LinkBuffer::performFinalization()
+{
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ ASSERT(isValid());
+ m_completed = true;
+#endif
+
+#if ENABLE(BRANCH_COMPACTION)
+ ExecutableAllocator::makeExecutable(code(), m_initialSize);
+#else
+ ExecutableAllocator::makeExecutable(code(), m_size);
+#endif
+ MacroAssembler::cacheFlush(code(), m_size);
+}
+
+#if DUMP_LINK_STATISTICS
+void LinkBuffer::dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize)
+{
+ static unsigned linkCount = 0;
+ static unsigned totalInitialSize = 0;
+ static unsigned totalFinalSize = 0;
+ linkCount++;
+ totalInitialSize += initialSize;
+ totalFinalSize += finalSize;
+ dataLogF("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
+ code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
+ static_cast<unsigned>(initialSize - finalSize),
+ 100.0 * (initialSize - finalSize) / initialSize);
+ dataLogF("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
+ linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
+ 100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
+}
+#endif
+
+#if DUMP_CODE
+void LinkBuffer::dumpCode(void* code, size_t size)
+{
+#if CPU(ARM_THUMB2)
+ // Dump the generated code in an asm file format that can be assembled and then disassembled
+ // for debugging purposes. For example, save this output as jit.s:
+ // gcc -arch armv7 -c jit.s
+ // otool -tv jit.o
+ static unsigned codeCount = 0;
+ unsigned short* tcode = static_cast<unsigned short*>(code);
+ size_t tsize = size / sizeof(short);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.syntax unified\n"
+ "\t.section\t__TEXT,__text,regular,pure_instructions\n"
+ "\t.globl\t%s\n"
+ "\t.align 2\n"
+ "\t.code 16\n"
+ "\t.thumb_func\t%s\n"
+ "# %p\n"
+ "%s:\n", nameBuf, nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.short\t0x%x\n", tcode[i]);
+#elif CPU(ARM_TRADITIONAL)
+ // gcc -c jit.s
+ // objdump -D jit.o
+ static unsigned codeCount = 0;
+ unsigned int* tcode = static_cast<unsigned int*>(code);
+ size_t tsize = size / sizeof(unsigned int);
+ char nameBuf[128];
+ snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
+ dataLogF("\t.globl\t%s\n"
+ "\t.align 4\n"
+ "\t.code 32\n"
+ "\t.text\n"
+ "# %p\n"
+ "%s:\n", nameBuf, code, nameBuf);
+
+ for (unsigned i = 0; i < tsize; i++)
+ dataLogF("\t.long\t0x%x\n", tcode[i]);
+#endif
+}
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.h b/src/3rdparty/masm/assembler/LinkBuffer.h
new file mode 100644
index 0000000000..e1882433c1
--- /dev/null
+++ b/src/3rdparty/masm/assembler/LinkBuffer.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2009, 2010, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LinkBuffer_h
+#define LinkBuffer_h
+
+#if ENABLE(ASSEMBLER)
+
+#define DUMP_LINK_STATISTICS 0
+#define DUMP_CODE 0
+
+#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
+#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
+
+#include "JITCompilationEffort.h"
+#include "MacroAssembler.h"
+#include <wtf/DataLog.h>
+#include <wtf/Noncopyable.h>
+
+namespace JSC {
+
+class JSGlobalData;
+
+// LinkBuffer:
+//
+// This class assists in linking code generated by the macro assembler, once code generation
+// has been completed, and the code has been copied to its final location in memory. At this
+// time, pointers to labels within the code may be resolved, and relative offsets to external
+// addresses may be fixed.
+//
+// Specifically:
+// * Jump objects may be linked to external targets,
+// * The address of a Jump object may be taken, such that it can later be relinked.
+// * The return address of a Call may be acquired.
+// * The address of a Label pointing into the code may be resolved.
+// * The value referenced by a DataLabel may be set.
+//
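+// A typical flow (illustrative; assumes a MacroAssembler 'masm' with a recorded
+// Jump 'j' and Label 'l'):
+//
+//   LinkBuffer patchBuffer(globalData, &masm, GLOBAL_THUNK_ID);
+//   patchBuffer.link(j, patchBuffer.locationOf(l));
+//   CodeRef code = FINALIZE_CODE(patchBuffer, ("example thunk"));
+//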
+class LinkBuffer {
+ WTF_MAKE_NONCOPYABLE(LinkBuffer);
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+ typedef MacroAssembler::Label Label;
+ typedef MacroAssembler::Jump Jump;
+ typedef MacroAssembler::PatchableJump PatchableJump;
+ typedef MacroAssembler::JumpList JumpList;
+ typedef MacroAssembler::Call Call;
+ typedef MacroAssembler::DataLabelCompact DataLabelCompact;
+ typedef MacroAssembler::DataLabel32 DataLabel32;
+ typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+ typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel;
+#if ENABLE(BRANCH_COMPACTION)
+ typedef MacroAssembler::LinkRecord LinkRecord;
+ typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
+
+public:
+ LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : m_size(0)
+#if ENABLE(BRANCH_COMPACTION)
+ , m_initialSize(0)
+#endif
+ , m_code(0)
+ , m_assembler(masm)
+ , m_globalData(&globalData)
+#ifndef NDEBUG
+ , m_completed(false)
+ , m_effort(effort)
+#endif
+ {
+ linkCode(ownerUID, effort);
+ }
+
+ ~LinkBuffer()
+ {
+ ASSERT(m_completed || (!m_executableMemory && m_effort == JITCompilationCanFail));
+ }
+
+ bool didFailToAllocate() const
+ {
+ return !m_executableMemory;
+ }
+
+ bool isValid() const
+ {
+ return !didFailToAllocate();
+ }
+
+ // These methods are used to link or set values at code generation time.
+
+ void link(Call call, FunctionPtr function)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ call.m_label = applyOffset(call.m_label);
+ MacroAssembler::linkCall(code(), call, function);
+ }
+
+ void link(Jump jump, CodeLocationLabel label)
+ {
+ jump.m_label = applyOffset(jump.m_label);
+ MacroAssembler::linkJump(code(), jump, label);
+ }
+
+ void link(JumpList list, CodeLocationLabel label)
+ {
+ for (unsigned i = 0; i < list.m_jumps.size(); ++i)
+ link(list.m_jumps[i], label);
+ }
+
+ void patch(DataLabelPtr label, void* value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value);
+ }
+
+ void patch(DataLabelPtr label, CodeLocationLabel value)
+ {
+ AssemblerLabel target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value.executableAddress());
+ }
+
+ // These methods are used to obtain handles to allow the code to be relinked / repatched later.
+
+ CodeLocationCall locationOf(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(!call.isFlagSet(Call::Near));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationNearCall locationOfNearCall(Call call)
+ {
+ ASSERT(call.isFlagSet(Call::Linkable));
+ ASSERT(call.isFlagSet(Call::Near));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
+ }
+
+ CodeLocationLabel locationOf(PatchableJump jump)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label)));
+ }
+
+ CodeLocationLabel locationOf(Label label)
+ {
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
+ {
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabel32 locationOf(DataLabel32 label)
+ {
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationDataLabelCompact locationOf(DataLabelCompact label)
+ {
+ return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ CodeLocationConvertibleLoad locationOf(ConvertibleLoadLabel label)
+ {
+ return CodeLocationConvertibleLoad(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
+ }
+
+ // This method obtains the return address of the call, given as an offset from
+ // the start of the code.
+ unsigned returnAddressOffset(Call call)
+ {
+ call.m_label = applyOffset(call.m_label);
+ return MacroAssembler::getLinkerCallReturnOffset(call);
+ }
+
+ uint32_t offsetOf(Label label)
+ {
+ return applyOffset(label.m_label).m_offset;
+ }
+
+ // Upon completion of all patching 'FINALIZE_CODE()' should be called once to
+ // complete generation of the code. Alternatively, call
+ // finalizeCodeWithoutDisassembly() directly if you have your own way of
+ // displaying disassembly.
+
+ CodeRef finalizeCodeWithoutDisassembly();
+ CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+
+ CodePtr trampolineAt(Label label)
+ {
+ return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+ }
+
+ void* debugAddress()
+ {
+ return m_code;
+ }
+
+ size_t debugSize()
+ {
+ return m_size;
+ }
+
+private:
+ template <typename T> T applyOffset(T src)
+ {
+#if ENABLE(BRANCH_COMPACTION)
+ src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
+#endif
+ return src;
+ }
+
+ // Keep this private! - the underlying code should only be obtained externally via finalizeCode().
+ void* code()
+ {
+ return m_code;
+ }
+
+ void linkCode(void* ownerUID, JITCompilationEffort);
+
+ void performFinalization();
+
+#if DUMP_LINK_STATISTICS
+ static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize);
+#endif
+
+#if DUMP_CODE
+ static void dumpCode(void* code, size_t);
+#endif
+
+ RefPtr<ExecutableMemoryHandle> m_executableMemory;
+ size_t m_size;
+#if ENABLE(BRANCH_COMPACTION)
+ size_t m_initialSize;
+#endif
+ void* m_code;
+ MacroAssembler* m_assembler;
+ JSGlobalData* m_globalData;
+#ifndef NDEBUG
+ bool m_completed;
+ JITCompilationEffort m_effort;
+#endif
+};
+
+#define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \
+ (UNLIKELY((condition)) \
+ ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \
+ : (linkBufferReference).finalizeCodeWithoutDisassembly())
+
+// Use this to finalize code, like so:
+//
+// CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number));
+//
+// Which, in disassembly mode, will print:
+//
+// Generated JIT code for my super thingy number 42:
+// Code at [0x123456, 0x234567):
+// 0x123456: mov $0, 0
+// 0x12345a: ret
+//
+// ... and so on.
+//
+// Note that the dataLogFArgumentsForHeading are only evaluated when showDisassembly
+// is true, so you can hide expensive disassembly-only computations inside there.
+
+#define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF(Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading)
+
+#define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
+ FINALIZE_CODE_IF((Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading)
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // LinkBuffer_h
diff --git a/src/3rdparty/masm/assembler/MIPSAssembler.h b/src/3rdparty/masm/assembler/MIPSAssembler.h
new file mode 100644
index 0000000000..7f553bb9a1
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MIPSAssembler.h
@@ -0,0 +1,1107 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MIPSAssembler_h
+#define MIPSAssembler_h
+
+#if ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <wtf/Assertions.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+typedef uint32_t MIPSWord;
+
+namespace MIPSRegisters {
+typedef enum {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31
+} RegisterID;
+
+typedef enum {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31
+} FPRegisterID;
+
+} // namespace MIPSRegisters
+
+class MIPSAssembler {
+public:
+ typedef MIPSRegisters::RegisterID RegisterID;
+ typedef MIPSRegisters::FPRegisterID FPRegisterID;
+ typedef SegmentedVector<AssemblerLabel, 64> Jumps;
+
+ MIPSAssembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
+ {
+ }
+
+ // MIPS instruction opcode field position
+ enum {
+ OP_SH_RD = 11,
+ OP_SH_RT = 16,
+ OP_SH_RS = 21,
+ OP_SH_SHAMT = 6,
+ OP_SH_CODE = 16,
+ OP_SH_FD = 6,
+ OP_SH_FS = 11,
+ OP_SH_FT = 16
+ };
+
+ void emitInst(MIPSWord op)
+ {
+ void* oldBase = m_buffer.data();
+
+ m_buffer.putInt(op);
+
+ void* newBase = m_buffer.data();
+ if (oldBase != newBase)
+ relocateJumps(oldBase, newBase);
+ }
+
+ void nop()
+ {
+ emitInst(0x00000000);
+ }
+
+ /* Need to insert one load data delay nop for MIPS I. */
+ void loadDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ /* Need to insert one coprocessor access delay nop for MIPS I. */
+ void copDelayNop()
+ {
+#if WTF_MIPS_ISA(1)
+ nop();
+#endif
+ }
+
+ void move(RegisterID rd, RegisterID rs)
+ {
+ /* addu */
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS));
+ }
+
+ /* Set an immediate value to a register. This may generate 1 or 2
+ instructions. */
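+    /* For example (illustrative): li(t0, 42) emits a single addiu, while
+       li(t0, 0x12345678) emits lui t0, 0x1234 followed by ori t0, t0, 0x5678. */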
+ void li(RegisterID dest, int imm)
+ {
+ if (imm >= -32768 && imm <= 32767)
+ addiu(dest, MIPSRegisters::zero, imm);
+ else if (imm >= 0 && imm < 65536)
+ ori(dest, MIPSRegisters::zero, imm);
+ else {
+ lui(dest, imm >> 16);
+ if (imm & 0xffff)
+ ori(dest, dest, imm);
+ }
+ }
+
+ void lui(RegisterID rt, int imm)
+ {
+ emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void addiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void addu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void subu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mult(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000018 | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void div(RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000001a | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void mfhi(RegisterID rd)
+ {
+ emitInst(0x00000010 | (rd << OP_SH_RD));
+ }
+
+ void mflo(RegisterID rd)
+ {
+ emitInst(0x00000012 | (rd << OP_SH_RD));
+ }
+
+ void mul(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+#if WTF_MIPS_ISA_AT_LEAST(32)
+ emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+#else
+ mult(rs, rt);
+ mflo(rd);
+#endif
+ }
+
+ void andInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void andi(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void nor(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void orInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void ori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void xori(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void slt(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltu(RegisterID rd, RegisterID rs, RegisterID rt)
+ {
+ emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
+ }
+
+ void sltiu(RegisterID rt, RegisterID rs, int imm)
+ {
+ emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void sll(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void sllv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void sra(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srav(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void srl(RegisterID rd, RegisterID rt, int shamt)
+ {
+ emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
+ }
+
+ void srlv(RegisterID rd, RegisterID rt, RegisterID rs)
+ {
+ emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void lb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x80000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lbu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwl(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lwr(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x84000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lhu(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void sb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa0000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa4000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sw(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void jr(RegisterID rs)
+ {
+ emitInst(0x00000008 | (rs << OP_SH_RS));
+ }
+
+ void jalr(RegisterID rs)
+ {
+ emitInst(0x0000f809 | (rs << OP_SH_RS));
+ }
+
+ void jal()
+ {
+ emitInst(0x0c000000);
+ }
+
+ void bkpt()
+ {
+ int value = 512; /* BRK_BUG */
+ emitInst(0x0000000d | ((value & 0x3ff) << OP_SH_CODE));
+ }
+
+ void bgez(RegisterID rs, int imm)
+ {
+ emitInst(0x04010000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void bltz(RegisterID rs, int imm)
+ {
+ emitInst(0x04000000 | (rs << OP_SH_RS) | (imm & 0xffff));
+ }
+
+ void beq(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x10000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bne(RegisterID rs, RegisterID rt, int imm)
+ {
+ emitInst(0x14000000 | (rs << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff));
+ }
+
+ void bc1t()
+ {
+ emitInst(0x45010000);
+ }
+
+ void bc1f()
+ {
+ emitInst(0x45000000);
+ }
+
+ void appendJump()
+ {
+ m_jumps.append(m_buffer.label());
+ }
+
+ void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void divd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ }
+
+ void lwc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ copDelayNop();
+ }
+
+ void ldc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void swc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sdc1(FPRegisterID ft, RegisterID rs, int offset)
+ {
+ emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void mtc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44800000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mthc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44e00000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void mfc1(RegisterID rt, FPRegisterID fs)
+ {
+ emitInst(0x44000000 | (fs << OP_SH_FS) | (rt << OP_SH_RT));
+ copDelayNop();
+ }
+
+ void sqrtd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void movd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void negd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200007 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void truncwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x4620000d | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtdw(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtds(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46000021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtwd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200024 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void cvtsd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200020 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
+ void ceqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cngtd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003f | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cnged(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003d | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003c | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x4620003e | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cueqd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200033 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coled(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200036 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void coltd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200034 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void culed(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200037 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ void cultd(FPRegisterID fs, FPRegisterID ft)
+ {
+ emitInst(0x46200035 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
+ copDelayNop();
+ }
+
+ // General helpers
+
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_buffer.label();
+ }
+
+ AssemblerLabel labelForWatchpoint()
+ {
+ AssemblerLabel result = m_buffer.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
+ }
+
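+    // Pad with nops until past the last watchpoint's replacement region, so the
+    // returned label never points into code that may later be overwritten by a
+    // patched-in jump.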
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_buffer.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_buffer.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
+ {
+ while (!m_buffer.isAligned(alignment))
+ bkpt();
+
+ return label();
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset);
+ }
+
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
+ {
+ return b.m_offset - a.m_offset;
+ }
+
+ // Assembler admin methods:
+
+ size_t codeSize() const
+ {
+ return m_buffer.codeSize();
+ }
+
+ PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort)
+ {
+ RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID, effort);
+ if (!result)
+ return 0;
+
+ relocateJumps(m_buffer.data(), result->start());
+ return result.release();
+ }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+
+ // Assembly helpers for moving data between fp and registers.
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mfc1(rd1, rn);
+ mfhc1(rd2, rn);
+#else
+ mfc1(rd1, rn);
+ mfc1(rd2, FPRegisterID(rn + 1));
+#endif
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+#if WTF_MIPS_ISA_REV(2) && WTF_MIPS_FP64
+ mtc1(rn1, rd);
+ mthc1(rn2, rd);
+#else
+ mtc1(rn1, rd);
+ mtc1(rn2, FPRegisterID(rd + 1));
+#endif
+ }
+
+ static unsigned getCallReturnOffset(AssemblerLabel call)
+ {
+ // The return address is after a call and a delay slot instruction
+ return call.m_offset;
+ }
+
+ // Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
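+ //
+ // For example (illustrative): linkCall() writes directly into the still-writable
+ // buffer, while relinkCall() additionally calls cacheFlush() because it patches
+ // code that has already been finalized.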
+
+ static size_t linkDirectJump(void* code, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code));
+ size_t ops = 0;
+ int32_t slotAddr = reinterpret_cast<int>(insn) + 4;
+ int32_t toAddr = reinterpret_cast<int>(to);
+
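+        // A 'j' instruction keeps the top four bits of the PC, so it can only
+        // reach targets within the same 256MB region; otherwise load the full
+        // target address into $t9 and jump through the register.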
+ if ((slotAddr & 0xf0000000) != (toAddr & 0xf0000000)) {
+ // lui
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((toAddr >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (toAddr & 0xffff);
+ ++insn;
+ // jr
+ *insn = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ ++insn;
+ ops = 4 * sizeof(MIPSWord);
+ } else {
+ // j
+ *insn = 0x08000000 | ((toAddr & 0x0fffffff) >> 2);
+ ++insn;
+ ops = 2 * sizeof(MIPSWord);
+ }
+ // nop
+ *insn = 0x00000000;
+ return ops;
+ }
+
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
+ {
+ ASSERT(to.isSet());
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + from.m_offset);
+ MIPSWord* toPos = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(m_buffer.data()) + to.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, toPos);
+ }
+
+ static void linkJump(void* code, AssemblerLabel from, void* to)
+ {
+ ASSERT(from.isSet());
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 2)) && !(*(insn - 3)) && !(*(insn - 5)));
+ insn = insn - 6;
+ linkWithOffset(insn, to);
+ }
+
+ static void linkCall(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ linkCallInternal(insn, to);
+ }
+
+ static void linkPointer(void* code, AssemblerLabel from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+
+ ASSERT(!(*(insn - 1)) && !(*(insn - 5)));
+ insn = insn - 6;
+ int flushSize = linkWithOffset(insn, to);
+
+ cacheFlush(insn, flushSize);
+ }
+
+ static void relinkCall(void* from, void* to)
+ {
+ void* start;
+ int size = linkCallInternal(from, to);
+ if (size == sizeof(MIPSWord))
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 2 * sizeof(MIPSWord));
+ else
+ start = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(from) - 4 * sizeof(MIPSWord));
+
+ cacheFlush(start, size);
+ }
+
+ static void repatchInt32(void* from, int32_t to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ *insn = (*insn & 0xffff0000) | ((to >> 16) & 0xffff);
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ *insn = (*insn & 0xffff0000) | (to & 0xffff);
+ insn--;
+ cacheFlush(insn, 2 * sizeof(MIPSWord));
+ }
+
+ static int32_t readInt32(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return result;
+ }
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ repatchInt32(where, value);
+ }
+
+ static void repatchPointer(void* from, void* to)
+ {
+ repatchInt32(from, reinterpret_cast<int32_t>(to));
+ }
+
+ static void* readPointer(void* from)
+ {
+ return reinterpret_cast<void*>(readInt32(from));
+ }
+
+ static void* readCallTarget(void* from)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn -= 4;
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ int32_t result = (*insn & 0x0000ffff) << 16;
+ insn++;
+ ASSERT((*insn & 0xfc000000) == 0x34000000); // ori
+ result |= *insn & 0x0000ffff;
+ return reinterpret_cast<void*>(result);
+ }
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if GCC_VERSION_AT_LEAST(4, 3, 0)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3)
+ int lineSize;
+ asm("rdhwr %0, $1" : "=r" (lineSize));
+ //
+ // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
+ // mips_expand_synci_loop that may execute synci one more time.
+ // "start" points to the fisrt byte of the cache line.
+ // "end" points to the last byte of the line before the last cache line.
+ // Because size is always a multiple of 4, this is safe to set
+ // "end" to the last byte.
+ //
+ intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
+ intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
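+ // For example, with lineSize 32, code 0x1000 and size 40: start is
+ // 0x1000 and end is ((0x1027 & -32) - 1) == 0x101f, the last byte of
+ // the line before the last cache line.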
+ __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
+#else
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#endif
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return sizeof(MIPSWord) * 4;
+ }
+
+ static void revertJumpToMove(void* instructionStart, RegisterID rt, int imm)
+ {
+ MIPSWord* insn = static_cast<MIPSWord*>(instructionStart);
+ size_t codeSize = 2 * sizeof(MIPSWord);
+
+ // lui
+ *insn = 0x3c000000 | (rt << OP_SH_RT) | ((imm >> 16) & 0xffff);
+ ++insn;
+ // ori
+ *insn = 0x34000000 | (rt << OP_SH_RS) | (rt << OP_SH_RT) | (imm & 0xffff);
+ ++insn;
+ // If the following instruction is "jr $t9", replace it with a nop.
+ if (*insn == 0x03200008) {
+ *insn = 0x00000000;
+ codeSize += sizeof(MIPSWord);
+ }
+ cacheFlush(insn, codeSize);
+ }
+
+ static void replaceWithJump(void* instructionStart, void* to)
+ {
+ ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 3));
+ ASSERT(!(bitwise_cast<uintptr_t>(to) & 3));
+ size_t ops = linkDirectJump(instructionStart, to);
+ cacheFlush(instructionStart, ops);
+ }
+
+ static void replaceWithLoad(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x8c000000 | ((*insn) & 0x3ffffff); // lw
+ cacheFlush(insn, 4);
+ }
+
+ static void replaceWithAddressComputation(void* instructionStart)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(instructionStart);
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ insn++;
+ ASSERT((*insn & 0xfc0007ff) == 0x00000021); // addu
+ insn++;
+ *insn = 0x24000000 | ((*insn) & 0x3ffffff); // addiu
+ cacheFlush(insn, 4);
+ }
+
+private:
+ /* Update each jump in the buffer after the code has been relocated from oldBase to newBase. */
+ void relocateJumps(void* oldBase, void* newBase)
+ {
+ // Check each jump
+ for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
+ int pos = iter->m_offset;
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(newBase) + pos);
+ insn = insn + 2;
+ // Need to make sure we have 5 valid instructions after pos
+ if ((unsigned)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord))
+ continue;
+
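+ // An absolute "j" encodes only the low 28 bits of the target; the top
+ // four bits come from the address of its delay slot. If the rebased
+ // target stays within the same 256MB region, the instruction is simply
+ // re-encoded; otherwise it is expanded into a lui/ori/jr sequence.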
+ if ((*insn & 0xfc000000) == 0x08000000) { // j
+ int offset = *insn & 0x03ffffff;
+ int oldInsnAddress = (int)insn - (int)newBase + (int)oldBase;
+ int topFourBits = (oldInsnAddress + 4) >> 28;
+ int oldTargetAddress = (topFourBits << 28) | (offset << 2);
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ int newInsnAddress = (int)insn;
+ if (((newInsnAddress + 4) >> 28) == (newTargetAddress >> 28))
+ *insn = 0x08000000 | ((newTargetAddress >> 2) & 0x3ffffff);
+ else {
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ }
+ } else if ((*insn & 0xffe00000) == 0x3c000000) { // lui
+ int high = (*insn & 0xffff) << 16;
+ int low = *(insn + 1) & 0xffff;
+ int oldTargetAddress = high | low;
+ int newTargetAddress = oldTargetAddress - (int)oldBase + (int)newBase;
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ }
+ }
+ }
+
+ static int linkWithOffset(MIPSWord* insn, void* to)
+ {
+ ASSERT((*insn & 0xfc000000) == 0x10000000 // beq
+ || (*insn & 0xfc000000) == 0x14000000 // bne
+ || (*insn & 0xffff0000) == 0x45010000 // bc1t
+ || (*insn & 0xffff0000) == 0x45000000); // bc1f
+ intptr_t diff = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
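+ // MIPS branch offsets are counted in words, relative to the instruction
+ // after the branch (the delay slot), hence the "- 4" and ">> 2".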
+
+ if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) {
+ /*
+ Convert the sequence:
+ beq $2, $3, target
+ nop
+ b 1f
+ nop
+ nop
+ nop
+ 1:
+
+ to the new sequence if possible:
+ bne $2, $3, 1f
+ nop
+ j target
+ nop
+ nop
+ nop
+ 1:
+
+ OR to the new sequence:
+ bne $2, $3, 1f
+ nop
+ lui $25, target >> 16
+ ori $25, $25, target & 0xffff
+ jr $25
+ nop
+ 1:
+
+ Note: beq/bne/bc1t/bc1f are converted to bne/beq/bc1f/bc1t.
+ */
+
+ if (*(insn + 2) == 0x10000003) {
+ if ((*insn & 0xfc000000) == 0x10000000) // beq
+ *insn = (*insn & 0x03ff0000) | 0x14000005; // bne
+ else if ((*insn & 0xfc000000) == 0x14000000) // bne
+ *insn = (*insn & 0x03ff0000) | 0x10000005; // beq
+ else if ((*insn & 0xffff0000) == 0x45010000) // bc1t
+ *insn = 0x45000005; // bc1f
+ else if ((*insn & 0xffff0000) == 0x45000000) // bc1f
+ *insn = 0x45010005; // bc1t
+ else
+ ASSERT(0);
+ }
+
+ insn = insn + 2;
+ if ((reinterpret_cast<intptr_t>(insn) + 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *insn = 0x08000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ *(insn + 1) = 0;
+ return 4 * sizeof(MIPSWord);
+ }
+
+ intptr_t newTargetAddress = reinterpret_cast<intptr_t>(to);
+ /* lui */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((newTargetAddress >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (newTargetAddress & 0xffff);
+ /* jr */
+ *(insn + 2) = 0x00000008 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 5 * sizeof(MIPSWord);
+ }
+
+ *insn = (*insn & 0xffff0000) | (diff & 0xffff);
+ return sizeof(MIPSWord);
+ }
+
+ static int linkCallInternal(void* from, void* to)
+ {
+ MIPSWord* insn = reinterpret_cast<MIPSWord*>(from);
+ insn = insn - 4;
+
+ if ((*(insn + 2) & 0xfc000000) == 0x0c000000) { // jal
+ if ((reinterpret_cast<intptr_t>(from) - 4) >> 28
+ == reinterpret_cast<intptr_t>(to) >> 28) {
+ *(insn + 2) = 0x0c000000 | ((reinterpret_cast<intptr_t>(to) >> 2) & 0x3ffffff);
+ return sizeof(MIPSWord);
+ }
+
+ /* lui $25, (to >> 16) & 0xffff */
+ *insn = 0x3c000000 | (MIPSRegisters::t9 << OP_SH_RT) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori $25, $25, to & 0xffff */
+ *(insn + 1) = 0x34000000 | (MIPSRegisters::t9 << OP_SH_RT) | (MIPSRegisters::t9 << OP_SH_RS) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ /* jalr $25 */
+ *(insn + 2) = 0x0000f809 | (MIPSRegisters::t9 << OP_SH_RS);
+ return 3 * sizeof(MIPSWord);
+ }
+
+ ASSERT((*insn & 0xffe00000) == 0x3c000000); // lui
+ ASSERT((*(insn + 1) & 0xfc000000) == 0x34000000); // ori
+
+ /* lui */
+ *insn = (*insn & 0xffff0000) | ((reinterpret_cast<intptr_t>(to) >> 16) & 0xffff);
+ /* ori */
+ *(insn + 1) = (*(insn + 1) & 0xffff0000) | (reinterpret_cast<intptr_t>(to) & 0xffff);
+ return 2 * sizeof(MIPSWord);
+ }
+
+ AssemblerBuffer m_buffer;
+ Jumps m_jumps;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(MIPS)
+
+#endif // MIPSAssembler_h
diff --git a/src/3rdparty/masm/assembler/MacroAssembler.h b/src/3rdparty/masm/assembler/MacroAssembler.h
new file mode 100644
index 0000000000..f74680d7fc
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssembler.h
@@ -0,0 +1,1465 @@
+/*
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MacroAssembler_h
+#define MacroAssembler_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(ASSEMBLER)
+
+#if CPU(ARM_THUMB2)
+#include "MacroAssemblerARMv7.h"
+namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
+
+#elif CPU(ARM_TRADITIONAL)
+#include "MacroAssemblerARM.h"
+namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };
+
+#elif CPU(MIPS)
+#include "MacroAssemblerMIPS.h"
+namespace JSC {
+typedef MacroAssemblerMIPS MacroAssemblerBase;
+};
+
+#elif CPU(X86)
+#include "MacroAssemblerX86.h"
+namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
+
+#elif CPU(X86_64)
+#include "MacroAssemblerX86_64.h"
+namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
+
+#elif CPU(SH4)
+#include "MacroAssemblerSH4.h"
+namespace JSC {
+typedef MacroAssemblerSH4 MacroAssemblerBase;
+};
+
+#else
+#error "The MacroAssembler is not supported on this platform."
+#endif
+
+namespace JSC {
+
+class MacroAssembler : public MacroAssemblerBase {
+public:
+
+ using MacroAssemblerBase::pop;
+ using MacroAssemblerBase::jump;
+ using MacroAssemblerBase::branch32;
+ using MacroAssemblerBase::move;
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ using MacroAssemblerBase::add32;
+ using MacroAssemblerBase::and32;
+ using MacroAssemblerBase::branchAdd32;
+ using MacroAssemblerBase::branchMul32;
+ using MacroAssemblerBase::branchSub32;
+ using MacroAssemblerBase::lshift32;
+ using MacroAssemblerBase::or32;
+ using MacroAssemblerBase::rshift32;
+ using MacroAssemblerBase::store32;
+ using MacroAssemblerBase::sub32;
+ using MacroAssemblerBase::urshift32;
+ using MacroAssemblerBase::xor32;
+#endif
+
+ static const double twoToThe32; // 2^32, used by code that converts between doubles and 32-bit integers.
+
+ // Utilities used by the DFG JIT.
+#if ENABLE(DFG_JIT)
+ using MacroAssemblerBase::invert;
+
+ static DoubleCondition invert(DoubleCondition cond)
+ {
+ switch (cond) {
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return DoubleEqual; // make compiler happy
+ }
+ }
+
+ static bool isInvertible(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static ResultCondition invert(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return Zero; // Make compiler happy for release builds.
+ }
+ }
+#endif
+
+ // Platform-agnostic convenience functions,
+ // described in terms of other macro assembly methods.
+ void pop()
+ {
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
+ }
+
+ void peek(RegisterID dest, int index = 0)
+ {
+ loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ Address addressForPoke(int index)
+ {
+ return Address(stackPointerRegister, (index * sizeof(void*)));
+ }
+
+ void poke(RegisterID src, int index = 0)
+ {
+ storePtr(src, addressForPoke(index));
+ }
+
+ void poke(TrustedImm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(TrustedImmPtr imm, int index = 0)
+ {
+ storePtr(imm, addressForPoke(index));
+ }
+
+#if CPU(X86_64)
+ void peek64(RegisterID dest, int index = 0)
+ {
+ load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ void poke(TrustedImm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+
+ void poke64(RegisterID src, int index = 0)
+ {
+ store64(src, addressForPoke(index));
+ }
+#endif
+
+#if CPU(MIPS)
+ void poke(FPRegisterID src, int index = 0)
+ {
+ ASSERT(!(index & 1));
+ storeDouble(src, addressForPoke(index));
+ }
+#endif
+
+ // Backward branches; these are currently all implemented using existing forward branch mechanisms.
+ void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+ void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
+ {
+ branchPtr(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
+ {
+ branch32(cond, op1, op2).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
+ {
+ branch32(cond, op1, imm).linkTo(target, this);
+ }
+
+ void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
+ {
+ branch32(cond, left, right).linkTo(target, this);
+ }
+
+ Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
+ {
+ return branch32(commute(cond), right, left);
+ }
+
+ void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
+ {
+ branchTestPtr(cond, reg).linkTo(target, this);
+ }
+
+#if !CPU(ARM_THUMB2)
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtr(cond, left, right));
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
+ }
+
+ PatchableJump patchableJump()
+ {
+ return PatchableJump(jump());
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return PatchableJump(branchTest32(cond, reg, mask));
+ }
+#endif // !CPU(ARM_THUMB2)
+
+#if !CPU(ARM)
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, reg, imm));
+ }
+#endif // !CPU(ARM)
+
+ void jump(Label target)
+ {
+ jump().linkTo(target, this);
+ }
+
+ // Commute a relational condition; returns a new condition that will produce
+ // the same result given the same inputs with their positions exchanged.
+ static RelationalCondition commute(RelationalCondition condition)
+ {
+ switch (condition) {
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ case GreaterThan:
+ return LessThan;
+ case GreaterThanOrEqual:
+ return LessThanOrEqual;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ default:
+ break;
+ }
+
+ ASSERT(condition == Equal || condition == NotEqual);
+ return condition;
+ }
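+ // For example, the branch32 overloads above use this to turn
+ // branch32(LessThan, Imm32(5), reg) into branch32(GreaterThan, reg, Imm32(5)).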
+
+ static const unsigned BlindingModulus = 64;
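+ // Returns true for roughly 1 in BlindingModulus calls, so constants that
+ // are not otherwise special-cased are blinded only that often, bounding
+ // the average cost of blinding.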
+ bool shouldConsiderBlinding()
+ {
+ return !(random() & (BlindingModulus - 1));
+ }
+
+ // Ptr methods
+ // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
+ // FIXME: should this use a test for 32-bitness instead of this specific exception?
+#if !CPU(X86_64)
+ void addPtr(Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add32(imm, srcDest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add32(TrustedImm32(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add32(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and32(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and32(imm, srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg32(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or32(src, dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or32(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or32(TrustedImm32(imm), dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub32(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub32(TrustedImm32(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor32(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor32(imm, srcDest);
+ }
+
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load32WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ move(Imm32(imm.asTrustedImmPtr()), dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare32(cond, left, right, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store32(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ void storePtr(ImmPtr imm, Address address)
+ {
+ store32(Imm32(imm.asTrustedImmPtr()), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, void* address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store32WithAddressOffsetPatch(src, address);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch32(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
+ {
+ return branch32(cond, left, TrustedImm32(right));
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, src, dest);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest32(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, imm, dest);
+ }
+ using MacroAssemblerBase::branchTest8;
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
+ }
+#else
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(Address src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add64(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, Address address)
+ {
+ add64(imm, address);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add64(TrustedImm64(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add64(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and64(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and64(imm, srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg64(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or64(src, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or64(imm, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or64(TrustedImm64(imm), dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or64(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ or64(imm, src, dest);
+ }
+
+ void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+ {
+ rotateRight64(imm, srcDst);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub64(TrustedImm64(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(RegisterID src, Address dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor64(imm, srcDest);
+ }
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, BaseIndex address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store64WithAddressOffsetPatch(src, address);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest64(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+ {
+ return branchTest64(cond, address, reg);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd64(cond, imm, dest);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub64(cond, imm, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ return branchSub64(cond, src1, src2, dest);
+ }
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ using MacroAssemblerBase::and64;
+ using MacroAssemblerBase::convertInt32ToDouble;
+ using MacroAssemblerBase::store64;
+ bool shouldBlindDouble(double value)
+ {
+ // Don't trust NaN or +/-Infinity
+ if (!std::isfinite(value))
+ return shouldConsiderBlinding();
+
+ // Try to force normalisation, and check that there's no change
+ // in the bit pattern
+ if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
+ return shouldConsiderBlinding();
+
+ value = fabs(value);
+ // Only allow a limited set of fractional components
+ double scaledValue = value * 8;
+ if (scaledValue / 8 != value)
+ return shouldConsiderBlinding();
+ double frac = scaledValue - floor(scaledValue);
+ if (frac != 0.0)
+ return shouldConsiderBlinding();
+
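+ // Only small "round" values (multiples of 1/8 no larger than 0xff) are
+ // considered safe to embed unblinded.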
+ return value > 0xff;
+ }
+
+ bool shouldBlind(ImmPtr imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+ // In debug builds, always blind all constants, if only so we know
+ // if we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+ }
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+ }
+
+ struct RotatedImmPtr {
+ RotatedImmPtr(uintptr_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImmPtr value;
+ TrustedImm32 rotation;
+ };
+
+ RotatedImmPtr rotationBlindConstant(ImmPtr imm)
+ {
+ uint8_t rotation = random() % (sizeof(void*) * 8);
+ uintptr_t value = imm.asTrustedImmPtr().asIntptr();
+ value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
+ return RotatedImmPtr(value, rotation);
+ }
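+ // The blinded immediate is the pointer rotated left by a random amount;
+ // loadRotationBlindedConstant rotates it back by the same amount, so the
+ // raw pointer value never appears verbatim in the instruction stream.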
+
+ void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRightPtr(constant.rotation, dest);
+ }
+
+ bool shouldBlind(Imm64 imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+ // In debug builds, always blind all constants, if only so we know
+ // if we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint64_t value = imm.asTrustedImm64().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+
+ JSValue jsValue = JSValue::decode(value);
+ if (jsValue.isInt32())
+ return shouldBlind(Imm32(jsValue.asInt32()));
+ if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
+ return false;
+
+ if (!shouldBlindDouble(bitwise_cast<double>(value)))
+ return false;
+ }
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+ }
+
+ struct RotatedImm64 {
+ RotatedImm64(uint64_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImm64 value;
+ TrustedImm32 rotation;
+ };
+
+ RotatedImm64 rotationBlindConstant(Imm64 imm)
+ {
+ uint8_t rotation = random() % (sizeof(int64_t) * 8);
+ uint64_t value = imm.asTrustedImm64().m_value;
+ value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+ return RotatedImm64(value, rotation);
+ }
+
+ void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRight64(constant.rotation, dest);
+ }
+
+ void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+ convertInt32ToDouble(scratchRegister, dest);
+ } else
+ convertInt32ToDouble(imm.asTrustedImm32(), dest);
+ }
+
+ void move(ImmPtr imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImmPtr(), dest);
+ }
+
+ void move(Imm64 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm64(), dest);
+ }
+
+ void and64(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and64(key.value1, dest);
+ and64(key.value2, dest);
+ } else
+ and64(imm.asTrustedImm32(), dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
+ {
+ if (shouldBlind(right)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
+ return branchPtr(cond, left, scratchRegister);
+ }
+ return branchPtr(cond, left, right.asTrustedImmPtr());
+ }
+
+ void storePtr(ImmPtr imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ storePtr(scratchRegister, dest);
+ } else
+ storePtr(imm.asTrustedImmPtr(), dest);
+ }
+
+ void store64(Imm64 imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ store64(scratchRegister, dest);
+ } else
+ store64(imm.asTrustedImm64(), dest);
+ }
+
+#endif
+
+#endif // !CPU(X86_64)
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ bool shouldBlind(Imm32 imm)
+ {
+#if ENABLE(FORCED_JIT_BLINDING)
+ UNUSED_PARAM(imm);
+ // In debug builds, always blind all constants, if only so we know
+ // if we've broken blinding during patch development.
+ return true;
+#else
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint32_t value = imm.asTrustedImm32().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffff:
+ return false;
+ default:
+ if (value <= 0xff)
+ return false;
+ if (~value <= 0xff)
+ return false;
+ }
+
+ if (!shouldConsiderBlinding())
+ return false;
+
+ return shouldBlindForSpecificArch(value);
+#endif
+ }
+
+ struct BlindedImm32 {
+ BlindedImm32(int32_t v1, int32_t v2)
+ : value1(v1)
+ , value2(v2)
+ {
+ }
+ TrustedImm32 value1;
+ TrustedImm32 value2;
+ };
+
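+ // The key is masked to the byte-width of the constant being blinded, so
+ // neither blinded half is wider than the original immediate.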
+ uint32_t keyForConstant(uint32_t value, uint32_t& mask)
+ {
+ uint32_t key = random();
+ if (value <= 0xff)
+ mask = 0xff;
+ else if (value <= 0xffff)
+ mask = 0xffff;
+ else if (value <= 0xffffff)
+ mask = 0xffffff;
+ else
+ mask = 0xffffffff;
+ return key & mask;
+ }
+
+ uint32_t keyForConstant(uint32_t value)
+ {
+ uint32_t mask = 0;
+ return keyForConstant(value, mask);
+ }
+
+ BlindedImm32 xorBlindConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue);
+ return BlindedImm32(baseValue ^ key, key);
+ }
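+ // For example, with baseValue 0x1234 and a (hypothetical) key 0x0ff0,
+ // value1 is 0x1dc4 and value2 is 0x0ff0; loadXorBlindedConstant then
+ // computes 0x1dc4 ^ 0x0ff0 == 0x1234.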
+
+ BlindedImm32 additionBlindedConstant(Imm32 imm)
+ {
+ // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
+ static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };
+
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
+ if (key > baseValue)
+ key = key - baseValue;
+ return BlindedImm32(baseValue - key, key);
+ }
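+ // value1 + value2 == (baseValue - key) + key == baseValue, and the mask
+ // table keeps "key" a multiple of the operand's alignment, so each
+ // partial sum is as aligned as the original immediate.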
+
+ BlindedImm32 andBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
+ return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
+ }
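+ // Where a key bit is 1, value1 carries the real bit and value2 is 1;
+ // where it is 0, value1 is 1 and value2 carries the real bit, so and'ing
+ // both halves together reconstructs baseValue.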
+
+ BlindedImm32 orBlindedConstant(Imm32 imm)
+ {
+ uint32_t baseValue = imm.asTrustedImm32().m_value;
+ uint32_t mask = 0;
+ uint32_t key = keyForConstant(baseValue, mask);
+ ASSERT((baseValue & mask) == baseValue);
+ return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
+ }
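+ // Similarly, (baseValue & key) | (baseValue & ~key) == baseValue, so
+ // or'ing both halves together reconstructs the constant.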
+
+ void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
+ {
+ move(constant.value1, dest);
+ xor32(constant.value2, dest);
+ }
+
+ void add32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ add32(key.value1, dest);
+ add32(key.value2, dest);
+ } else
+ add32(imm.asTrustedImm32(), dest);
+ }
+
+ void addPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ addPtr(key.value1, dest);
+ addPtr(key.value2, dest);
+ } else
+ addPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and32(key.value1, dest);
+ and32(key.value2, dest);
+ } else
+ and32(imm.asTrustedImm32(), dest);
+ }
+
+ void andPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ andPtr(key.value1, dest);
+ andPtr(key.value2, dest);
+ } else
+ andPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void and32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return and32(imm.asTrustedImm32(), dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ and32(src, dest);
+ } else
+ and32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void move(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm32(), dest);
+ }
+
+ void or32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src == dest)
+ return or32(imm, dest);
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ or32(src, dest);
+ } else
+ or32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void or32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = orBlindedConstant(imm);
+ or32(key.value1, dest);
+ or32(key.value2, dest);
+ } else
+ or32(imm.asTrustedImm32(), dest);
+ }
+
+ void poke(Imm32 value, int index = 0)
+ {
+ store32(value, addressForPoke(index));
+ }
+
+ void poke(ImmPtr value, int index = 0)
+ {
+ storePtr(value, addressForPoke(index));
+ }
+
+#if CPU(X86_64)
+ void poke(Imm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+#endif
+
+ void store32(Imm32 imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+#if CPU(X86) || CPU(X86_64)
+ BlindedImm32 blind = xorBlindConstant(imm);
+ store32(blind.value1, dest);
+ xor32(blind.value2, dest);
+#else
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
+ store32(scratchRegister, dest);
+ } else {
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ store32(imm.asTrustedImm32(), dest);
+ }
+#endif
+ } else
+ store32(imm.asTrustedImm32(), dest);
+ }
+
+ void sub32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ sub32(key.value1, dest);
+ sub32(key.value2, dest);
+ } else
+ sub32(imm.asTrustedImm32(), dest);
+ }
+
+ void subPtr(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ subPtr(key.value1, dest);
+ subPtr(key.value2, dest);
+ } else
+ subPtr(imm.asTrustedImm32(), dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, src, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), src, dest);
+ }
+
+ void xor32(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 blind = xorBlindConstant(imm);
+ xor32(blind.value1, dest);
+ xor32(blind.value2, dest);
+ } else
+ xor32(imm.asTrustedImm32(), dest);
+ }
+
+ Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
+ {
+ if (shouldBlind(right)) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
+ return branch32(cond, left, scratchRegister);
+ }
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ return branch32(cond, left, right.asTrustedImm32());
+ }
+
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+ {
+ if (src == dest)
+ ASSERT(scratchRegisterForBlinding());
+
+ if (shouldBlind(imm)) {
+ if (src == dest) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ move(src, scratchRegister);
+ src = scratchRegister;
+ }
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchAdd32(cond, src, dest);
+ }
+ return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (src == dest)
+ ASSERT(scratchRegisterForBlinding());
+
+ if (shouldBlind(imm)) {
+ if (src == dest) {
+ if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
+ move(src, scratchRegister);
+ src = scratchRegister;
+ }
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchMul32(cond, src, dest);
+ }
+ return branchMul32(cond, imm.asTrustedImm32(), src, dest);
+ }
+
+ // branchSub32 takes a scratch register because 32-bit platforms use it
+ // with src == dest, and on x86-32 there is no platform scratch register.
+ Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
+ {
+ if (shouldBlind(imm)) {
+ ASSERT(scratch != dest);
+ ASSERT(scratch != src);
+ loadXorBlindedConstant(xorBlindConstant(imm), scratch);
+ return branchSub32(cond, src, scratch, dest);
+ }
+ return branchSub32(cond, src, imm.asTrustedImm32(), dest);
+ }
+
+ // Immediate shifts only have 5 controllable bits
+ // so we'll consider them safe for now.
+ TrustedImm32 trustedImm32ForShift(Imm32 imm)
+ {
+ return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+ }
+
+ void lshift32(Imm32 imm, RegisterID dest)
+ {
+ lshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ lshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void rshift32(Imm32 imm, RegisterID dest)
+ {
+ rshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ rshift32(src, trustedImm32ForShift(amount), dest);
+ }
+
+ void urshift32(Imm32 imm, RegisterID dest)
+ {
+ urshift32(trustedImm32ForShift(imm), dest);
+ }
+
+ void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
+ {
+ urshift32(src, trustedImm32ForShift(amount), dest);
+ }
+#endif
+};
+
+} // namespace JSC
+
+#else // ENABLE(ASSEMBLER)
+
+// If there is no assembler for this platform, at least allow code to make references to
+// some of the things it would otherwise define, albeit without giving that code any way
+// of doing anything useful.
+class MacroAssembler {
+private:
+ MacroAssembler() { }
+
+public:
+
+ enum RegisterID { NoRegister };
+ enum FPRegisterID { NoFPRegister };
+};
+
+#endif // ENABLE(ASSEMBLER)
+
+#endif // MacroAssembler_h
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp b/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp
new file mode 100644
index 0000000000..98dc3e9879
--- /dev/null
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2009 University of Szeged
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
+
+#include "MacroAssemblerARM.h"
+
+#if OS(LINUX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <elf.h>
+#include <asm/hwcap.h>
+#endif
+
+namespace JSC {
+
+static bool isVFPPresent()
+{